diff --git a/.gitattributes b/.gitattributes index a54613a36b03565f307587341f7e493a80d397b9..8614ad2531925fe2527f3bdd95f20b9a50c57e7b 100644 --- a/.gitattributes +++ b/.gitattributes @@ -7975,3 +7975,19 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text 2023/ZegCLIP_[[:space:]]Towards[[:space:]]Adapting[[:space:]]CLIP[[:space:]]for[[:space:]]Zero-Shot[[:space:]]Semantic[[:space:]]Segmentation/5aa3c465-5800-4cf0-842d-3ff527e740db_origin.pdf filter=lfs diff=lfs merge=lfs -text 2023/Zero-Shot[[:space:]]Dual-Lens[[:space:]]Super-Resolution/e5623582-a7e1-46ef-8ecf-c8f00837c751_origin.pdf filter=lfs diff=lfs merge=lfs -text 2023/Zero-Shot[[:space:]]Everything[[:space:]]Sketch-Based[[:space:]]Image[[:space:]]Retrieval,[[:space:]]and[[:space:]]in[[:space:]]Explainable[[:space:]]Style/225245ce-4b02-4bed-a89c-519460e55a67_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/Zero-Shot[[:space:]]Generative[[:space:]]Model[[:space:]]Adaptation[[:space:]]via[[:space:]]Image-Specific[[:space:]]Prompt[[:space:]]Learning/b1c7981e-30bf-4cd6-b2ff-8dd7951c08ab_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/Zero-Shot[[:space:]]Model[[:space:]]Diagnosis/549384d4-f244-4966-8076-15abf189c0a7_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/Zero-Shot[[:space:]]Noise2Noise_[[:space:]]Efficient[[:space:]]Image[[:space:]]Denoising[[:space:]]Without[[:space:]]Any[[:space:]]Data/2d536e4a-ee00-4291-84f0-1f5cbbcd1b0f_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/Zero-Shot[[:space:]]Object[[:space:]]Counting/a8ad52a0-ee27-4576-981a-5efd9d6920f0_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/Zero-Shot[[:space:]]Pose[[:space:]]Transfer[[:space:]]for[[:space:]]Unrigged[[:space:]]Stylized[[:space:]]3D[[:space:]]Characters/5ada3ef3-b974-4dfe-9b9c-f168fb79ad07_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/Zero-Shot[[:space:]]Referring[[:space:]]Image[[:space:]]Segmentation[[:space:]]With[[:space:]]Global-Local[[:space:]]Context[[:space:]]Features/d944ca48-5a24-4209-88df-55a9c1e47851_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/Zero-Shot[[:space:]]Text-to-Parameter[[:space:]]Translation[[:space:]]for[[:space:]]Game[[:space:]]Character[[:space:]]Auto-Creation/120ba89b-8ae1-4464-85a9-dac434567f6d_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/expOSE_[[:space:]]Accurate[[:space:]]Initialization-Free[[:space:]]Projective[[:space:]]Factorization[[:space:]]Using[[:space:]]Exponential[[:space:]]Regularization/6f578c7a-ccab-49cb-bc75-04e328397fea_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/gSDF_[[:space:]]Geometry-Driven[[:space:]]Signed[[:space:]]Distance[[:space:]]Functions[[:space:]]for[[:space:]]3D[[:space:]]Hand-Object[[:space:]]Reconstruction/ab15db8d-1011-4beb-9528-c9481ccf45d6_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/iCLIP_[[:space:]]Bridging[[:space:]]Image[[:space:]]Classification[[:space:]]and[[:space:]]Contrastive[[:space:]]Language-Image[[:space:]]Pre-Training[[:space:]]for[[:space:]]Visual[[:space:]]Recognition/3b34aaa2-686f-4539-b795-03e3193c1a2b_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/iDisc_[[:space:]]Internal[[:space:]]Discretization[[:space:]]for[[:space:]]Monocular[[:space:]]Depth[[:space:]]Estimation/f111aba4-6ed5-4778-a051-8ab6247508d3_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/iQuery_[[:space:]]Instruments[[:space:]]As[[:space:]]Queries[[:space:]]for[[:space:]]Audio-Visual[[:space:]]Sound[[:space:]]Separation/79c43f28-c9f8-4b22-8dca-1b2b47c85f07_origin.pdf filter=lfs diff=lfs merge=lfs -text 
+2023/itKD_[[:space:]]Interchange[[:space:]]Transfer-Based[[:space:]]Knowledge[[:space:]]Distillation[[:space:]]for[[:space:]]3D[[:space:]]Object[[:space:]]Detection/0c6f9efe-676e-4d3a-914a-2dc77bae5a75_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/pCON_[[:space:]]Polarimetric[[:space:]]Coordinate[[:space:]]Networks[[:space:]]for[[:space:]]Neural[[:space:]]Scene[[:space:]]Representations/762a8e4d-373c-4bab-83f9-f1ad8a1ae928_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/sRGB[[:space:]]Real[[:space:]]Noise[[:space:]]Synthesizing[[:space:]]With[[:space:]]Neighboring[[:space:]]Correlation-Aware[[:space:]]Noise[[:space:]]Model/4d3b606f-ee7b-460f-b2bf-d38a08fa5304_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/vMAP_[[:space:]]Vectorised[[:space:]]Object[[:space:]]Mapping[[:space:]]for[[:space:]]Neural[[:space:]]Field[[:space:]]SLAM/ff44bc7e-edea-45f5-9989-5020c0b824b0_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/2023/Zero-Shot Generative Model Adaptation via Image-Specific Prompt Learning/b1c7981e-30bf-4cd6-b2ff-8dd7951c08ab_content_list.json b/2023/Zero-Shot Generative Model Adaptation via Image-Specific Prompt Learning/b1c7981e-30bf-4cd6-b2ff-8dd7951c08ab_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..f626a7b711739a3fc709424dcad3ed03f14f582c --- /dev/null +++ b/2023/Zero-Shot Generative Model Adaptation via Image-Specific Prompt Learning/b1c7981e-30bf-4cd6-b2ff-8dd7951c08ab_content_list.json @@ -0,0 +1,1589 @@ +[ + { + "type": "text", + "text": "Zero-shot Generative Model Adaptation via Image-specific Prompt Learning", + "text_level": 1, + "bbox": [ + 98, + 130, + 872, + 155 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jiayi Guo $^{1*}$ Chaofei Wang $^{1*}$ You Wu $^{2}$ Eric Zhang $^{3}$ Kai Wang $^{3}$ Xingqian Xu $^{3}$ Shiji Song $^{1}$ Humphrey Shi $^{3,4\\dagger}$ Gao Huang $^{1\\dagger}$", + "bbox": [ + 99, + 178, + 854, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Tsinghua University, BNRist $^{2}$ UCAS $^{3}$ SHI Labs @ Oregon & UIUC $^{4}$ Picsart AI Research (PAIR) https://github.com/Picsart-AI-Research/IPL-Zero-Shot-Generative-Model-Adaptation", + "bbox": [ + 125, + 217, + 839, + 252 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d738b2a13791d5c547a3216529ad819d26214393a5987c93bbfd8295911f861c.jpg", + "image_caption": [ + "Figure 1. The mode collapse issue. For NADA [21] and our method, the same generator pre-trained on the source domain of \"Photo\" is adapted to the unseen target domains of \"Disney\", \"Anime painting\", \"Wall painting\" and \"Ukiyo-e\" only with the domain labels. The images above the dotted line are some examples from the internet. The generated images of NADA exhibit some similar unseen patterns (yellow box areas) which are undesired in terms of quality and diversity. This issue is largely addressed by our method." + ], + "image_footnote": [], + "bbox": [ + 89, + 263, + 885, + 568 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 650, + 313, + 666 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recently, CLIP-guided image synthesis has shown appealing performance on adapting a pre-trained source-domain generator to an unseen target domain. It does not require any target-domain samples but only the textual domain labels. The training is highly efficient, e.g., a few minutes. 
However, existing methods still have some limitations in the quality of generated images and may suffer from the mode collapse issue. A key reason is that a fixed adaptation direction is applied for all cross-domain image pairs, which leads to identical supervision signals. To address this issue, we propose an Image-specific Prompt Learning (IPL) method, which learns specific prompt vec", + "bbox": [ + 73, + 683, + 470, + 864 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "tors for each source-domain image. This produces a more precise adaptation direction for every cross-domain image pair, endowing the target-domain generator with greatly enhanced flexibility. Qualitative and quantitative evaluations on various domains demonstrate that IPL effectively improves the quality and diversity of synthesized images and alleviates the mode collapse. Moreover, IPL is independent of the structure of the generative model, such as generative adversarial networks or diffusion models. Code is available at https://github.com/Picsart-AI-Research/IPL-Zero-Shot-Generative-Model-Adaptation.", + "bbox": [ + 500, + 652, + 890, + 818 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 500, + 845, + 630, + 861 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In recent years, image synthesis using generative adversarial networks (GANs) [11] has been rapidly developed.", + "bbox": [ + 500, + 871, + 890, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal contribution.", + "bbox": [ + 94, + 876, + 204, + 888 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Corresponding authors.", + "bbox": [ + 96, + 888, + 225, + 901 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "11494", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The state-of-the-art methods can generate images that are hard to be distinguished from real data [14, 20, 21, 46, 50]. However, the GAN-based methods heavily rely on vast quantities of training examples, and adopt a cumbersome adversarial training scheme which generally costs many hours of training time. Unfortunately, in many real-world scenarios, data acquisition is difficult or expensive. For example, in the artistic domains, it is impossible to have artists make thousands of creations. The high training cost is also unacceptable on some embedded devices, e.g., cellphones.", + "bbox": [ + 75, + 90, + 472, + 243 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address these issues, researchers begin to focus on the generative model adaptation. The goal of this task is to adapt a pre-trained source-domain generator to a target domain with limited data. Many few-shot GAN-based methods are proposed, such as TGAN [48], FreezeD [30], MinGAN [47], ADA [18], DiffAug [53], IDC [33] and RSSA [49], etc. However, these methods still require some training images of the target domain and follow the adversarial training scheme. 
As a pioneer work, StyleGAN-NADA [8] (NADA for short) proposes a zero-shot adaptation method, which only requires textual domain labels and discards the cumbersome adversarial training scheme by introducing a pre-trained CLIP model. Although efficient, it still has obvious deficiencies, i.e., the limited quality and mode collapse of generated images. As shown in Fig.1, we adapt a pretrained generator of \"Photo\" domain to \"Disney\", \"Anime painting\", \"Wall painting\" and \"Ukiyo-e\" domains. For the results of NADA [8], we notice that the generated images of the same target domain always show some homogeneous patterns which degrade the image quality and diversity, such as deep nasolabial folds in \"Disney\", squinting eyes in \"Anime painting\", red cheeks in \"Wall painting\" and blue eyebrows in \"Ukiyo-e\" (yellow box areas).", + "bbox": [ + 75, + 244, + 472, + 592 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "By exploring the factors behind this phenomenon, we find that the key factor is the fixed adaptation direction produced by manually designed prompts. Sharing the direction for all cross-domain image pairs leads to identical supervision signals for the model adaptation. Consider the example, adapting a generator of \"Human\" domain to \"Tolkien elf\" domain as shown in Fig.2. The previous works [8, 22] adopt manually designed prompts (e.g., \"A photo of a\") plus the domain label to produce a fixed adaptation direction, which is shared by all cross-domain image pairs (Fig.2 (a)) in the adaptation process. We argue that the constraint is too restrictive and suppresses the image-specific features, leading to homogeneous generated patterns.", + "bbox": [ + 75, + 595, + 468, + 792 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we propose an Image-specific Prompt Learning (IPL) method to address the above issue. The motivation is setting more precise and diversified adaptation directions by customizing more image-specific prompts, for instance \"Asian girl\", \"Curly hair lady\" and \"Elder glass man\" (Fig.2 (b)). These adaptation directions endow the target-domain generator with high flexibility to synthesize", + "bbox": [ + 75, + 795, + 470, + 902 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/7d3df592624051881c753925364c9354fc57843906aafa0d6691c58435607553.jpg", + "image_caption": [ + "Figure 2. An illustration of our motivation. The previous methods adopt manual prompts to compute a fixed adaptation direction for all cross-domain image pairs, while our method learns image-specific prompts for producing more precise and diversified adaptation directions." + ], + "image_footnote": [], + "bbox": [ + 501, + 90, + 890, + 327 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "more diversified images. The proposed IPL is a two-stage method. In Stage 1, a latent mapper is trained to produce an image-specific set of prompt vectors conditioned on each source-domain image by a contrastive training scheme. The learned prompt vectors contain more specific and diversified features of the source-domain images than the fixed prompt vectors. We further propose a domain regularization loss to ensure that the learned prompt vectors are compatible with the target domain. 
In Stage 2, we compute more precise and diversified adaptation directions for each cross-domain image pair, and train the target-domain generator with an adaptive directional CLIP loss, which can be viewed as an improved version of the Directional CLIP Loss [8]. As shown in Fig.1, our method alleviates the mode collapse issue well. Extensive experiments across a wide range of domains demonstrate that the proposed IPL effectively improves the quality of synthesized images and overcomes the mode collapse issue. User studies and ablation studies are also conducted to validate the effectiveness of our method.", + "bbox": [ + 496, + 422, + 890, + 709 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "It is worth noting that our proposed IPL method is independent of the structure of the generative model, and can be applied to the recent diffusion models [13,27,31,35,41-43, 51]. Thus we also combine IPL with diffusion models and get a more robust and stronger generative capacity, especially on complex images, which shows the high effectiveness and adaptability of our approach.", + "bbox": [ + 496, + 709, + 890, + 816 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 829, + 640, + 845 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Generative model adaptation. Generative model adaptation is the task of adapting a generative model trained on a large-scale source domain to a data-limited target domain.", + "bbox": [ + 496, + 854, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "11495", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "According to the size of the training dataset of the target domain, it can be directly divided into two main categories: few-shot generative model adaptation and zero-shot generative model adaptation. For the few-shot generative model adaptation task, the most natural approach is to fine-tune a pre-trained GAN [2, 4, 26, 48]. However, fine-tuning the entire network weights used to result in overfitting. Subsequently, many methods were proposed to alleviate the overfitting issue. They either imposed strong regularization [52, 54], or modified the network parameters with a slight perturbation [30, 32, 37, 47], or preserved some important information by cross-domain alignment [33, 49], or performed data augmentation [45, 53, 55].", + "bbox": [ + 75, + 90, + 472, + 287 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For the zero-shot generative model adaptation task, NADA [8] first proposed to introduce a pre-trained CLIP model for supplying necessary prior knowledge. It only required textual domain labels, and encoded the domain gap as a text-guided adaptation direction in CLIP space. To enhance the identity-preserving capability of real-world image translation, Kim et al. further proposed DiffusionCLIP [22] which utilized diffusion models [42] instead of StyleGANs [18-21] in NADA. Nevertheless, these existing works all adopt a fixed adaptation direction which only contains the basic domain knowledge but no image-specific features. In this paper, we argue that this shared fixed adaptation direction may lead to the mode collapse issue. To produce more accurate and adaptive adaptation directions, we propose to learn diverse and specific prompt vectors for each image.", + "bbox": [ + 75, + 292, + 472, + 520 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Prompt learning. 
Prompt engineering is first introduced as a knowledge probing approach [34]. Given cloze-style prompts, it induces pre-trained language models to generate the corresponding answers. However, manually designed prompts may be sub-optimal and provide imprecise guidance. To tackle this issue, prompt learning [9, 16, 23, 25, 28, 40, 56] has been widely studied in natural language processing to automatically explore the optimal set of prompts. With the unprecedented development of vision-language models [15, 36] in recent years, researchers begin to apply prompt learning to computer vision tasks [7, 10, 17, 24, 57, 58]. In specific, Zhou et al. [57, 58] first adopted context optimization in image classification tasks by modeling context words with continuous vectors in the word embedding space. Subsequently, many downstream tasks in computer vision were also explored, e.g., object detection [7], visual grounding [24], video understanding [17] and transfer learning [10]. As far as we know, this is the first work to propose an adaptive prompt learning scheme for generative model adaptation. Different from previous prompt learning schemes, our method introduces a latent mapper to learn a specific set of prompt vectors for each image. When training the target-domain generator, the learned image-specific prompt vectors could produce more precise adaptation directions to provide better supervision signals.", + "bbox": [ + 75, + 523, + 472, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Methodology", + "text_level": 1, + "bbox": [ + 498, + 89, + 635, + 108 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The goal of zero-shot generative model adaptation is to adapt a pre-trained source-domain generator $G_{\\mathrm{s}}$ to an unseen target domain, and get the target-domain generator $G_{\\mathrm{t}}$ . The source domain with the domain label $\\mathrm{Y_s}$ , e.g., \"Human\", can obtain plentiful high-quality images by $G_{\\mathrm{s}}$ . The target domain is described only through the domain label $\\mathrm{Y_t}$ , e.g., \"Tolkien elf\", with no images. Following [8, 22], a pre-trained CLIP model [36] including an image encoder $E_{\\mathrm{I}}$ and a text encoder $E_{\\mathrm{T}}$ is introduced.", + "bbox": [ + 496, + 114, + 893, + 250 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We propose a two-stage method named Image-specific Prompt Learning (IPL). Its framework is shown in Fig.3. In Stage 1, a latent mapper $F$ is trained to produce a set of image-specific prompt vectors $\\{[\\mathbf{V}]_1^i, [\\mathbf{V}]_2^i, \\dots, [\\mathbf{V}]_m^i\\}$ for each latent code $w^i$ of a source-domain image. Each prompt vector has the same dimension with word embeddings in CLIP space. The training loss consists of a contrastive learning loss $\\mathcal{L}_{\\mathrm{contr}}$ and a domain regularization loss $\\mathcal{L}_{\\mathrm{domain}}$ . The former aims to preserve the image-specific features of each source domain image in the learned prompt vectors. The latter constrains the image-specific features to be suitable to the target domain, which means the learned features should not conflict with the target domain. For example, the features of prompts like \"round ear\" should not be contained in the ideal prompt vectors if the target domain is \"Tolkien elf\". 
In Stage 2, the trained latent mapper $F$ is plugged into the training process of the target-domain generator $G_{\\mathrm{t}}$ , and produces more precise and diversified adaptation directions for cross-domain image pairs. This training stage follows [8] except that learned prompt vectors produced by the latent mapper $F$ replace the fixed prompt vectors. The final textual supervision information includes shared learned prompt vectors and respective embeddings of the original domain labels.", + "bbox": [ + 496, + 251, + 893, + 613 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Image-specific prompt learning", + "text_level": 1, + "bbox": [ + 498, + 622, + 777, + 638 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "General prompts. The previous methods [8, 22] compute a fixed adaptation direction produced by two embeddings of manually designed prompts, e.g., \"a photo of a human\" and \"a photo of a Tolkien elf\", then constrain the directions of all cross-domain pairs to be parallel with the adaptation direction. In contrast to manually designed prompts, prompt learning [58] aims to find the optimal set of prompt vectors for a domain by directly tuning the embeddings of prompts. Formally, we define a general prompt matrix $\\mathrm{M_d}$ to represent a given domain d. $\\mathrm{M_d}$ consists of the prompt vectors $[\\mathbf{V}]_1, [\\mathbf{V}]_2, \\dots, [\\mathbf{V}]_m$ and the embedding of the domain label $[\\mathrm{Y_d}]$ as below:", + "bbox": [ + 496, + 646, + 893, + 827 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {M} _ {\\mathrm {d}} = [ \\mathbf {V} ] _ {1} [ \\mathbf {V} ] _ {2} \\dots [ \\mathbf {V} ] _ {m} [ \\mathrm {Y} _ {\\mathrm {d}} ], \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 596, + 832, + 890, + 849 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $m$ is the number of prompts. Suppose the dimension of each embedding is $k$ . Then the dimension of $\\mathbf{M}_{\\mathrm{d}}$ should be $(m + 1) \\times k$ . In [8, 22], the prompt vectors", + "bbox": [ + 496, + 854, + 893, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "11496", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/fff8e843236ad10f7f82005b31182998855786026da78fc0b4f6652c2ce03917.jpg", + "image_caption": [ + "Stage 1: Training latent mapper for prompt learning" + ], + "image_footnote": [], + "bbox": [ + 127, + 104, + 823, + 292 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/8d9c252ea1b985f5bf7a741110cdaaa4f4d7a769b1958bd4678937ff495d9bd4.jpg", + "image_caption": [ + "Stage 2: Training generator for image synthesis", + "Figure 3. The framework of our method. In Stage 1, a latent mapper $F$ is trained for prompt learning by a contrastive learning loss $\\mathcal{L}_{\\mathrm{contr}}$ and a domain regularization loss $\\mathcal{L}_{\\mathrm{domain}}$ . The image encoder $E_{\\mathrm{I}}$ and the text encoder $E_{\\mathrm{T}}$ are from the CLIP model [36]. In Stage 2, the target-domain generator $G_{\\mathrm{t}}$ is trained for image synthesis by the improved Directional CLIP Loss $\\mathcal{L}_{\\mathrm{adapt}}$ in which the adaptive prompts produced by the latent mapper are applied. In two stages, the locked modules are fixed while the unlocked modules are trained. 
For simplicity, we replace $E_{\\mathrm{I}}(G_{\\mathrm{s}}(w^{i}))$ and $E_{\\mathrm{T}}(\\mathrm{M}_{\\mathrm{s}}^{i})$ with $\\Gamma^i$ and $\\mathrm{T_s}^i$ , respectively." + ], + "image_footnote": [], + "bbox": [ + 153, + 310, + 751, + 439 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$[\\mathbf{V}]_1, [\\mathbf{V}]_2, \\dots, [\\mathbf{V}]_m$ are fixed embeddings of manually designed prompts. For prompt learning [58], the prompt vectors are learned by encoding each training image of the domain $d$ with $E_{\\mathrm{I}}$ and the prompt matrix $\\mathrm{M_d}$ with $E_{\\mathrm{T}}$ , and then maximizing the cosine similarity between them.", + "bbox": [ + 75, + 539, + 468, + 614 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Inspired by prompt learning, in the zero-shot generative model adaptation task, a natural idea is to learn an optimal set of prompt vectors instead of the manually designed prompts in NADA [8]. Although the adaptation direction calculated by the learned prompt vectors seems to be more reasonable than that of the manually designed prompts, it is still fixed and shared for all cross-domain image pairs. These fixed learned prompt vectors can not solve the mode collapse issue (Experimental validations can be seen in Sec. 4.4). To obtain more flexible and diversified adaptation directions, we further propose to learn a set of image-specific prompt vectors for each image, which can be regarded as an improved version of prompt learning.", + "bbox": [ + 75, + 621, + 468, + 819 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Image-specific prompts. Utilizing the source-domain generator $G_{\\mathrm{s}}$ , we train a latent mapper $F$ as shown in Fig.3 (Stage 1). Through the mapper, each image of the source domain can be matched to an optimal set of prompt vectors. Formally, given a latent code $w^{i}$ , corresponding to", + "bbox": [ + 75, + 825, + 468, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "the $i^{\\mathrm{th}}$ image in the source domain, the image-specific set of prompt vectors $\\{[\\mathbf{V}]_1^i,[\\mathbf{V}]_2^i,\\dots ,[\\mathbf{V}]_m^i\\}$ can be obtained by $F(w^{i},\\theta)$ , where $\\theta$ denotes the parameters of the latent mapper $F$ . Following the definition of the prompt matrix in Eq.(1), we define an image-specific prompt matrix of the $i^{\\mathrm{th}}$ source-domain image as:", + "bbox": [ + 496, + 539, + 890, + 630 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {M} _ {\\mathrm {s}} ^ {i} = F \\left(w ^ {i}, \\theta\\right) \\left[ \\mathrm {Y} _ {\\mathrm {s}} \\right] = \\left[ \\mathrm {V} \\right] _ {1} ^ {i} \\left[ \\mathrm {V} \\right] _ {2} ^ {i} \\dots \\left[ \\mathrm {V} \\right] _ {m} ^ {i} \\left[ \\mathrm {Y} _ {\\mathrm {s}} \\right]. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 544, + 637, + 890, + 655 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this paper, $F$ is a common four-layer fully-connected network. Next, we show how to train it.", + "bbox": [ + 496, + 661, + 890, + 690 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Contrastive training scheme. Given a batch of latent codes $\\{w^1, w^2, \\dots, w^n\\}$ , we can produce a batch of sets of prompt matrices $\\{\\mathrm{M_s^1}, \\mathrm{M_s^2}, \\dots, \\mathrm{M_s^n}\\}$ by $F$ and a batch of images $\\{G_{\\mathrm{s}}(w^{1}), G_{\\mathrm{s}}(w^{2}), \\dots, G_{\\mathrm{s}}(w^{n})\\}$ by $G_{\\mathrm{s}}$ . 
Then $n \\times n$ pairs $< G_{\\mathrm{s}}(w^{i}), \\mathrm{M}_{\\mathrm{s}}^{j} >$ , $i, j \\in \\{1, 2, \\dots, n\\}$ have been obtained. Then, we take the pairs of $i = j$ as positive samples, and the pairs of $i \\neq j$ as negative samples for contrastive training. Specifically, we compute the similarity between embeddings of the $i^{\\text{th}}$ image and the $j^{\\text{th}}$ prompt matrix in CLIP space as:", + "bbox": [ + 496, + 691, + 890, + 843 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {S i m} _ {i j} = \\operatorname {C o s} \\left(\\operatorname {N o r m} \\left(E _ {\\mathrm {I}} \\left(G _ {\\mathrm {s}} \\left(w ^ {i}\\right)\\right)\\right), \\operatorname {N o r m} \\left(E _ {\\mathrm {T}} \\left(\\mathrm {M} _ {\\mathrm {s}} ^ {j}\\right)\\right)\\right), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 519, + 847, + 890, + 864 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathrm{Norm}(\\cdot)$ and $\\mathrm{Cos}(\\cdot)$ represent $L_{2}$ normalization and the cosine function, respectively. The similarities of pos", + "bbox": [ + 496, + 869, + 890, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "11497", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "itive samples are maximized while the similarities of negative samples are minimized. The contrastive loss is expressed as:", + "bbox": [ + 75, + 90, + 468, + 135 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {c o n t r}} = \\mathbb {E} _ {w \\in \\mathcal {W}} \\left(\\sum_ {i \\neq j} \\left(\\operatorname {S i m} _ {i j}\\right) - \\sum_ {i = j} \\left(\\operatorname {S i m} _ {i j}\\right)\\right). \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 122, + 137, + 468, + 169 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Domain regularization loss. For the target domain without any prior knowledge except the domain label $\\mathrm{Y_t}$ , we can simply share the learned prompt vectors between the source and target domains following [8]. However, the shared prompt vectors may lead to the risk of generating unrealistic images for the target domain, because some learned prompt vectors may contain strongly relevant features to the source domain, leading to conflict with the target domain. For example, an image of \"Human\" domain is matched to prompt vectors of \"round ear\", but a corresponding image of \"Tolkien elf\" domain should not contain the features of \"round ear\". Sharing these prompt vectors is harmful to the target-domain image generation. Therefore, we further propose a domain regularization loss. Specifically, we constrain the angles between the embeddings of the image-specific prompt matrix $\\mathbf{M}_{\\mathrm{t}}^{i}$ and the target-domain label $\\mathrm{Y_t}$ in CLIP space to be small, to avoid the learned prompt vectors conflicting with the target domain. 
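To make the Stage-1 training concrete, the following is a minimal PyTorch-style sketch of the latent mapper and of the contrastive objective in Eq.(3)-(4), together with the domain regularization term the paper formalizes just below (Eq.(5)-(6)). It is an illustration only, not the authors' released code: clip_image_features and encode_prompt_matrix are hypothetical wrappers around the CLIP image and text encoders (the latter running the text encoder on the matrix of Eq.(2)), and the mapper's hidden width is an assumption.

```python
import torch.nn as nn
import torch.nn.functional as F

class LatentMapper(nn.Module):
    """F in Sec. 3.1: a four-layer fully-connected network that maps a latent code w
    to m prompt vectors of dimension k (the CLIP word-embedding size). The hidden
    width of 512 is an assumption, not a value given in the paper."""
    def __init__(self, w_dim=512, m=4, k=512, hidden=512):
        super().__init__()
        self.m, self.k = m, k
        self.net = nn.Sequential(
            nn.Linear(w_dim, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, m * k),
        )

    def forward(self, w):                            # w: (n, w_dim)
        return self.net(w).view(-1, self.m, self.k)  # (n, m, k) image-specific prompt vectors

def stage1_losses(w, mapper, G_s, clip_image_features, encode_prompt_matrix,
                  y_s_emb, y_t_emb, target_label_feat):
    """Contrastive loss over the n x n image/prompt-matrix pairs (Eq. 3-4) and the
    domain regularization described above (formalized below as Eq. 5). G_s,
    clip_image_features and encode_prompt_matrix are placeholder callables for the
    frozen source generator, the CLIP image encoder, and a wrapper that feeds the
    matrix [V]_1 ... [V]_m [Y_d] of Eq. (2) to the CLIP text encoder."""
    prompts = mapper(w)                                                   # (n, m, k)
    img = F.normalize(clip_image_features(G_s(w)), dim=-1)                # (n, c)
    txt_s = F.normalize(encode_prompt_matrix(prompts, y_s_emb), dim=-1)   # (n, c)
    sim = img @ txt_s.t()                                                 # Sim_ij of Eq. (3)
    pos = sim.diagonal().sum()                                            # i == j pairs
    neg = sim.sum() - pos                                                 # i != j pairs
    l_contr = (neg - pos) / sim.size(0)                                   # Eq. (4), batch-averaged
    # Keep target-domain prompt matrices close to the embedding of the target label Y_t.
    txt_t = F.normalize(encode_prompt_matrix(prompts, y_t_emb), dim=-1)
    lbl_t = F.normalize(target_label_feat, dim=-1)
    l_domain = -(txt_t * lbl_t).sum(dim=-1).mean()
    return l_contr, l_domain          # combined as l_contr + lambda * l_domain (Eq. 6)
```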
Formally, the domain regularization loss is described as:", + "bbox": [ + 75, + 171, + 472, + 458 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {d o m a i n}} = - \\mathbb {E} _ {w ^ {i} \\in \\mathcal {W}} \\sum_ {i = 1} ^ {n} \\left(\\operatorname {C o s} \\left(E _ {\\mathrm {T}} \\left(\\mathrm {M} _ {\\mathrm {t}} ^ {i}\\right), E _ {\\mathrm {T}} \\left(\\mathrm {Y} _ {\\mathrm {t}}\\right)\\right)\\right), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 91, + 465, + 468, + 503 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathrm{M}_{\\mathrm{t}}^{i}$ is calculated by Eq.(2) except replacing the domain label, $\\mathrm{Cos}(\\cdot)$ represents the cosine similarity.", + "bbox": [ + 76, + 511, + 468, + 542 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As a summary, the whole training loss function of the latent mapper $F$ is:", + "bbox": [ + 76, + 542, + 468, + 571 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathcal {L} _ {\\text {c o n t r}} + \\lambda \\mathcal {L} _ {\\text {d o m a i n}}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 186, + 580, + 468, + 595 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\lambda$ is the ratio parameter. Optimized by $\\mathcal{L}$ , the learned prompt vectors can not only reflect the features of the source-domain images, but also adapt to the target domain.", + "bbox": [ + 76, + 603, + 468, + 650 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2. Latent mapper guided generator training", + "text_level": 1, + "bbox": [ + 76, + 657, + 431, + 672 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "After training the latent mapper $F$ , we conduct the second stage: training the target-domain generator $G_{\\mathrm{t}}$ as shown in Fig.3 (Stage 2). In specific, we plug in the trained latent mapper, and train $G_{\\mathrm{t}}$ with an improved Directional CLIP Loss $\\mathcal{L}_{\\mathrm{adapt}}$ . Its main difference with [8] is using the image-specific prompt vectors that are produced on-the-fly by $F$ instead of the fixed ones of manually designed prompts. Formally, given a latent code $w^{i}$ , we calculate the direction of the $i^{\\mathrm{th}}$ source and target image pair as below:", + "bbox": [ + 75, + 680, + 468, + 815 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\Delta \\mathrm {I} _ {i} = \\operatorname {N o r m} \\left(E _ {\\mathrm {I}} \\left(G _ {\\mathrm {t}} \\left(w ^ {i}\\right)\\right)\\right) - \\operatorname {N o r m} \\left(E _ {\\mathrm {I}} \\left(G _ {\\mathrm {s}} \\left(w ^ {i}\\right)\\right)\\right), \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 99, + 821, + 468, + 840 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathrm{Norm}(\\cdot)$ represents $L_{2}$ normalization. The image-specific adaptation direction is calculated as below:", + "bbox": [ + 76, + 847, + 468, + 876 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\Delta \\mathrm {T} _ {i} = \\operatorname {N o r m} \\left(E _ {\\mathrm {T}} \\left(\\mathrm {M} _ {\\mathrm {t}} ^ {i}\\right)\\right) - \\operatorname {N o r m} \\left(E _ {\\mathrm {T}} \\left(\\mathrm {M} _ {\\mathrm {s}} ^ {i}\\right)\\right). 
\\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 122, + 883, + 468, + 902 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The improved Directional CLIP Loss $\\mathcal{L}_{\\mathrm{adapt}}$ is:", + "bbox": [ + 500, + 90, + 815, + 107 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {a d a p t}} = \\mathbb {E} _ {w ^ {i} \\in \\mathcal {W}} \\sum_ {i = 1} ^ {n} \\left(1 - \\frac {\\Delta \\mathrm {I} _ {i} \\cdot \\Delta \\mathrm {T} _ {i}}{| \\Delta \\mathrm {I} _ {i} | | \\Delta \\mathrm {T} _ {i} |}\\right), \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 560, + 117, + 890, + 156 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $n$ is the batch size of latent codes. $\\mathcal{L}_{\\mathrm{adapt}}$ constrains the direction of each image pair $\\Delta \\mathrm{I}_i$ with an image-specific adaptation direction $\\Delta \\mathrm{T}_i$ .", + "bbox": [ + 498, + 165, + 890, + 210 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 500, + 224, + 633, + 239 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we evaluate our method qualitatively and quantitatively. The experimental setup is firstly presented in Sec. 4.1. Then we show image synthesis results across various domains in Sec. 4.2. Utilizing a GAN inversion model and diffusion models, results of real-world image translation are provided in Sec. 4.3. Finally, we carefully conduct ablation studies on prompt designing schemes and loss term ratios in Sec. 4.4.", + "bbox": [ + 498, + 250, + 890, + 368 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Experimental setup", + "text_level": 1, + "bbox": [ + 500, + 378, + 687, + 395 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Baselines and settings. Two strong methods are chosen as our competitors. For zero-shot image synthesis, NADA [8] is the state-of-the-art method. Following NADA [8], we adapt the pre-trained StyleGANv2 [21] generators on (i) Flickr-Faces-HQ (FFHQ) [8] and (ii) Animal FacesHQ (AFHQ) [3], utilize the same pre-trained CLIP [36] built on ViT-B/32 [6]. For zero-shot real-world image translation, we utilize Restyle [1] with e4e [44] encoder to invert a real image into the latent space $\\mathcal{W}$ for StyleGANs. DiffusionCLIP (Diff-CLIP for short) [22] is the state-of-the-art method. We follow the setting of [22] except replacing denoising diffusion implicit models (DDIM) [42] with diffusion autoencoders [35]. The training process includes 300 iterations for prompt learning and 300 iterations for generator adaptation using a single NVIDIA RTX 3090 GPU. The batch size is set to 32 for prompt learning and 2 for generator adaptation. The number of learned prompt vectors $m$ is set to 4. For each domain, the ratio parameter $\\lambda$ in Eq.(6) is selected among [1, 10], according to the best Inception Score [38] of adapted generators. The whole training process requires about $10\\sim 20$ minutes. More implementation details can be seen in supplementary materials.", + "bbox": [ + 498, + 402, + 890, + 733 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Evaluation metrics. The ideal generated images should have: 1) high quality and diversity, 2) correct target-domain style, and 3) necessary source-domain information preservation (e.g., structure or identity). 
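Before the evaluation details continue, the Stage-2 objective just derived (Eq.(7)-(9)) can be written in the same illustrative PyTorch style as the Stage-1 sketch above; the encoder wrappers are the same hypothetical placeholders, and this is a sketch of the described loss, not the authors' released implementation.

```python
import torch.nn.functional as F

def adaptive_directional_clip_loss(w, G_s, G_t, mapper, clip_image_features,
                                   encode_prompt_matrix, y_s_emb, y_t_emb):
    """Improved Directional CLIP Loss of Eq. (7)-(9). G_s and the latent mapper stay
    frozen; only G_t receives gradients. Encoder wrappers are the same placeholders
    as in the Stage-1 sketch."""
    # Eq. (7): per-image direction between target and source generations in CLIP image space.
    delta_img = (F.normalize(clip_image_features(G_t(w)), dim=-1)
                 - F.normalize(clip_image_features(G_s(w)), dim=-1))
    # Eq. (8): image-specific text direction from the learned prompt matrices M_t^i and M_s^i.
    prompts = mapper(w)  # produced on-the-fly for each latent code
    delta_txt = (F.normalize(encode_prompt_matrix(prompts, y_t_emb), dim=-1)
                 - F.normalize(encode_prompt_matrix(prompts, y_s_emb), dim=-1))
    # Eq. (9): one minus cosine similarity between the two directions, averaged over the batch.
    return (1.0 - F.cosine_similarity(delta_img, delta_txt, dim=-1)).mean()
```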
For a comprehensive evaluation, we utilize the popular Inception Score (IS) [38] to evaluate the image quality and diversity, the Single Image Fréchet Inception Distance (SIFID) [39] to evaluate the target-domain style, the Structural Consistency Score (SCS) [49] to evaluate the structure preservation, the identity similarity (ID) [5, 12] to evaluate the identity preservation. More details can be seen in supplementary materials.", + "bbox": [ + 496, + 734, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "11498", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/29fb6b7f34be5303901b7120fd27c50add553887e1983dc4e092eba95845498f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 89, + 92, + 346, + 309 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/08418b9a6f0b65e8dec972b97a56e626c27bc7f5e4dc21961dfc895d8a67c363.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 352, + 93, + 607, + 308 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/d74e7911721bc05f5b888638160c73bd8faa6ecd3bda79ab6cc2324278edb17d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 614, + 93, + 870, + 309 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/2dc87c38b88cb36faeb6cd7945f5e3b66de331822eba924bd546c0e624226e44.jpg", + "image_caption": [ + "Figure 4. Image synthesis comparison results. For FFHQ [21], the source domain is \"Human\" and the target domains are \"Pixar character\", \"Tolkien elf\", and \"Werewolf\". For AFHQ-Dog [3], the source domain is \"Photo\" and the target domains are \"Cartoon\", \"Pointillism\", and \"Cubism\". The yellow box areas show the mode collapse problem of NADA [8]." + ], + "image_footnote": [], + "bbox": [ + 89, + 321, + 344, + 523 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/d6df7d3195dc9f738872649e3d2812238e7b48f155c54a226759d7ccd9a05f95.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 352, + 321, + 609, + 523 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/ed555e54f9be07325dd42f45c54854a4745e971370d6a0517d61066e98e8ce7a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 614, + 321, + 870, + 523 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/697dd8ceb06d0b29c8fb6f2e43ffa527abdeb6f8c975d2daf907724941da19c6.jpg", + "table_caption": [ + "Table 1. Quantitative evaluation results. US denotes user study. The best results are bold." + ], + "table_footnote": [], + "table_body": "
<table><thead><tr><td rowspan="3">Dataset</td><td rowspan="3">Source→Target</td><td colspan="2">IS [38] (↑)</td><td colspan="2">SCS [49] (↑)</td><td colspan="2">ID [5,12] (↑)</td><td colspan="6">SIFID [39] (↓)</td><td rowspan="3">US (↑)</td></tr>
<tr><td rowspan="2">NADA</td><td rowspan="2">IPL</td><td rowspan="2">NADA</td><td rowspan="2">IPL</td><td rowspan="2">NADA</td><td rowspan="2">IPL</td><td colspan="3">NADA</td><td colspan="3">IPL</td></tr>
<tr><td>R1</td><td>R2</td><td>R3</td><td>R1</td><td>R2</td><td>R3</td></tr></thead><tbody>
<tr><td rowspan="7">FFHQ [8]</td><td>Photo→Disney</td><td>2.721</td><td>3.089</td><td>0.407</td><td>0.448</td><td>0.782</td><td>0.801</td><td>2.776</td><td>3.136</td><td>3.670</td><td>2.517</td><td>2.930</td><td>3.497</td><td>82.6%</td></tr>
<tr><td>Photo→Anime painting</td><td>2.450</td><td>3.051</td><td>0.324</td><td>0.518</td><td>0.666</td><td>0.776</td><td>2.956</td><td>1.811</td><td>1.242</td><td>2.845</td><td>1.595</td><td>1.021</td><td>79.3%</td></tr>
<tr><td>Photo→Wall painting</td><td>2.183</td><td>2.676</td><td>0.439</td><td>0.487</td><td>0.594</td><td>0.637</td><td>1.944</td><td>1.220</td><td>1.331</td><td>1.930</td><td>1.183</td><td>1.274</td><td>80.9%</td></tr>
<tr><td>Photo→Ukiyo-e</td><td>2.205</td><td>2.974</td><td>0.420</td><td>0.506</td><td>0.775</td><td>0.632</td><td>1.954</td><td>1.990</td><td>1.326</td><td>1.165</td><td>1.255</td><td>0.878</td><td>85.9%</td></tr>
<tr><td>Human→Pixar character</td><td>2.703</td><td>2.785</td><td>0.379</td><td>0.461</td><td>0.757</td><td>0.853</td><td>0.793</td><td>0.932</td><td>0.865</td><td>0.638</td><td>0.821</td><td>1.092</td><td>86.7%</td></tr>
<tr><td>Human→Tolkien elf</td><td>2.479</td><td>2.778</td><td>0.416</td><td>0.491</td><td>0.711</td><td>0.772</td><td>0.632</td><td>1.495</td><td>1.452</td><td>0.690</td><td>0.637</td><td>0.701</td><td>76.8%</td></tr>
<tr><td>Human→Werewolf</td><td>2.619</td><td>2.809</td><td>0.399</td><td>0.417</td><td>0.642</td><td>0.747</td><td>1.969</td><td>1.846</td><td>1.967</td><td>1.734</td><td>1.688</td><td>1.911</td><td>72.7%</td></tr>
<tr><td rowspan="3">AFHQ [3]</td><td>Photo→Cartoon</td><td>6.505</td><td>8.658</td><td>0.407</td><td>0.563</td><td>0.925</td><td>0.941</td><td>2.708</td><td>2.672</td><td>3.870</td><td>2.517</td><td>2.477</td><td>3.278</td><td>87.6%</td></tr>
<tr><td>Photo→Pointillism</td><td>5.419</td><td>6.913</td><td>0.224</td><td>0.542</td><td>0.775</td><td>0.881</td><td>7.081</td><td>5.288</td><td>7.142</td><td>4.818</td><td>3.089</td><td>4.074</td><td>78.5%</td></tr>
<tr><td>Photo→Cubism</td><td>4.165</td><td>6.450</td><td>0.386</td><td>0.463</td><td>0.934</td><td>0.943</td><td>2.779</td><td>2.938</td><td>3.199</td><td>2.431</td><td>2.956</td><td>2.284</td><td>74.3%</td></tr></tbody></table>
", + "bbox": [ + 78, + 597, + 897, + 765 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Generative model adaptation", + "text_level": 1, + "bbox": [ + 76, + 777, + 338, + 792 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Qualitative comparison. In addition to Fig.1, we conduct extensive experiments across a wide range of domains as shown in Fig.4. All results indicate that our proposed approach outperforms NADA consistently. The yellow box areas in the figures denote the main different features between NADA and our IPL. From the quality of the gener", + "bbox": [ + 75, + 810, + 470, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ated images, the results of NADA have more incorrect features and noise, such as green mussy noise on hairs (Tolkien elf), ruined noses (Werewolf) and unshaped necks (Pointillism), while the results of IPL are more clear and correct. From the mode collapse perspective of the generated images, NADA is prone to collapse to some similar facial features for different images, such as depressed emotions (Pixar character), folded ears (Cartoon) and blue noses (Cu", + "bbox": [ + 496, + 777, + 890, + 898 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "11499", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "\"Photo\" $\\rightarrow$ \"Wall painting\"", + "bbox": [ + 411, + 92, + 557, + 104 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0268edaa23f642103bd1f705e64ee50c57d5519fc6cbf5a435497c24df1f5acd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 84, + 104, + 173, + 238 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/70ae5f6ce32ee7b7a22ca816cecd35cef0f9b1fd65ad245e20525bb713fea231.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 176, + 104, + 264, + 238 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/269fc30244248f9c7014d3a679e18bd2ad6f633c4ebe0d4a4847ebd42467e94a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 264, + 104, + 349, + 238 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/dadb6e2a11a8a7cd0a9aeca2c8fa601789afb8b890b353e11e32e2601826a1c5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 349, + 104, + 437, + 238 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/4687422010a3419ad8aa9635ccde0f89c1a9bdfe3517bd6651caa42d74798c4b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 437, + 104, + 527, + 238 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/fabe97722fa861eaa5d6afd08724ae22e7fb11443d4695cd3bb126ae86b5446d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 527, + 104, + 614, + 238 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e7af42432eb1da68e9e7d6b1894a86454d2c738c0ec1b4d399de583cf7332101.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 617, + 104, + 705, + 238 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/79042054a2ae262e18ffe0fc1d954021da116f3308a780ba49b49623567cc96f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 705, + 104, + 792, + 238 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/cd443fa96b2fde1eed83d4bfb736c84c63d44686ba3fa96b3cd7f41101a7129f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 792, + 104, + 877, + 238 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": 
"\"Human\" $\\rightarrow$ \"Tolkien elf\"", + "bbox": [ + 415, + 239, + 552, + 250 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/25c38f9cca72c89ddbb5ffad71a66b9291ff39396201b148a82ba074a5b641ee.jpg", + "image_caption": [ + "Real Images", + "Figure 5. Real-world image translation comparison results. Baselines are NADA [8], Diff-CLIP [22] and Diff-CLIP+ (an improved version of Diff-CLIP). Recon1, Recon2 and Recon3 refer to inversion results via Restyle [1], DDIM and diffusion autoencoders, respectively. GAN-IPL and Diff-IPL denote integrating IPL with NADA and Diff-CLIP+, respectively. Real images are from CelebA-HQ dataset [29] and translated into two styles of images, \"Wall painting\" and \"Tolkien elf\". The yellow boxes show the key observation areas." + ], + "image_footnote": [], + "bbox": [ + 84, + 250, + 173, + 383 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3554ec76e38e70f52b8788d4d92390f422f2f746cc296883976998f13ded151b.jpg", + "image_caption": [ + "Recon1" + ], + "image_footnote": [], + "bbox": [ + 176, + 250, + 264, + 383 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1a3fcd2901d98d2b5a9334bce46342c1e92c6ca58ebd207841b67e6cb52b34b3.jpg", + "image_caption": [ + "NADA" + ], + "image_footnote": [], + "bbox": [ + 264, + 250, + 349, + 383 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f5b419a0a98122ff4b831e60890433270dd62a07fed5fc62bae54df243de7bbb.jpg", + "image_caption": [ + "GAN-IPL" + ], + "image_footnote": [], + "bbox": [ + 349, + 250, + 437, + 383 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/fd02d4ef7fcca9b1bff27811a8aec50c12890e34cd577549341714e42c83056a.jpg", + "image_caption": [ + "Recon2" + ], + "image_footnote": [], + "bbox": [ + 439, + 250, + 527, + 383 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/520fd4a0ab9340aa229fd044e9cfd1ee7b585aab42909b89c04c0361f58f73bc.jpg", + "image_caption": [ + "Diff-CLIP" + ], + "image_footnote": [], + "bbox": [ + 527, + 250, + 614, + 383 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/37386bea78b0c8edf5a9d57d86167dae92e5f7e3622d12f72063ae790a8bbaf6.jpg", + "image_caption": [ + "Recon3" + ], + "image_footnote": [], + "bbox": [ + 617, + 250, + 705, + 383 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/716b93708d5f65e373def0309008655580349ad226402db625a51cac6be3c994.jpg", + "image_caption": [ + "Diff-CLIP+" + ], + "image_footnote": [], + "bbox": [ + 705, + 250, + 792, + 383 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/78f9acc720855cb593279202034e32f89e677c92a2db8a81fdba192bdde14f2a.jpg", + "image_caption": [ + "Diff-IPL" + ], + "image_footnote": [], + "bbox": [ + 792, + 250, + 877, + 383 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "bism), while IPL presents consistently higher diversity and solve the mode collapse issue well. Our advantages mainly come from the fact that the latent mapper preserves sufficient image-specific and target-domain friendly features from the source-domain images. The produced prompt vectors provide more precise and diversified adaptation directions for the target-domain generator adaptation.", + "bbox": [ + 75, + 462, + 468, + 568 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Quantitative comparison. To quantify the performance improvement of IPL compared to NADA [8], IS, SCS, ID and SIFID are evaluated. 
As reported in Tab.1, for IS, IPL outperforms NADA on all 10 settings, indicating our method achieves better image quality and diversity. For SCS and ID, IPL outperforms NADA on most of the 10 settings except \"Human $\\rightarrow$ Ukiyo-e\". It is mainly because that \"Ukiyo-e\" naturally favors humans with narrow eyes and pale skin, which encourages identity changes during training. For SIFID, we collect 3 reference images $(\\mathbb{R}_1,\\mathbb{R}_2,$ and $\\mathbb{R}_3)$ on the internet for each target domain. Tab.1 shows that IPL outperforms NADA in most cases, indicating our superiority in generating precise target-domain styles.", + "bbox": [ + 75, + 568, + 468, + 763 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "User studies. For each target domain, 32 images generated by NADA and our method are provided to human observers, together with their corresponding source images and textual labels of target domains. Human observers are required to choose better synthesized images which are semantically more consistent with the target domain labels and preserve the useful source-domain information better. We collect 1210 responses from 121 people using a survey platform. As reported in the last column of Tab.1, $80.5\\%$ of", + "bbox": [ + 75, + 765, + 468, + 900 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "users prefer our approach to NADA on average.", + "bbox": [ + 500, + 463, + 813, + 478 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Real-world image translation", + "text_level": 1, + "bbox": [ + 500, + 484, + 759, + 500 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "This task first inverts a real-world image to the latent code by a pre-trained inversion model and then feeds it to the trained target-domain generator to get the translated target-domain image. For GAN-based generators, we compare our method (GAN-IPL) with NADA by connecting the inversion model Restyle [1]. For diffusion model generators, we compare our method (Diff-IPL) with Diff-CLIP [22] and Diff-CLIP+ which is an improved version of Diff-CLIP [22] by replacing the original DDIM [42] with a diffusion autoencoder [35]. For these diffusion models, a deterministic inversion process is naturally provided.", + "bbox": [ + 496, + 508, + 890, + 674 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As shown in Fig.5, comparing the results of NADA and GAN-IPL, IPL's superiority of alleviating mode collapse over NADA can still be observed. Comparing the results of Recon1, Recon2 and Recon3, diffusion models (Recon2 and Recon3) consistently perform better identity preservation than Restyle (Recon1) for real image inversion, especially for some uncommon stuffs in a human face photo, e.g., the hats, hands and tattoos in Fig.5. However, this property is not well inherited in the target domain generators with a fixed adaptation direction (see the results of Diff-CLIP and Diff-CLIP+). Our proposed IPL could help preserve the details in source images better and present the target-domain styles correctly (see the results of Diff-IPL). Quantitative evaluation results of Diff-CLIP, Diff-CLIP+ and Diff-IPL can be seen in supplementary materials.", + "bbox": [ + 496, + 674, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "11500", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/32128f89966999a701937ed1a444c461eece8cf86d9b70eedbb719c0633a1b18.jpg", + "image_caption": [ + "Figure 6. 
Ablation results of prompt designing schemes." + ], + "image_footnote": [], + "bbox": [ + 86, + 93, + 454, + 364 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4. Ablation studies", + "text_level": 1, + "bbox": [ + 76, + 393, + 238, + 407 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Prompt designing schemes. We investigate four different prompt designing schemes: 1) manually fixed prompts (NADA), 2) learned fixed prompts, 3) random prompts and 4) adaptive prompts (Ours). Manually fixed prompts mean simply utilizing the manually designed prompts as NADA [8]. Learned fixed prompts denote unified prompt vectors produced by common prompt learning strategy [58] and shared for all images. Random prompts refer to prompt vectors produced by a randomly initialized latent mapper. Adaptive prompts denote the learned image-specific prompt vectors produced by our IPL method.", + "bbox": [ + 75, + 417, + 468, + 582 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As illustrated in Fig.6, synthesized images with manually fixed prompts and learned fixed prompts show some similar mode collapse issues, e.g., blue eyebrows (Ukiyo-e) and depressed emotions (Pixar character). They both produce a fixed adaptation direction, which leads to identical supervision signals for all image pairs. Synthesized images with random prompts present more photo-realistic results but lack the desired target-domain style. A possible reason is that the random prompts contain some features conflicting with the target domain and impede the learning of the target domain style. Our adaptive prompts perform best since the prompts contain more image-specific and target-domain friendly features from the source-domain images.", + "bbox": [ + 75, + 584, + 468, + 779 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Loss term ratios. We compare different values of the ratio parameter $\\lambda$ in Eq.(6), which is used to adjust the intensity of the domain regularization loss. Visual results are shown in Fig.7. In specific, when we set $\\lambda$ to a small value ( $\\lambda = 0$ as an extreme case), there is almost no constraint from the target domain. The learned prompts would excessively preserve the source-domain features. Thus the synthesized images are similar to their corresponding source", + "bbox": [ + 75, + 780, + 470, + 901 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/047ec3af94a9ec2da033b34d28cf97a94320a17fb212f632782bbafa48bb1f72.jpg", + "image_caption": [ + "Figure 7. Ablation results of loss term ratios." + ], + "image_footnote": [], + "bbox": [ + 511, + 93, + 883, + 353 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "images. In contrast, if $\\lambda$ is set to a large value ( $\\lambda = 20$ as an example), a strong target-domain constraint will limit the diversity of the learned prompts. As a result, the synthesized images would slightly show some similar undesired patterns as images generated via fixed prompts. Therefore, in practical applications, $\\lambda$ should be a trade-off value (i.e., between 1 and 10).", + "bbox": [ + 496, + 397, + 890, + 502 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 513, + 617, + 527 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we have proposed a novel zero-shot generative model adaptation approach called Image-specific Prompt Learning (IPL). 
In specific, we build a projection from latent codes to image-specific sets of prompt vectors via a latent mapper. With a contrastive learning scheme and a domain regularization constraint, the learned prompt vectors represent image-specific but target-domain-friendly features, producing more precise and diversified adaptation directions for target domain generator training. Compared with the state-of-the-art approaches, IPL consistently improves the quality of synthesized images and alleviates the mode collapse issue. Furthermore, IPL is independent of the type of generator and works well with both GANs and diffusion models, which exhibits good universality and adaptability. In the future, we will try to apply the proposed image-specific prompt learning strategy in other downstream tasks, such as unsupervised image captioning.", + "bbox": [ + 496, + 535, + 890, + 792 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 500, + 803, + 666, + 819 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work is supported in part by the National Key R&D Program of China (2019YFC1408703), the National Natural Science Foundation of China (62022048, 62276150), Guoqiang Institute of Tsinghua University and Beijing Academy of Artificial Intelligence.", + "bbox": [ + 496, + 825, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "11501", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Yuval Alaluf, Or Patashnik, and Daniel Cohen-Or. ReStyle: A residual-based StyleGAN encoder via iterative refinement. In ICCV, 2021. 5, 7", + "[2] Sergey Bartunov and Dmitry Vetrov. Few-shot generative modelling with generative matching networks. In AISTATS, 2018. 3", + "[3] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. StarGAN v2: Diverse image synthesis for multiple domains. In CVPR, 2020. 5, 6", + "[4] Louis Clouatre and Marc Demers. FIGR: Few-shot image generation with reptile. arXiv preprint arXiv:1901.02199, 2019.3", + "[5] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. ArcFace: Additive angular margin loss for deep face recognition. In CVPR, 2019. 5, 6", + "[6] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2020. 5", + "[7] Yu Du, Fangyun Wei, Zihe Zhang, Miaojing Shi, Yue Gao, and Guoqi Li. Learning to prompt for open-vocabulary object detection with vision-language model. In CVPR, 2022. 3", + "[8] Rinon Gal, Or Patashnik, Haggai Maron, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. StyleGAN-NADA: CLIP-guided domain adaptation of image generators. In SIGGRAPH, 2022. 2, 3, 4, 5, 6, 7, 8", + "[9] Tianyu Gao, Adam Fisch, and Danqi Chen. Making pre-trained language models better few-shot learners. In ACL/IJCNLP, 2021. 3", + "[10] Chunjiang Ge, Rui Huang, Mixue Xie, Zihang Lai, Shiji Song, Shuang Li, and Gao Huang. Domain adaptation via prompt learning. arXiv preprint arXiv:2202.06687, 2022. 3", + "[11] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. 
Generative adversarial nets. In NeurIPS, 2014. 1", + "[12] Ju He, Jie-Neng Chen, Shuai Liu, Adam Kortylewski, Cheng Yang, Yutong Bai, and Changhu Wang. TransFG: A transformer architecture for fine-grained recognition. In AAAI, 2022. 5, 6", + "[13] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In NeurIPS, 2020. 2", + "[14] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In CVPR, 2017. 2", + "[15] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021. 3", + "[16] Zhengbao Jiang, Frank F Xu, Jun Araki, and Graham Neubig. How can we know what language models know? TACL, 2020. 3" + ], + "bbox": [ + 78, + 114, + 467, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[17] Chen Ju, Tengda Han, Kunhao Zheng, Ya Zhang, and Weidi Xie. Prompting visual-language models for efficient video understanding. In ECCV, 2021. 3", + "[18] Tero Karras, Miika Aittala, Janne Hellsten, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Training generative adversarial networks with limited data. In NeurIPS, 2020. 2, 3", + "[19] Tero Karras, Miika Aittala, Samuli Laine, Erik Härkönen, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Alias-free generative adversarial networks. In NeurIPS, 2021. 3", + "[20] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In CVPR, 2019. 2, 3", + "[21] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of StyleGAN. In CVPR, 2020. 1, 2, 3, 5, 6", + "[22] Gwanghyun Kim, Taesung Kwon, and Jong Chul Ye. DiffusionCLIP: Text-guided diffusion models for robust image manipulation. In CVPR, 2022. 2, 3, 5, 7", + "[23] Brian Lester, Rami Al-Rfou, and Noah Constant. The power of scale for parameter-efficient prompt tuning. In EMNLP, 2021. 3", + "[24] Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu Yuan, Lei Zhang, Jenq-Neng Hwang, Kai-Wei Chang, and Jianfeng Gao. Grounded language-image pre-training. In CVPR, 2022. 3", + "[25] Xiang Lisa Li and Percy Liang. Prefix-Tuning: Optimizing continuous prompts for generation. In ACL/JCNLP, 2021. 3", + "[26] Weixin Liang, Zixuan Liu, and Can Liu. DAWSON: A domain adaptive few shot generation framework. arXiv preprint arXiv:2001.00576, 2020.3", + "[27] Xihui Liu, Dong Huk Park, Samaneh Azadi, Gong Zhang, Arman Chopikyan, Yuxiao Hu, Humphrey Shi, Anna Rohrbach, and Trevor Darrell. More control for free! image synthesis with semantic diffusion guidance. In WACV, 2023. 2", + "[28] Xiao Liu, Yanan Zheng, Zhengxiao Du, Ming Ding, Yujie Qian, Zhilin Yang, and Jie Tang. GPT understands, too. arXiv preprint arXiv:2103.10385, 2021. 3", + "[29] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In ICCV, 2015. 7", + "[30] Sangwoo Mo, Minsu Cho, and Jinwoo Shin. Freeze the discriminator: a simple baseline for fine-tuning GANs. In CVPR Workshops, 2020. 2, 3", + "[31] Alexander Quinn Nichol and Prafulla Dhariwal. Improved denoising diffusion probabilistic models. In ICML, 2021. 2", + "[32] Atsuhiro Noguchi and Tatsuya Harada. Image generation from small datasets via batch statistics adaptation. 
In ICCV, 2019. 3", + "[33] Utkarsh Ojha, Yijun Li, Jingwan Lu, Alexei A Efros, Yong Jae Lee, Eli Shechtman, and Richard Zhang. Few-shot image generation via cross-domain correspondence. In CVPR, 2021. 2, 3", + "[34] Fabio Petroni, Tim Rocktäschel, Patrick Lewis, Anton Bakhtin, Yuxiang Wu, Alexander H Miller, and Sebastian" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "11502", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Riedel. Language models as knowledge bases? In EMNLP-IJCNLP, 2019. 3", + "[35] Konpat Preechakul, Nattanat Chathee, Suttisak Wizadwongsa, and Supasorn Suwajanakorn. Diffusion Autoencoders: Toward a meaningful and decodable representation. In CVPR, 2022. 2, 5, 7", + "[36] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In ICML, 2021. 3, 4, 5", + "[37] Esther Robb, Wen-Sheng Chu, Abhishek Kumar, and Jia-Bin Huang. Few-shot adaptation of generative adversarial networks. arXiv preprint arXiv:2010.11943, 2020. 3", + "[38] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training GANs. In NeurIPS, 2016. 5, 6", + "[39] Tamar Rott Shaham, Tali Dekel, and Tomer Michaeli. SinGAN: Learning a generative model from a single natural image. In ICCV, 2019. 5, 6", + "[40] Taylor Shin, Yasaman Razeghi, Robert L Logan IV, Eric Wallace, and Sameer Singh. AutoPrompt: Eliciting knowledge from language models with automatically generated prompts. In EMNLP, 2020. 3", + "[41] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In ICML, 2015. 2", + "[42] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. In ICLR, 2020. 2, 3, 5, 7", + "[43] Yang Song and Stefano Ermon. Generative modeling by estimating gradients of the data distribution. NeurIPS, 2019. 2", + "[44] Omer Tov, Yuval Alaluf, Yotam Nitzan, Or Patashnik, and Daniel Cohen-Or. Designing an encoder for StyleGAN image manipulation. TOG, 2021. 5", + "[45] Ngoc-Trung Tran, Viet-Hung Tran, Ngoc-Bao Nguyen, Trung-Kien Nguyen, and Ngai-Man Cheung. On data augmentation for GAN training. TIP, 2021. 3", + "[46] Steven Walton, Ali Hassani, Xingqian Xu, Zhangyang Wang, and Humphrey Shi. StyleNAT: Giving each head a new perspective. arXiv preprint arXiv:2211.05770, 2022. 2", + "[47] Yaxing Wang, Abel Gonzalez-Garcia, David Berga, Luis Herranz, Fahad Shahbaz Khan, and Joost van de Weijer. MineGAN: effective knowledge transfer from GANs to target domains with few images. In CVPR, 2020. 2, 3", + "[48] Yaxing Wang, Chenshen Wu, Luis Herranz, Joost van de Weijer, Abel Gonzalez-Garcia, and Bogdan Raducanu. Transferring GANs: generating images from limited data. In ECCV, 2018. 2, 3", + "[49] Jiayu Xiao, Liang Li, Chaofei Wang, Zheng-Jun Zha, and Qingming Huang. Few shot generative model adaption via relaxed spatial structural alignment. In CVPR, 2022. 2, 3, 5, 6", + "[50] Xingqian Xu, Shant Navasardyan, Vahram Tadevosyan, Andranik Sargsyan, Yadong Mu, and Humphrey Shi. Image completion with heterogeneously filtered spectral hints. In WACV, 2023. 
2" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[51] Xingqian Xu, Zhangyang Wang, Eric Zhang, Kai Wang, and Humphrey Shi. Versatile Diffusion: Text, images and variations all in one diffusion model. arXiv preprint arXiv:2211.08332, 2022. 2", + "[52] Han Zhang, Zizhao Zhang, Augustus Odena, and Honglak Lee. Consistency regularization for generative adversarial networks. In ICLR, 2019. 3", + "[53] Shengyu Zhao, Zhijian Liu, Ji Lin, Jun-Yan Zhu, and Song Han. Differentiable augmentation for data-efficient GAN training. In NeurIPS, 2020. 2, 3", + "[54] Zhengli Zhao, Sameer Singh, Honglak Lee, Zizhao Zhang, Augustus Odena, and Han Zhang. Improved consistency regularization for GANs. In AAAI, 2021. 3", + "[55] Zhengli Zhao, Zizhao Zhang, Ting Chen, Sameer Singh, and Han Zhang. Image augmentations for GAN training. arXiv preprint arXiv:2006.02595, 2020. 3", + "[56] Zexuan Zhong, Dan Friedman, and Danqi Chen. Factual probing is [MASK]: Learning vs. learning to recall. In NAACL-HLT, 2021. 3", + "[57] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Zwei Liu. Conditional prompt learning for vision-language models. In CVPR, 2022. 3", + "[58] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. IJCV, 2022. 3, 4, 8" + ], + "bbox": [ + 501, + 92, + 890, + 445 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "11503", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2023/Zero-Shot Generative Model Adaptation via Image-Specific Prompt Learning/b1c7981e-30bf-4cd6-b2ff-8dd7951c08ab_model.json b/2023/Zero-Shot Generative Model Adaptation via Image-Specific Prompt Learning/b1c7981e-30bf-4cd6-b2ff-8dd7951c08ab_model.json new file mode 100644 index 0000000000000000000000000000000000000000..79b7a6d51afbfb514d4f25f345840b2c5aaad6e9 --- /dev/null +++ b/2023/Zero-Shot Generative Model Adaptation via Image-Specific Prompt Learning/b1c7981e-30bf-4cd6-b2ff-8dd7951c08ab_model.json @@ -0,0 +1,2288 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.099, + 0.131, + 0.873, + 0.156 + ], + "angle": 0, + "content": "Zero-shot Generative Model Adaptation via Image-specific Prompt Learning" + }, + { + "type": "text", + "bbox": [ + 0.101, + 0.179, + 0.856, + 0.217 + ], + "angle": 0, + "content": "Jiayi Guo\\(^{1*}\\) Chaofei Wang\\(^{1*}\\) You Wu\\(^{2}\\) Eric Zhang\\(^{3}\\) Kai Wang\\(^{3}\\) Xingqian Xu\\(^{3}\\) Shiji Song\\(^{1}\\) Humphrey Shi\\(^{3,4\\dagger}\\) Gao Huang\\(^{1\\dagger}\\)" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.218, + 0.84, + 0.253 + ], + "angle": 0, + "content": "\\(^{1}\\)Tsinghua University, BNRist \\(^{2}\\)UCAS \\(^{3}\\)SHI Labs @ Oregon & UIUC \\(^{4}\\)Picsart AI Research (PAIR) https://github.com/Picsart-AI-Research/IPL-Zero-Shot-Generative-Model-Adaptation" + }, + { + "type": "image", + "bbox": [ + 0.09, + 0.264, + 0.887, + 0.569 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.575, + 0.893, + 0.632 + ], + "angle": 0, + "content": "Figure 1. The mode collapse issue. For NADA [21] and our method, the same generator pre-trained on the source domain of \"Photo\" is adapted to the unseen target domains of \"Disney\", \"Anime painting\", \"Wall painting\" and \"Ukiyo-e\" only with the domain labels. The images above the dotted line are some examples from the internet. The generated images of NADA exhibit some similar unseen patterns (yellow box areas) which are undesired in terms of quality and diversity. This issue is largely addressed by our method." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.651, + 0.314, + 0.667 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.684, + 0.471, + 0.866 + ], + "angle": 0, + "content": "Recently, CLIP-guided image synthesis has shown appealing performance on adapting a pre-trained source-domain generator to an unseen target domain. It does not require any target-domain samples but only the textual domain labels. The training is highly efficient, e.g., a few minutes. However, existing methods still have some limitations in the quality of generated images and may suffer from the mode collapse issue. A key reason is that a fixed adaptation direction is applied for all cross-domain image pairs, which leads to identical supervision signals. To address this issue, we propose an Image-specific Prompt Learning (IPL) method, which learns specific prompt vec" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.653, + 0.892, + 0.819 + ], + "angle": 0, + "content": "tors for each source-domain image. This produces a more precise adaptation direction for every cross-domain image pair, endowing the target-domain generator with greatly enhanced flexibility. Qualitative and quantitative evaluations on various domains demonstrate that IPL effectively improves the quality and diversity of synthesized images and alleviates the mode collapse. Moreover, IPL is independent of the structure of the generative model, such as generative adversarial networks or diffusion models. Code is available at https://github.com/Picsart-AI-Research/IPL-Zero-Shot-Generative-Model-Adaptation." + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.847, + 0.631, + 0.862 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.872, + 0.892, + 0.903 + ], + "angle": 0, + "content": "In recent years, image synthesis using generative adversarial networks (GANs) [11] has been rapidly developed." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.877, + 0.205, + 0.889 + ], + "angle": 0, + "content": "*Equal contribution." + }, + { + "type": "page_footnote", + "bbox": [ + 0.097, + 0.889, + 0.227, + 0.902 + ], + "angle": 0, + "content": "† Corresponding authors." + }, + { + "type": "list", + "bbox": [ + 0.096, + 0.877, + 0.227, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "11494" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.244 + ], + "angle": 0, + "content": "The state-of-the-art methods can generate images that are hard to be distinguished from real data [14, 20, 21, 46, 50]. However, the GAN-based methods heavily rely on vast quantities of training examples, and adopt a cumbersome adversarial training scheme which generally costs many hours of training time. Unfortunately, in many real-world scenarios, data acquisition is difficult or expensive. For example, in the artistic domains, it is impossible to have artists make thousands of creations. The high training cost is also unacceptable on some embedded devices, e.g., cellphones." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.246, + 0.473, + 0.593 + ], + "angle": 0, + "content": "To address these issues, researchers begin to focus on the generative model adaptation. The goal of this task is to adapt a pre-trained source-domain generator to a target domain with limited data. Many few-shot GAN-based methods are proposed, such as TGAN [48], FreezeD [30], MinGAN [47], ADA [18], DiffAug [53], IDC [33] and RSSA [49], etc. However, these methods still require some training images of the target domain and follow the adversarial training scheme. As a pioneer work, StyleGAN-NADA [8] (NADA for short) proposes a zero-shot adaptation method, which only requires textual domain labels and discards the cumbersome adversarial training scheme by introducing a pre-trained CLIP model. Although efficient, it still has obvious deficiencies, i.e., the limited quality and mode collapse of generated images. As shown in Fig.1, we adapt a pretrained generator of \"Photo\" domain to \"Disney\", \"Anime painting\", \"Wall painting\" and \"Ukiyo-e\" domains. For the results of NADA [8], we notice that the generated images of the same target domain always show some homogeneous patterns which degrade the image quality and diversity, such as deep nasolabial folds in \"Disney\", squinting eyes in \"Anime painting\", red cheeks in \"Wall painting\" and blue eyebrows in \"Ukiyo-e\" (yellow box areas)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.596, + 0.47, + 0.793 + ], + "angle": 0, + "content": "By exploring the factors behind this phenomenon, we find that the key factor is the fixed adaptation direction produced by manually designed prompts. Sharing the direction for all cross-domain image pairs leads to identical supervision signals for the model adaptation. Consider the example, adapting a generator of \"Human\" domain to \"Tolkien elf\" domain as shown in Fig.2. The previous works [8, 22] adopt manually designed prompts (e.g., \"A photo of a\") plus the domain label to produce a fixed adaptation direction, which is shared by all cross-domain image pairs (Fig.2 (a)) in the adaptation process. We argue that the constraint is too restrictive and suppresses the image-specific features, leading to homogeneous generated patterns." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.796, + 0.471, + 0.903 + ], + "angle": 0, + "content": "In this paper, we propose an Image-specific Prompt Learning (IPL) method to address the above issue. The motivation is setting more precise and diversified adaptation directions by customizing more image-specific prompts, for instance \"Asian girl\", \"Curly hair lady\" and \"Elder glass man\" (Fig.2 (b)). These adaptation directions endow the target-domain generator with high flexibility to synthesize" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.092, + 0.892, + 0.328 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.341, + 0.895, + 0.41 + ], + "angle": 0, + "content": "Figure 2. An illustration of our motivation. The previous methods adopt manual prompts to compute a fixed adaptation direction for all cross-domain image pairs, while our method learns image-specific prompts for producing more precise and diversified adaptation directions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.423, + 0.892, + 0.71 + ], + "angle": 0, + "content": "more diversified images. The proposed IPL is a two-stage method. In Stage 1, a latent mapper is trained to produce an image-specific set of prompt vectors conditioned on each source-domain image by a contrastive training scheme. The learned prompt vectors contain more specific and diversified features of the source-domain images than the fixed prompt vectors. We further propose a domain regularization loss to ensure that the learned prompt vectors are compatible with the target domain. In Stage 2, we compute more precise and diversified adaptation directions for each cross-domain image pair, and train the target-domain generator with an adaptive directional CLIP loss, which can be viewed as an improved version of the Directional CLIP Loss [8]. As shown in Fig.1, our method alleviates the mode collapse issue well. Extensive experiments across a wide range of domains demonstrate that the proposed IPL effectively improves the quality of synthesized images and overcomes the mode collapse issue. User studies and ablation studies are also conducted to validate the effectiveness of our method." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.71, + 0.892, + 0.817 + ], + "angle": 0, + "content": "It is worth noting that our proposed IPL method is independent of the structure of the generative model, and can be applied to the recent diffusion models [13,27,31,35,41-43, 51]. Thus we also combine IPL with diffusion models and get a more robust and stronger generative capacity, especially on complex images, which shows the high effectiveness and adaptability of our approach." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.83, + 0.642, + 0.847 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Generative model adaptation. Generative model adaptation is the task of adapting a generative model trained on a large-scale source domain to a data-limited target domain." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "11495" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.289 + ], + "angle": 0, + "content": "According to the size of the training dataset of the target domain, it can be directly divided into two main categories: few-shot generative model adaptation and zero-shot generative model adaptation. 
For the few-shot generative model adaptation task, the most natural approach is to fine-tune a pre-trained GAN [2, 4, 26, 48]. However, fine-tuning the entire network weights used to result in overfitting. Subsequently, many methods were proposed to alleviate the overfitting issue. They either imposed strong regularization [52, 54], or modified the network parameters with a slight perturbation [30, 32, 37, 47], or preserved some important information by cross-domain alignment [33, 49], or performed data augmentation [45, 53, 55]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.293, + 0.473, + 0.521 + ], + "angle": 0, + "content": "For the zero-shot generative model adaptation task, NADA [8] first proposed to introduce a pre-trained CLIP model for supplying necessary prior knowledge. It only required textual domain labels, and encoded the domain gap as a text-guided adaptation direction in CLIP space. To enhance the identity-preserving capability of real-world image translation, Kim et al. further proposed DiffusionCLIP [22] which utilized diffusion models [42] instead of StyleGANs [18-21] in NADA. Nevertheless, these existing works all adopt a fixed adaptation direction which only contains the basic domain knowledge but no image-specific features. In this paper, we argue that this shared fixed adaptation direction may lead to the mode collapse issue. To produce more accurate and adaptive adaptation directions, we propose to learn diverse and specific prompt vectors for each image." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.524, + 0.473, + 0.903 + ], + "angle": 0, + "content": "Prompt learning. Prompt engineering is first introduced as a knowledge probing approach [34]. Given cloze-style prompts, it induces pre-trained language models to generate the corresponding answers. However, manually designed prompts may be sub-optimal and provide imprecise guidance. To tackle this issue, prompt learning [9, 16, 23, 25, 28, 40, 56] has been widely studied in natural language processing to automatically explore the optimal set of prompts. With the unprecedented development of vision-language models [15, 36] in recent years, researchers begin to apply prompt learning to computer vision tasks [7, 10, 17, 24, 57, 58]. In specific, Zhou et al. [57, 58] first adopted context optimization in image classification tasks by modeling context words with continuous vectors in the word embedding space. Subsequently, many downstream tasks in computer vision were also explored, e.g., object detection [7], visual grounding [24], video understanding [17] and transfer learning [10]. As far as we know, this is the first work to propose an adaptive prompt learning scheme for generative model adaptation. Different from previous prompt learning schemes, our method introduces a latent mapper to learn a specific set of prompt vectors for each image. When training the target-domain generator, the learned image-specific prompt vectors could produce more precise adaptation directions to provide better supervision signals." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.09, + 0.637, + 0.109 + ], + "angle": 0, + "content": "3. Methodology" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.115, + 0.895, + 0.251 + ], + "angle": 0, + "content": "The goal of zero-shot generative model adaptation is to adapt a pre-trained source-domain generator \\( G_{\\mathrm{s}} \\) to an unseen target domain, and get the target-domain generator \\( G_{\\mathrm{t}} \\). 
The source domain with the domain label \\( \\mathrm{Y_s} \\), e.g., \"Human\", can obtain plentiful high-quality images by \\( G_{\\mathrm{s}} \\). The target domain is described only through the domain label \\( \\mathrm{Y_t} \\), e.g., \"Tolkien elf\", with no images. Following [8, 22], a pre-trained CLIP model [36] including an image encoder \\( E_{\\mathrm{I}} \\) and a text encoder \\( E_{\\mathrm{T}} \\) is introduced." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.252, + 0.895, + 0.614 + ], + "angle": 0, + "content": "We propose a two-stage method named Image-specific Prompt Learning (IPL). Its framework is shown in Fig.3. In Stage 1, a latent mapper \\( F \\) is trained to produce a set of image-specific prompt vectors \\( \\{[\\mathbf{V}]_1^i, [\\mathbf{V}]_2^i, \\dots, [\\mathbf{V}]_m^i\\} \\) for each latent code \\( w^i \\) of a source-domain image. Each prompt vector has the same dimension with word embeddings in CLIP space. The training loss consists of a contrastive learning loss \\( \\mathcal{L}_{\\mathrm{contr}} \\) and a domain regularization loss \\( \\mathcal{L}_{\\mathrm{domain}} \\). The former aims to preserve the image-specific features of each source domain image in the learned prompt vectors. The latter constrains the image-specific features to be suitable to the target domain, which means the learned features should not conflict with the target domain. For example, the features of prompts like \"round ear\" should not be contained in the ideal prompt vectors if the target domain is \"Tolkien elf\". In Stage 2, the trained latent mapper \\( F \\) is plugged into the training process of the target-domain generator \\( G_{\\mathrm{t}} \\), and produces more precise and diversified adaptation directions for cross-domain image pairs. This training stage follows [8] except that learned prompt vectors produced by the latent mapper \\( F \\) replace the fixed prompt vectors. The final textual supervision information includes shared learned prompt vectors and respective embeddings of the original domain labels." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.623, + 0.779, + 0.64 + ], + "angle": 0, + "content": "3.1. Image-specific prompt learning" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.647, + 0.895, + 0.828 + ], + "angle": 0, + "content": "General prompts. The previous methods [8, 22] compute a fixed adaptation direction produced by two embeddings of manually designed prompts, e.g., \"a photo of a human\" and \"a photo of a Tolkien elf\", then constrain the directions of all cross-domain pairs to be parallel with the adaptation direction. In contrast to manually designed prompts, prompt learning [58] aims to find the optimal set of prompt vectors for a domain by directly tuning the embeddings of prompts. Formally, we define a general prompt matrix \\(\\mathrm{M_d}\\) to represent a given domain d. \\(\\mathrm{M_d}\\) consists of the prompt vectors \\([\\mathbf{V}]_1, [\\mathbf{V}]_2, \\dots, [\\mathbf{V}]_m\\) and the embedding of the domain label \\([\\mathrm{Y_d}]\\) as below:" + }, + { + "type": "equation", + "bbox": [ + 0.597, + 0.833, + 0.892, + 0.851 + ], + "angle": 0, + "content": "\\[\n\\mathrm {M} _ {\\mathrm {d}} = [ \\mathbf {V} ] _ {1} [ \\mathbf {V} ] _ {2} \\dots [ \\mathbf {V} ] _ {m} [ \\mathrm {Y} _ {\\mathrm {d}} ], \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.895, + 0.903 + ], + "angle": 0, + "content": "where \\(m\\) is the number of prompts. Suppose the dimension of each embedding is \\(k\\). 
Then the dimension of \\(\\mathbf{M}_{\\mathrm{d}}\\) should be \\((m + 1) \\times k\\). In [8, 22], the prompt vectors" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "11496" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.131, + 0.092, + 0.411, + 0.105 + ], + "angle": 0, + "content": "Stage 1: Training latent mapper for prompt learning" + }, + { + "type": "image", + "bbox": [ + 0.129, + 0.106, + 0.825, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.137, + 0.298, + 0.388, + 0.311 + ], + "angle": 0, + "content": "Stage 2: Training generator for image synthesis" + }, + { + "type": "image", + "bbox": [ + 0.154, + 0.311, + 0.753, + 0.44 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.451, + 0.892, + 0.522 + ], + "angle": 0, + "content": "Figure 3. The framework of our method. In Stage 1, a latent mapper \\( F \\) is trained for prompt learning by a contrastive learning loss \\( \\mathcal{L}_{\\mathrm{contr}} \\) and a domain regularization loss \\( \\mathcal{L}_{\\mathrm{domain}} \\). The image encoder \\( E_{\\mathrm{I}} \\) and the text encoder \\( E_{\\mathrm{T}} \\) are from the CLIP model [36]. In Stage 2, the target-domain generator \\( G_{\\mathrm{t}} \\) is trained for image synthesis by the improved Directional CLIP Loss \\( \\mathcal{L}_{\\mathrm{adapt}} \\) in which the adaptive prompts produced by the latent mapper are applied. In two stages, the locked modules are fixed while the unlocked modules are trained. For simplicity, we replace \\( E_{\\mathrm{I}}(G_{\\mathrm{s}}(w^{i})) \\) and \\( E_{\\mathrm{T}}(\\mathrm{M}_{\\mathrm{s}}^{i}) \\) with \\( \\Gamma^i \\) and \\( \\mathrm{T_s}^i \\), respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.54, + 0.47, + 0.616 + ], + "angle": 0, + "content": "\\([\\mathbf{V}]_1, [\\mathbf{V}]_2, \\dots, [\\mathbf{V}]_m\\) are fixed embeddings of manually designed prompts. For prompt learning [58], the prompt vectors are learned by encoding each training image of the domain \\(d\\) with \\(E_{\\mathrm{I}}\\) and the prompt matrix \\(\\mathrm{M_d}\\) with \\(E_{\\mathrm{T}}\\), and then maximizing the cosine similarity between them." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.622, + 0.47, + 0.82 + ], + "angle": 0, + "content": "Inspired by prompt learning, in the zero-shot generative model adaptation task, a natural idea is to learn an optimal set of prompt vectors instead of the manually designed prompts in NADA [8]. Although the adaptation direction calculated by the learned prompt vectors seems to be more reasonable than that of the manually designed prompts, it is still fixed and shared for all cross-domain image pairs. These fixed learned prompt vectors can not solve the mode collapse issue (Experimental validations can be seen in Sec. 4.4). To obtain more flexible and diversified adaptation directions, we further propose to learn a set of image-specific prompt vectors for each image, which can be regarded as an improved version of prompt learning." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.826, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Image-specific prompts. Utilizing the source-domain generator \\( G_{\\mathrm{s}} \\), we train a latent mapper \\( F \\) as shown in Fig.3 (Stage 1). Through the mapper, each image of the source domain can be matched to an optimal set of prompt vectors. 
Formally, given a latent code \\( w^{i} \\), corresponding to" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.54, + 0.892, + 0.631 + ], + "angle": 0, + "content": "the \\(i^{\\mathrm{th}}\\) image in the source domain, the image-specific set of prompt vectors \\(\\{[\\mathbf{V}]_1^i,[\\mathbf{V}]_2^i,\\dots ,[\\mathbf{V}]_m^i\\}\\) can be obtained by \\(F(w^{i},\\theta)\\), where \\(\\theta\\) denotes the parameters of the latent mapper \\(F\\). Following the definition of the prompt matrix in Eq.(1), we define an image-specific prompt matrix of the \\(i^{\\mathrm{th}}\\) source-domain image as:" + }, + { + "type": "equation", + "bbox": [ + 0.545, + 0.638, + 0.892, + 0.656 + ], + "angle": 0, + "content": "\\[\n\\mathrm {M} _ {\\mathrm {s}} ^ {i} = F \\left(w ^ {i}, \\theta\\right) \\left[ \\mathrm {Y} _ {\\mathrm {s}} \\right] = \\left[ \\mathrm {V} \\right] _ {1} ^ {i} \\left[ \\mathrm {V} \\right] _ {2} ^ {i} \\dots \\left[ \\mathrm {V} \\right] _ {m} ^ {i} \\left[ \\mathrm {Y} _ {\\mathrm {s}} \\right]. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.662, + 0.891, + 0.691 + ], + "angle": 0, + "content": "In this paper, \\( F \\) is a common four-layer fully-connected network. Next, we show how to train it." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.692, + 0.892, + 0.844 + ], + "angle": 0, + "content": "Contrastive training scheme. Given a batch of latent codes \\(\\{w^1, w^2, \\dots, w^n\\}\\), we can produce a batch of sets of prompt matrices \\(\\{\\mathrm{M_s^1}, \\mathrm{M_s^2}, \\dots, \\mathrm{M_s^n}\\}\\) by \\(F\\) and a batch of images \\(\\{G_{\\mathrm{s}}(w^{1}), G_{\\mathrm{s}}(w^{2}), \\dots, G_{\\mathrm{s}}(w^{n})\\}\\) by \\(G_{\\mathrm{s}}\\). Then \\(n \\times n\\) pairs \\(< G_{\\mathrm{s}}(w^{i}), \\mathrm{M}_{\\mathrm{s}}^{j} >\\), \\(i, j \\in \\{1, 2, \\dots, n\\}\\) have been obtained. Then, we take the pairs of \\(i = j\\) as positive samples, and the pairs of \\(i \\neq j\\) as negative samples for contrastive training. Specifically, we compute the similarity between embeddings of the \\(i^{\\text{th}}\\) image and the \\(j^{\\text{th}}\\) prompt matrix in CLIP space as:" + }, + { + "type": "equation", + "bbox": [ + 0.52, + 0.848, + 0.892, + 0.866 + ], + "angle": 0, + "content": "\\[\n\\operatorname {S i m} _ {i j} = \\operatorname {C o s} \\left(\\operatorname {N o r m} \\left(E _ {\\mathrm {I}} \\left(G _ {\\mathrm {s}} \\left(w ^ {i}\\right)\\right)\\right), \\operatorname {N o r m} \\left(E _ {\\mathrm {T}} \\left(\\mathrm {M} _ {\\mathrm {s}} ^ {j}\\right)\\right)\\right), \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.871, + 0.892, + 0.902 + ], + "angle": 0, + "content": "where \\(\\mathrm{Norm}(\\cdot)\\) and \\(\\mathrm{Cos}(\\cdot)\\) represent \\(L_{2}\\) normalization and the cosine function, respectively. The similarities of pos" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "11497" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.136 + ], + "angle": 0, + "content": "itive samples are maximized while the similarities of negative samples are minimized. 
The contrastive loss is expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.124, + 0.138, + 0.469, + 0.17 + ], + "angle": 0, + "content": "\[\n\mathcal {L} _ {\text {c o n t r}} = \mathbb {E} _ {w \in \mathcal {W}} \left(\sum_ {i \neq j} \left(\operatorname {S i m} _ {i j}\right) - \sum_ {i = j} \left(\operatorname {S i m} _ {i j}\right)\right). \tag {4}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.172, + 0.473, + 0.459 + ], + "angle": 0, + "content": "Domain regularization loss. For the target domain without any prior knowledge except the domain label \( \mathrm{Y_t} \), we can simply share the learned prompt vectors between the source and target domains following [8]. However, the shared prompt vectors may lead to the risk of generating unrealistic images for the target domain, because some learned prompt vectors may contain strongly relevant features to the source domain, leading to conflict with the target domain. For example, an image of \"Human\" domain is matched to prompt vectors of \"round ear\", but a corresponding image of \"Tolkien elf\" domain should not contain the features of \"round ear\". Sharing these prompt vectors is harmful to the target-domain image generation. Therefore, we further propose a domain regularization loss. Specifically, we constrain the angles between the embeddings of the image-specific prompt matrix \( \mathbf{M}_{\mathrm{t}}^{i} \) and the target-domain label \( \mathrm{Y_t} \) in CLIP space to be small, to avoid the learned prompt vectors conflicting with the target domain. Formally, the domain regularization loss is described as:" + }, + { + "type": "equation", + "bbox": [ + 0.093, + 0.466, + 0.47, + 0.505 + ], + "angle": 0, + "content": "\[\n\mathcal {L} _ {\text {d o m a i n}} = - \mathbb {E} _ {w ^ {i} \in \mathcal {W}} \sum_ {i = 1} ^ {n} \left(\operatorname {C o s} \left(E _ {\mathrm {T}} \left(\mathrm {M} _ {\mathrm {t}} ^ {i}\right), E _ {\mathrm {T}} \left(\mathrm {Y} _ {\mathrm {t}}\right)\right)\right), \tag {5}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.512, + 0.469, + 0.543 + ], + "angle": 0, + "content": "where \(\mathrm{M}_{\mathrm{t}}^{i}\) is calculated by Eq.(2) except replacing the domain label, \(\mathrm{Cos}(\cdot)\) represents the cosine similarity." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.543, + 0.469, + 0.573 + ], + "angle": 0, + "content": "As a summary, the whole training loss function of the latent mapper \(F\) is:" + }, + { + "type": "equation", + "bbox": [ + 0.187, + 0.581, + 0.469, + 0.597 + ], + "angle": 0, + "content": "\[\n\mathcal {L} = \mathcal {L} _ {\text {c o n t r}} + \lambda \mathcal {L} _ {\text {d o m a i n}}, \tag {6}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.604, + 0.469, + 0.651 + ], + "angle": 0, + "content": "where \(\lambda\) is the ratio parameter. Optimized by \(\mathcal{L}\), the learned prompt vectors can not only reflect the features of the source-domain images, but also adapt to the target domain." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.658, + 0.433, + 0.674 + ], + "angle": 0, + "content": "3.2. Latent mapper guided generator training" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.681, + 0.469, + 0.816 + ], + "angle": 0, + "content": "After training the latent mapper \( F \), we conduct the second stage: training the target-domain generator \( G_{\mathrm{t}} \) as shown in Fig.3 (Stage 2). 
Specifically, we plug in the trained latent mapper, and train \( G_{\mathrm{t}} \) with an improved Directional CLIP Loss \( \mathcal{L}_{\mathrm{adapt}} \). Its main difference from [8] is using the image-specific prompt vectors that are produced on-the-fly by \( F \) instead of the fixed ones of manually designed prompts. Formally, given a latent code \( w^{i} \), we calculate the direction of the \( i^{\mathrm{th}} \) source and target image pair as below:" + }, + { + "type": "equation", + "bbox": [ + 0.1, + 0.823, + 0.469, + 0.841 + ], + "angle": 0, + "content": "\[\n\Delta \mathrm {I} _ {i} = \operatorname {N o r m} \left(E _ {\mathrm {I}} \left(G _ {\mathrm {t}} \left(w ^ {i}\right)\right)\right) - \operatorname {N o r m} \left(E _ {\mathrm {I}} \left(G _ {\mathrm {s}} \left(w ^ {i}\right)\right)\right), \tag {7}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.848, + 0.469, + 0.877 + ], + "angle": 0, + "content": "where \(\mathrm{Norm}(\cdot)\) represents \(L_{2}\) normalization. The image-specific adaptation direction is calculated as below:" + }, + { + "type": "equation", + "bbox": [ + 0.124, + 0.885, + 0.469, + 0.903 + ], + "angle": 0, + "content": "\[\n\Delta \mathrm {T} _ {i} = \operatorname {N o r m} \left(E _ {\mathrm {T}} \left(\mathrm {M} _ {\mathrm {t}} ^ {i}\right)\right) - \operatorname {N o r m} \left(E _ {\mathrm {T}} \left(\mathrm {M} _ {\mathrm {s}} ^ {i}\right)\right). \tag {8}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.092, + 0.816, + 0.108 + ], + "angle": 0, + "content": "The improved Directional CLIP Loss \(\mathcal{L}_{\mathrm{adapt}}\) is:" + }, + { + "type": "equation", + "bbox": [ + 0.561, + 0.118, + 0.892, + 0.157 + ], + "angle": 0, + "content": "\[\n\mathcal {L} _ {\mathrm {a d a p t}} = \mathbb {E} _ {w ^ {i} \in \mathcal {W}} \sum_ {i = 1} ^ {n} \left(1 - \frac {\Delta \mathrm {I} _ {i} \cdot \Delta \mathrm {T} _ {i}}{| \Delta \mathrm {I} _ {i} | | \Delta \mathrm {T} _ {i} |}\right), \tag {9}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.166, + 0.892, + 0.212 + ], + "angle": 0, + "content": "where \( n \) is the batch size of latent codes. \( \mathcal{L}_{\mathrm{adapt}} \) constrains the direction of each image pair \( \Delta \mathrm{I}_i \) with an image-specific adaptation direction \( \Delta \mathrm{T}_i \)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.225, + 0.634, + 0.241 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.25, + 0.892, + 0.369 + ], + "angle": 0, + "content": "In this section, we evaluate our method qualitatively and quantitatively. The experimental setup is first presented in Sec. 4.1. Then we show image synthesis results across various domains in Sec. 4.2. Utilizing a GAN inversion model and diffusion models, results of real-world image translation are provided in Sec. 4.3. Finally, we carefully conduct ablation studies on prompt designing schemes and loss term ratios in Sec. 4.4." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.38, + 0.688, + 0.396 + ], + "angle": 0, + "content": "4.1. Experimental setup" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.403, + 0.892, + 0.734 + ], + "angle": 0, + "content": "Baselines and settings. Two strong methods are chosen as our competitors. For zero-shot image synthesis, NADA [8] is the state-of-the-art method. 
Following NADA [8], we adapt the pre-trained StyleGANv2 [21] generators on (i) Flickr-Faces-HQ (FFHQ) [8] and (ii) Animal FacesHQ (AFHQ) [3], utilize the same pre-trained CLIP [36] built on ViT-B/32 [6]. For zero-shot real-world image translation, we utilize Restyle [1] with e4e [44] encoder to invert a real image into the latent space \\(\\mathcal{W}\\) for StyleGANs. DiffusionCLIP (Diff-CLIP for short) [22] is the state-of-the-art method. We follow the setting of [22] except replacing denoising diffusion implicit models (DDIM) [42] with diffusion autoencoders [35]. The training process includes 300 iterations for prompt learning and 300 iterations for generator adaptation using a single NVIDIA RTX 3090 GPU. The batch size is set to 32 for prompt learning and 2 for generator adaptation. The number of learned prompt vectors \\(m\\) is set to 4. For each domain, the ratio parameter \\(\\lambda\\) in Eq.(6) is selected among [1, 10], according to the best Inception Score [38] of adapted generators. The whole training process requires about \\(10\\sim 20\\) minutes. More implementation details can be seen in supplementary materials." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Evaluation metrics. The ideal generated images should have: 1) high quality and diversity, 2) correct target-domain style, and 3) necessary source-domain information preservation (e.g., structure or identity). For a comprehensive evaluation, we utilize the popular Inception Score (IS) [38] to evaluate the image quality and diversity, the Single Image Fréchet Inception Distance (SIFID) [39] to evaluate the target-domain style, the Structural Consistency Score (SCS) [49] to evaluate the structure preservation, the identity similarity (ID) [5, 12] to evaluate the identity preservation. More details can be seen in supplementary materials." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "11498" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.091, + 0.093, + 0.348, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.354, + 0.094, + 0.609, + 0.309 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.615, + 0.094, + 0.871, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.091, + 0.322, + 0.346, + 0.525 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.353, + 0.322, + 0.61, + 0.525 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.616, + 0.322, + 0.871, + 0.525 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.528, + 0.894, + 0.571 + ], + "angle": 0, + "content": "Figure 4. Image synthesis comparison results. For FFHQ [21], the source domain is \"Human\" and the target domains are \"Pixar character\", \"Tolkien elf\", and \"Werewolf\". For AFHQ-Dog [3], the source domain is \"Photo\" and the target domains are \"Cartoon\", \"Pointillism\", and \"Cubism\". The yellow box areas show the mode collapse problem of NADA [8]." + }, + { + "type": "table_caption", + "bbox": [ + 0.219, + 0.578, + 0.748, + 0.592 + ], + "angle": 0, + "content": "Table 1. Quantitative evaluation results. US denotes user study. The best results are bold." + }, + { + "type": "table", + "bbox": [ + 0.079, + 0.598, + 0.898, + 0.766 + ], + "angle": 0, + "content": "
Dataset | Source→Target | IS [38] (↑) NADA | IS [38] (↑) IPL | SCS [49] (↑) NADA | SCS [49] (↑) IPL | ID [5,12] (↑) NADA | ID [5,12] (↑) IPL | SIFID [39] (↓) NADA R1 | SIFID NADA R2 | SIFID NADA R3 | SIFID [39] (↓) IPL R1 | SIFID IPL R2 | SIFID IPL R3 | US (↑)
FFHQ [8] | Photo→Disney | 2.721 | 3.089 | 0.407 | 0.448 | 0.782 | 0.801 | 2.776 | 3.136 | 3.670 | 2.517 | 2.930 | 3.497 | 82.6%
FFHQ [8] | Photo→Anime painting | 2.450 | 3.051 | 0.324 | 0.518 | 0.666 | 0.776 | 2.956 | 1.811 | 1.242 | 2.845 | 1.595 | 1.021 | 79.3%
FFHQ [8] | Photo→Wall painting | 2.183 | 2.676 | 0.439 | 0.487 | 0.594 | 0.637 | 1.944 | 1.220 | 1.331 | 1.930 | 1.183 | 1.274 | 80.9%
FFHQ [8] | Photo→Ukiyo-e | 2.205 | 2.974 | 0.420 | 0.506 | 0.775 | 0.632 | 1.954 | 1.990 | 1.326 | 1.165 | 1.255 | 0.878 | 85.9%
FFHQ [8] | Human→Pixar character | 2.703 | 2.785 | 0.379 | 0.461 | 0.757 | 0.853 | 0.793 | 0.932 | 0.865 | 0.638 | 0.821 | 1.092 | 86.7%
FFHQ [8] | Human→Tolkien elf | 2.479 | 2.778 | 0.416 | 0.491 | 0.711 | 0.772 | 0.632 | 1.495 | 1.452 | 0.690 | 0.637 | 0.701 | 76.8%
FFHQ [8] | Human→Werewolf | 2.619 | 2.809 | 0.399 | 0.417 | 0.642 | 0.747 | 1.969 | 1.846 | 1.967 | 1.734 | 1.688 | 1.911 | 72.7%
AFHQ [3] | Photo→Cartoon | 6.505 | 8.658 | 0.407 | 0.563 | 0.925 | 0.941 | 2.708 | 2.672 | 3.870 | 2.517 | 2.477 | 3.278 | 87.6%
AFHQ [3] | Photo→Pointillism | 5.419 | 6.913 | 0.224 | 0.542 | 0.775 | 0.881 | 7.081 | 5.288 | 7.142 | 4.818 | 3.089 | 4.074 | 78.5%
AFHQ [3] | Photo→Cubism | 4.165 | 6.450 | 0.386 | 0.463 | 0.934 | 0.943 | 2.779 | 2.938 | 3.199 | 2.431 | 2.956 | 2.284 | 74.3%
" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.778, + 0.339, + 0.794 + ], + "angle": 0, + "content": "4.2. Generative model adaptation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Qualitative comparison. In addition to Fig.1, we conduct extensive experiments across a wide range of domains as shown in Fig.4. All results indicate that our proposed approach outperforms NADA consistently. The yellow box areas in the figures denote the main different features between NADA and our IPL. From the quality of the gener" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.779, + 0.892, + 0.9 + ], + "angle": 0, + "content": "ated images, the results of NADA have more incorrect features and noise, such as green mussy noise on hairs (Tolkien elf), ruined noses (Werewolf) and unshaped necks (Pointillism), while the results of IPL are more clear and correct. From the mode collapse perspective of the generated images, NADA is prone to collapse to some similar facial features for different images, such as depressed emotions (Pixar character), folded ears (Cartoon) and blue noses (Cu" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "11499" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.413, + 0.093, + 0.558, + 0.106 + ], + "angle": 0, + "content": "\"Photo\" \\(\\rightarrow\\) \"Wall painting\"" + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.106, + 0.174, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.106, + 0.265, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.265, + 0.106, + 0.351, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.106, + 0.438, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.439, + 0.106, + 0.528, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.528, + 0.106, + 0.616, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.618, + 0.106, + 0.706, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.706, + 0.106, + 0.793, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.793, + 0.106, + 0.879, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.416, + 0.24, + 0.553, + 0.251 + ], + "angle": 0, + "content": "\"Human\" \\(\\rightarrow\\) \"Tolkien elf\"" + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.251, + 0.174, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.097, + 0.385, + 0.162, + 0.395 + ], + "angle": 0, + "content": "Real Images" + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.251, + 0.265, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.202, + 0.385, + 0.241, + 0.394 + ], + "angle": 0, + "content": "Recon1" + }, + { + "type": "image", + "bbox": [ + 0.265, + 0.251, + 0.351, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.29, + 0.385, + 0.327, + 0.394 + ], + "angle": 0, + "content": "NADA" + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.251, + 0.438, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.37, + 0.385, + 0.42, + 0.394 + ], + "angle": 0, + "content": "GAN-IPL" + }, + { + 
"type": "image", + "bbox": [ + 0.44, + 0.251, + 0.528, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.462, + 0.385, + 0.502, + 0.394 + ], + "angle": 0, + "content": "Recon2" + }, + { + "type": "image", + "bbox": [ + 0.528, + 0.251, + 0.616, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.545, + 0.385, + 0.601, + 0.394 + ], + "angle": 0, + "content": "Diff-CLIP" + }, + { + "type": "image", + "bbox": [ + 0.619, + 0.251, + 0.706, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.642, + 0.385, + 0.682, + 0.394 + ], + "angle": 0, + "content": "Recon3" + }, + { + "type": "image", + "bbox": [ + 0.706, + 0.251, + 0.793, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.72, + 0.385, + 0.781, + 0.394 + ], + "angle": 0, + "content": "Diff-CLIP+" + }, + { + "type": "image", + "bbox": [ + 0.794, + 0.251, + 0.879, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.815, + 0.385, + 0.86, + 0.394 + ], + "angle": 0, + "content": "Diff-IPL" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.398, + 0.892, + 0.455 + ], + "angle": 0, + "content": "Figure 5. Real-world image translation comparison results. Baselines are NADA [8], Diff-CLIP [22] and Diff-CLIP+ (an improved version of Diff-CLIP). Recon1, Recon2 and Recon3 refer to inversion results via Restyle [1], DDIM and diffusion autoencoders, respectively. GAN-IPL and Diff-IPL denote integrating IPL with NADA and Diff-CLIP+, respectively. Real images are from CelebA-HQ dataset [29] and translated into two styles of images, \"Wall painting\" and \"Tolkien elf\". The yellow boxes show the key observation areas." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.463, + 0.469, + 0.569 + ], + "angle": 0, + "content": "bism), while IPL presents consistently higher diversity and solve the mode collapse issue well. Our advantages mainly come from the fact that the latent mapper preserves sufficient image-specific and target-domain friendly features from the source-domain images. The produced prompt vectors provide more precise and diversified adaptation directions for the target-domain generator adaptation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.569, + 0.469, + 0.765 + ], + "angle": 0, + "content": "Quantitative comparison. To quantify the performance improvement of IPL compared to NADA [8], IS, SCS, ID and SIFID are evaluated. As reported in Tab.1, for IS, IPL outperforms NADA on all 10 settings, indicating our method achieves better image quality and diversity. For SCS and ID, IPL outperforms NADA on most of the 10 settings except \"Human \\(\\rightarrow\\) Ukiyo-e\". It is mainly because that \"Ukiyo-e\" naturally favors humans with narrow eyes and pale skin, which encourages identity changes during training. For SIFID, we collect 3 reference images \\((\\mathbb{R}_1,\\mathbb{R}_2,\\) and \\(\\mathbb{R}_3)\\) on the internet for each target domain. Tab.1 shows that IPL outperforms NADA in most cases, indicating our superiority in generating precise target-domain styles." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.766, + 0.469, + 0.901 + ], + "angle": 0, + "content": "User studies. For each target domain, 32 images generated by NADA and our method are provided to human observers, together with their corresponding source images and textual labels of target domains. 
Human observers are required to choose better synthesized images which are semantically more consistent with the target domain labels and preserve the useful source-domain information better. We collect 1210 responses from 121 people using a survey platform. As reported in the last column of Tab.1, \\(80.5\\%\\) of" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.464, + 0.815, + 0.479 + ], + "angle": 0, + "content": "users prefer our approach to NADA on average." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.485, + 0.761, + 0.501 + ], + "angle": 0, + "content": "4.3. Real-world image translation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.509, + 0.892, + 0.675 + ], + "angle": 0, + "content": "This task first inverts a real-world image to the latent code by a pre-trained inversion model and then feeds it to the trained target-domain generator to get the translated target-domain image. For GAN-based generators, we compare our method (GAN-IPL) with NADA by connecting the inversion model Restyle [1]. For diffusion model generators, we compare our method (Diff-IPL) with Diff-CLIP [22] and Diff-CLIP+ which is an improved version of Diff-CLIP [22] by replacing the original DDIM [42] with a diffusion autoencoder [35]. For these diffusion models, a deterministic inversion process is naturally provided." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.675, + 0.892, + 0.901 + ], + "angle": 0, + "content": "As shown in Fig.5, comparing the results of NADA and GAN-IPL, IPL's superiority of alleviating mode collapse over NADA can still be observed. Comparing the results of Recon1, Recon2 and Recon3, diffusion models (Recon2 and Recon3) consistently perform better identity preservation than Restyle (Recon1) for real image inversion, especially for some uncommon stuffs in a human face photo, e.g., the hats, hands and tattoos in Fig.5. However, this property is not well inherited in the target domain generators with a fixed adaptation direction (see the results of Diff-CLIP and Diff-CLIP+). Our proposed IPL could help preserve the details in source images better and present the target-domain styles correctly (see the results of Diff-IPL). Quantitative evaluation results of Diff-CLIP, Diff-CLIP+ and Diff-IPL can be seen in supplementary materials." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "11500" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.088, + 0.094, + 0.455, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.105, + 0.369, + 0.441, + 0.383 + ], + "angle": 0, + "content": "Figure 6. Ablation results of prompt designing schemes." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.395, + 0.239, + 0.409 + ], + "angle": 0, + "content": "4.4. Ablation studies" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.418, + 0.47, + 0.583 + ], + "angle": 0, + "content": "Prompt designing schemes. We investigate four different prompt designing schemes: 1) manually fixed prompts (NADA), 2) learned fixed prompts, 3) random prompts and 4) adaptive prompts (Ours). Manually fixed prompts mean simply utilizing the manually designed prompts as NADA [8]. Learned fixed prompts denote unified prompt vectors produced by common prompt learning strategy [58] and shared for all images. Random prompts refer to prompt vectors produced by a randomly initialized latent mapper. Adaptive prompts denote the learned image-specific prompt vectors produced by our IPL method." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.585, + 0.47, + 0.78 + ], + "angle": 0, + "content": "As illustrated in Fig.6, synthesized images with manually fixed prompts and learned fixed prompts show some similar mode collapse issues, e.g., blue eyebrows (Ukiyo-e) and depressed emotions (Pixar character). They both produce a fixed adaptation direction, which leads to identical supervision signals for all image pairs. Synthesized images with random prompts present more photo-realistic results but lack the desired target-domain style. A possible reason is that the random prompts contain some features conflicting with the target domain and impede the learning of the target domain style. Our adaptive prompts perform best since the prompts contain more image-specific and target-domain friendly features from the source-domain images." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.781, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Loss term ratios. We compare different values of the ratio parameter \\(\\lambda\\) in Eq.(6), which is used to adjust the intensity of the domain regularization loss. Visual results are shown in Fig.7. In specific, when we set \\(\\lambda\\) to a small value (\\(\\lambda = 0\\) as an extreme case), there is almost no constraint from the target domain. The learned prompts would excessively preserve the source-domain features. Thus the synthesized images are similar to their corresponding source" + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.094, + 0.885, + 0.354 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.561, + 0.371, + 0.831, + 0.385 + ], + "angle": 0, + "content": "Figure 7. Ablation results of loss term ratios." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.398, + 0.892, + 0.503 + ], + "angle": 0, + "content": "images. In contrast, if \\(\\lambda\\) is set to a large value (\\(\\lambda = 20\\) as an example), a strong target-domain constraint will limit the diversity of the learned prompts. As a result, the synthesized images would slightly show some similar undesired patterns as images generated via fixed prompts. Therefore, in practical applications, \\(\\lambda\\) should be a trade-off value (i.e., between 1 and 10)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.514, + 0.619, + 0.529 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.536, + 0.892, + 0.793 + ], + "angle": 0, + "content": "In this paper, we have proposed a novel zero-shot generative model adaptation approach called Image-specific Prompt Learning (IPL). In specific, we build a projection from latent codes to image-specific sets of prompt vectors via a latent mapper. With a contrastive learning scheme and a domain regularization constraint, the learned prompt vectors represent image-specific but target-domain-friendly features, producing more precise and diversified adaptation directions for target domain generator training. Compared with the state-of-the-art approaches, IPL consistently improves the quality of synthesized images and alleviates the mode collapse issue. Furthermore, IPL is independent of the type of generator and works well with both GANs and diffusion models, which exhibits good universality and adaptability. In the future, we will try to apply the proposed image-specific prompt learning strategy in other downstream tasks, such as unsupervised image captioning." 
+ }, + { + "type": "title", + "bbox": [ + 0.5, + 0.804, + 0.667, + 0.82 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.892, + 0.901 + ], + "angle": 0, + "content": "This work is supported in part by the National Key R&D Program of China (2019YFC1408703), the National Natural Science Foundation of China (62022048, 62276150), Guoqiang Institute of Tsinghua University and Beijing Academy of Artificial Intelligence." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "11501" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.175, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.116, + 0.468, + 0.156 + ], + "angle": 0, + "content": "[1] Yuval Alaluf, Or Patashnik, and Daniel Cohen-Or. ReStyle: A residual-based StyleGAN encoder via iterative refinement. In ICCV, 2021. 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.159, + 0.468, + 0.199 + ], + "angle": 0, + "content": "[2] Sergey Bartunov and Dmitry Vetrov. Few-shot generative modelling with generative matching networks. In AISTATS, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.203, + 0.468, + 0.242 + ], + "angle": 0, + "content": "[3] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. StarGAN v2: Diverse image synthesis for multiple domains. In CVPR, 2020. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.246, + 0.468, + 0.285 + ], + "angle": 0, + "content": "[4] Louis Clouatre and Marc Demers. FIGR: Few-shot image generation with reptile. arXiv preprint arXiv:1901.02199, 2019.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.289, + 0.468, + 0.329 + ], + "angle": 0, + "content": "[5] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. ArcFace: Additive angular margin loss for deep face recognition. In CVPR, 2019. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.332, + 0.468, + 0.413 + ], + "angle": 0, + "content": "[6] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2020. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.417, + 0.468, + 0.469 + ], + "angle": 0, + "content": "[7] Yu Du, Fangyun Wei, Zihe Zhang, Miaojing Shi, Yue Gao, and Guoqi Li. Learning to prompt for open-vocabulary object detection with vision-language model. In CVPR, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.474, + 0.468, + 0.527 + ], + "angle": 0, + "content": "[8] Rinon Gal, Or Patashnik, Haggai Maron, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. StyleGAN-NADA: CLIP-guided domain adaptation of image generators. In SIGGRAPH, 2022. 2, 3, 4, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.531, + 0.468, + 0.57 + ], + "angle": 0, + "content": "[9] Tianyu Gao, Adam Fisch, and Danqi Chen. Making pre-trained language models better few-shot learners. In ACL/IJCNLP, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.574, + 0.468, + 0.614 + ], + "angle": 0, + "content": "[10] Chunjiang Ge, Rui Huang, Mixue Xie, Zihang Lai, Shiji Song, Shuang Li, and Gao Huang. Domain adaptation via prompt learning. arXiv preprint arXiv:2202.06687, 2022. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.617, + 0.468, + 0.67 + ], + "angle": 0, + "content": "[11] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In NeurIPS, 2014. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.674, + 0.468, + 0.727 + ], + "angle": 0, + "content": "[12] Ju He, Jie-Neng Chen, Shuai Liu, Adam Kortylewski, Cheng Yang, Yutong Bai, and Changhu Wang. TransFG: A transformer architecture for fine-grained recognition. In AAAI, 2022. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.731, + 0.468, + 0.757 + ], + "angle": 0, + "content": "[13] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In NeurIPS, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.76, + 0.468, + 0.8 + ], + "angle": 0, + "content": "[14] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In CVPR, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.803, + 0.468, + 0.857 + ], + "angle": 0, + "content": "[15] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.86, + 0.468, + 0.899 + ], + "angle": 0, + "content": "[16] Zhengbao Jiang, Frank F Xu, Jun Araki, and Graham Neubig. How can we know what language models know? TACL, 2020. 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.116, + 0.468, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[17] Chen Ju, Tengda Han, Kunhao Zheng, Ya Zhang, and Weidi Xie. Prompting visual-language models for efficient video understanding. In ECCV, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.137, + 0.892, + 0.175 + ], + "angle": 0, + "content": "[18] Tero Karras, Miika Aittala, Janne Hellsten, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Training generative adversarial networks with limited data. In NeurIPS, 2020. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.179, + 0.892, + 0.219 + ], + "angle": 0, + "content": "[19] Tero Karras, Miika Aittala, Samuli Laine, Erik Härkönen, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Alias-free generative adversarial networks. In NeurIPS, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.222, + 0.892, + 0.261 + ], + "angle": 0, + "content": "[20] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In CVPR, 2019. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.265, + 0.892, + 0.304 + ], + "angle": 0, + "content": "[21] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of StyleGAN. In CVPR, 2020. 1, 2, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.307, + 0.892, + 0.347 + ], + "angle": 0, + "content": "[22] Gwanghyun Kim, Taesung Kwon, and Jong Chul Ye. DiffusionCLIP: Text-guided diffusion models for robust image manipulation. In CVPR, 2022. 
2, 3, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.35, + 0.892, + 0.388 + ], + "angle": 0, + "content": "[23] Brian Lester, Rami Al-Rfou, and Noah Constant. The power of scale for parameter-efficient prompt tuning. In EMNLP, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.392, + 0.892, + 0.459 + ], + "angle": 0, + "content": "[24] Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu Yuan, Lei Zhang, Jenq-Neng Hwang, Kai-Wei Chang, and Jianfeng Gao. Grounded language-image pre-training. In CVPR, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.463, + 0.892, + 0.501 + ], + "angle": 0, + "content": "[25] Xiang Lisa Li and Percy Liang. Prefix-Tuning: Optimizing continuous prompts for generation. In ACL/JCNLP, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.505, + 0.892, + 0.545 + ], + "angle": 0, + "content": "[26] Weixin Liang, Zixuan Liu, and Can Liu. DAWSON: A domain adaptive few shot generation framework. arXiv preprint arXiv:2001.00576, 2020.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.548, + 0.892, + 0.614 + ], + "angle": 0, + "content": "[27] Xihui Liu, Dong Huk Park, Samaneh Azadi, Gong Zhang, Arman Chopikyan, Yuxiao Hu, Humphrey Shi, Anna Rohrbach, and Trevor Darrell. More control for free! image synthesis with semantic diffusion guidance. In WACV, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.618, + 0.892, + 0.658 + ], + "angle": 0, + "content": "[28] Xiao Liu, Yanan Zheng, Zhengxiao Du, Ming Ding, Yujie Qian, Zhilin Yang, and Jie Tang. GPT understands, too. arXiv preprint arXiv:2103.10385, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.661, + 0.892, + 0.699 + ], + "angle": 0, + "content": "[29] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In ICCV, 2015. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.704, + 0.892, + 0.743 + ], + "angle": 0, + "content": "[30] Sangwoo Mo, Minsu Cho, and Jinwoo Shin. Freeze the discriminator: a simple baseline for fine-tuning GANs. In CVPR Workshops, 2020. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.747, + 0.892, + 0.772 + ], + "angle": 0, + "content": "[31] Alexander Quinn Nichol and Prafulla Dhariwal. Improved denoising diffusion probabilistic models. In ICML, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.776, + 0.892, + 0.814 + ], + "angle": 0, + "content": "[32] Atsuhiro Noguchi and Tatsuya Harada. Image generation from small datasets via batch statistics adaptation. In ICCV, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.818, + 0.892, + 0.87 + ], + "angle": 0, + "content": "[33] Utkarsh Ojha, Yijun Li, Jingwan Lu, Alexei A Efros, Yong Jae Lee, Eli Shechtman, and Richard Zhang. Few-shot image generation via cross-domain correspondence. In CVPR, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.874, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[34] Fabio Petroni, Tim Rocktäschel, Patrick Lewis, Anton Bakhtin, Yuxiang Wu, Alexander H Miller, and Sebastian" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "11502" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.092, + 0.468, + 0.119 + ], + "angle": 0, + "content": "Riedel. Language models as knowledge bases? 
In EMNLP-IJCNLP, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.121, + 0.469, + 0.175 + ], + "angle": 0, + "content": "[35] Konpat Preechakul, Nattanat Chathee, Suttisak Wizadwongsa, and Supasorn Suwajanakorn. Diffusion Autoencoders: Toward a meaningful and decodable representation. In CVPR, 2022. 2, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.177, + 0.469, + 0.258 + ], + "angle": 0, + "content": "[36] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In ICML, 2021. 3, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.26, + 0.469, + 0.3 + ], + "angle": 0, + "content": "[37] Esther Robb, Wen-Sheng Chu, Abhishek Kumar, and Jia-Bin Huang. Few-shot adaptation of generative adversarial networks. arXiv preprint arXiv:2010.11943, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.302, + 0.469, + 0.342 + ], + "angle": 0, + "content": "[38] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training GANs. In NeurIPS, 2016. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.344, + 0.469, + 0.384 + ], + "angle": 0, + "content": "[39] Tamar Rott Shaham, Tali Dekel, and Tomer Michaeli. SinGAN: Learning a generative model from a single natural image. In ICCV, 2019. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.386, + 0.469, + 0.44 + ], + "angle": 0, + "content": "[40] Taylor Shin, Yasaman Razeghi, Robert L Logan IV, Eric Wallace, and Sameer Singh. AutoPrompt: Eliciting knowledge from language models with automatically generated prompts. In EMNLP, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.442, + 0.469, + 0.482 + ], + "angle": 0, + "content": "[41] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In ICML, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.484, + 0.469, + 0.51 + ], + "angle": 0, + "content": "[42] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. In ICLR, 2020. 2, 3, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.512, + 0.469, + 0.55 + ], + "angle": 0, + "content": "[43] Yang Song and Stefano Ermon. Generative modeling by estimating gradients of the data distribution. NeurIPS, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.553, + 0.469, + 0.593 + ], + "angle": 0, + "content": "[44] Omer Tov, Yuval Alaluf, Yotam Nitzan, Or Patashnik, and Daniel Cohen-Or. Designing an encoder for StyleGAN image manipulation. TOG, 2021. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.596, + 0.469, + 0.636 + ], + "angle": 0, + "content": "[45] Ngoc-Trung Tran, Viet-Hung Tran, Ngoc-Bao Nguyen, Trung-Kien Nguyen, and Ngai-Man Cheung. On data augmentation for GAN training. TIP, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.638, + 0.469, + 0.677 + ], + "angle": 0, + "content": "[46] Steven Walton, Ali Hassani, Xingqian Xu, Zhangyang Wang, and Humphrey Shi. StyleNAT: Giving each head a new perspective. arXiv preprint arXiv:2211.05770, 2022. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.679, + 0.469, + 0.733 + ], + "angle": 0, + "content": "[47] Yaxing Wang, Abel Gonzalez-Garcia, David Berga, Luis Herranz, Fahad Shahbaz Khan, and Joost van de Weijer. MineGAN: effective knowledge transfer from GANs to target domains with few images. In CVPR, 2020. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.735, + 0.469, + 0.788 + ], + "angle": 0, + "content": "[48] Yaxing Wang, Chenshen Wu, Luis Herranz, Joost van de Weijer, Abel Gonzalez-Garcia, and Bogdan Raducanu. Transferring GANs: generating images from limited data. In ECCV, 2018. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.79, + 0.469, + 0.842 + ], + "angle": 0, + "content": "[49] Jiayu Xiao, Liang Li, Chaofei Wang, Zheng-Jun Zha, and Qingming Huang. Few shot generative model adaption via relaxed spatial structural alignment. In CVPR, 2022. 2, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.469, + 0.899 + ], + "angle": 0, + "content": "[50] Xingqian Xu, Shant Navasardyan, Vahram Tadevosyan, Andranik Sargsyan, Yadong Mu, and Humphrey Shi. Image completion with heterogeneously filtered spectral hints. In WACV, 2023. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.146 + ], + "angle": 0, + "content": "[51] Xingqian Xu, Zhangyang Wang, Eric Zhang, Kai Wang, and Humphrey Shi. Versatile Diffusion: Text, images and variations all in one diffusion model. arXiv preprint arXiv:2211.08332, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.892, + 0.189 + ], + "angle": 0, + "content": "[52] Han Zhang, Zizhao Zhang, Augustus Odena, and Honglak Lee. Consistency regularization for generative adversarial networks. In ICLR, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.192, + 0.892, + 0.233 + ], + "angle": 0, + "content": "[53] Shengyu Zhao, Zhijian Liu, Ji Lin, Jun-Yan Zhu, and Song Han. Differentiable augmentation for data-efficient GAN training. In NeurIPS, 2020. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.235, + 0.892, + 0.275 + ], + "angle": 0, + "content": "[54] Zhengli Zhao, Sameer Singh, Honglak Lee, Zizhao Zhang, Augustus Odena, and Han Zhang. Improved consistency regularization for GANs. In AAAI, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.278, + 0.892, + 0.318 + ], + "angle": 0, + "content": "[55] Zhengli Zhao, Zizhao Zhang, Ting Chen, Sameer Singh, and Han Zhang. Image augmentations for GAN training. arXiv preprint arXiv:2006.02595, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.321, + 0.892, + 0.36 + ], + "angle": 0, + "content": "[56] Zexuan Zhong, Dan Friedman, and Danqi Chen. Factual probing is [MASK]: Learning vs. learning to recall. In NAACL-HLT, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.363, + 0.892, + 0.403 + ], + "angle": 0, + "content": "[57] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Zwei Liu. Conditional prompt learning for vision-language models. In CVPR, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.406, + 0.892, + 0.446 + ], + "angle": 0, + "content": "[58] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. IJCV, 2022. 
3, 4, 8" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.446 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.956 + ], + "angle": 0, + "content": "11503" + } + ] +] \ No newline at end of file diff --git a/2023/Zero-Shot Generative Model Adaptation via Image-Specific Prompt Learning/b1c7981e-30bf-4cd6-b2ff-8dd7951c08ab_origin.pdf b/2023/Zero-Shot Generative Model Adaptation via Image-Specific Prompt Learning/b1c7981e-30bf-4cd6-b2ff-8dd7951c08ab_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..08522df196caa7765bdbc59cccc7780d410827a4 --- /dev/null +++ b/2023/Zero-Shot Generative Model Adaptation via Image-Specific Prompt Learning/b1c7981e-30bf-4cd6-b2ff-8dd7951c08ab_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9275c2b0760155974976395f8fcee1bbbd708ca8ad2de5cb5265065460d32a4e +size 4333540 diff --git a/2023/Zero-Shot Generative Model Adaptation via Image-Specific Prompt Learning/full.md b/2023/Zero-Shot Generative Model Adaptation via Image-Specific Prompt Learning/full.md new file mode 100644 index 0000000000000000000000000000000000000000..e1814c12a781a24e2bb43763809a49283a82d6c8 --- /dev/null +++ b/2023/Zero-Shot Generative Model Adaptation via Image-Specific Prompt Learning/full.md @@ -0,0 +1,318 @@ +# Zero-shot Generative Model Adaptation via Image-specific Prompt Learning + +Jiayi Guo $^{1*}$ Chaofei Wang $^{1*}$ You Wu $^{2}$ Eric Zhang $^{3}$ Kai Wang $^{3}$ Xingqian Xu $^{3}$ Shiji Song $^{1}$ Humphrey Shi $^{3,4\dagger}$ Gao Huang $^{1\dagger}$ + +$^{1}$ Tsinghua University, BNRist $^{2}$ UCAS $^{3}$ SHI Labs @ Oregon & UIUC $^{4}$ Picsart AI Research (PAIR) https://github.com/Picsart-AI-Research/IPL-Zero-Shot-Generative-Model-Adaptation + +![](images/d738b2a13791d5c547a3216529ad819d26214393a5987c93bbfd8295911f861c.jpg) +Figure 1. The mode collapse issue. For NADA [21] and our method, the same generator pre-trained on the source domain of "Photo" is adapted to the unseen target domains of "Disney", "Anime painting", "Wall painting" and "Ukiyo-e" only with the domain labels. The images above the dotted line are some examples from the internet. The generated images of NADA exhibit some similar unseen patterns (yellow box areas) which are undesired in terms of quality and diversity. This issue is largely addressed by our method. + +# Abstract + +Recently, CLIP-guided image synthesis has shown appealing performance on adapting a pre-trained source-domain generator to an unseen target domain. It does not require any target-domain samples but only the textual domain labels. The training is highly efficient, e.g., a few minutes. However, existing methods still have some limitations in the quality of generated images and may suffer from the mode collapse issue. A key reason is that a fixed adaptation direction is applied for all cross-domain image pairs, which leads to identical supervision signals. To address this issue, we propose an Image-specific Prompt Learning (IPL) method, which learns specific prompt vec + +tors for each source-domain image. This produces a more precise adaptation direction for every cross-domain image pair, endowing the target-domain generator with greatly enhanced flexibility. Qualitative and quantitative evaluations on various domains demonstrate that IPL effectively improves the quality and diversity of synthesized images and alleviates the mode collapse. 
Moreover, IPL is independent of the structure of the generative model, such as generative adversarial networks or diffusion models. Code is available at https://github.com/Picsart-AI-Research/IPL-Zero-Shot-Generative-Model-Adaptation. + +# 1. Introduction + +In recent years, image synthesis using generative adversarial networks (GANs) [11] has been rapidly developed. + +The state-of-the-art methods can generate images that are hard to be distinguished from real data [14, 20, 21, 46, 50]. However, the GAN-based methods heavily rely on vast quantities of training examples, and adopt a cumbersome adversarial training scheme which generally costs many hours of training time. Unfortunately, in many real-world scenarios, data acquisition is difficult or expensive. For example, in the artistic domains, it is impossible to have artists make thousands of creations. The high training cost is also unacceptable on some embedded devices, e.g., cellphones. + +To address these issues, researchers begin to focus on the generative model adaptation. The goal of this task is to adapt a pre-trained source-domain generator to a target domain with limited data. Many few-shot GAN-based methods are proposed, such as TGAN [48], FreezeD [30], MinGAN [47], ADA [18], DiffAug [53], IDC [33] and RSSA [49], etc. However, these methods still require some training images of the target domain and follow the adversarial training scheme. As a pioneer work, StyleGAN-NADA [8] (NADA for short) proposes a zero-shot adaptation method, which only requires textual domain labels and discards the cumbersome adversarial training scheme by introducing a pre-trained CLIP model. Although efficient, it still has obvious deficiencies, i.e., the limited quality and mode collapse of generated images. As shown in Fig.1, we adapt a pretrained generator of "Photo" domain to "Disney", "Anime painting", "Wall painting" and "Ukiyo-e" domains. For the results of NADA [8], we notice that the generated images of the same target domain always show some homogeneous patterns which degrade the image quality and diversity, such as deep nasolabial folds in "Disney", squinting eyes in "Anime painting", red cheeks in "Wall painting" and blue eyebrows in "Ukiyo-e" (yellow box areas). + +By exploring the factors behind this phenomenon, we find that the key factor is the fixed adaptation direction produced by manually designed prompts. Sharing the direction for all cross-domain image pairs leads to identical supervision signals for the model adaptation. Consider the example, adapting a generator of "Human" domain to "Tolkien elf" domain as shown in Fig.2. The previous works [8, 22] adopt manually designed prompts (e.g., "A photo of a") plus the domain label to produce a fixed adaptation direction, which is shared by all cross-domain image pairs (Fig.2 (a)) in the adaptation process. We argue that the constraint is too restrictive and suppresses the image-specific features, leading to homogeneous generated patterns. + +In this paper, we propose an Image-specific Prompt Learning (IPL) method to address the above issue. The motivation is setting more precise and diversified adaptation directions by customizing more image-specific prompts, for instance "Asian girl", "Curly hair lady" and "Elder glass man" (Fig.2 (b)). These adaptation directions endow the target-domain generator with high flexibility to synthesize + +![](images/7d3df592624051881c753925364c9354fc57843906aafa0d6691c58435607553.jpg) +Figure 2. An illustration of our motivation. 
The previous methods adopt manual prompts to compute a fixed adaptation direction for all cross-domain image pairs, while our method learns image-specific prompts for producing more precise and diversified adaptation directions. + +more diversified images. The proposed IPL is a two-stage method. In Stage 1, a latent mapper is trained to produce an image-specific set of prompt vectors conditioned on each source-domain image by a contrastive training scheme. The learned prompt vectors contain more specific and diversified features of the source-domain images than the fixed prompt vectors. We further propose a domain regularization loss to ensure that the learned prompt vectors are compatible with the target domain. In Stage 2, we compute more precise and diversified adaptation directions for each cross-domain image pair, and train the target-domain generator with an adaptive directional CLIP loss, which can be viewed as an improved version of the Directional CLIP Loss [8]. As shown in Fig.1, our method alleviates the mode collapse issue well. Extensive experiments across a wide range of domains demonstrate that the proposed IPL effectively improves the quality of synthesized images and overcomes the mode collapse issue. User studies and ablation studies are also conducted to validate the effectiveness of our method. + +It is worth noting that our proposed IPL method is independent of the structure of the generative model, and can be applied to the recent diffusion models [13,27,31,35,41-43, 51]. Thus we also combine IPL with diffusion models and get a more robust and stronger generative capacity, especially on complex images, which shows the high effectiveness and adaptability of our approach. + +# 2. Related Work + +Generative model adaptation. Generative model adaptation is the task of adapting a generative model trained on a large-scale source domain to a data-limited target domain. + +According to the size of the training dataset of the target domain, it can be directly divided into two main categories: few-shot generative model adaptation and zero-shot generative model adaptation. For the few-shot generative model adaptation task, the most natural approach is to fine-tune a pre-trained GAN [2, 4, 26, 48]. However, fine-tuning the entire network weights used to result in overfitting. Subsequently, many methods were proposed to alleviate the overfitting issue. They either imposed strong regularization [52, 54], or modified the network parameters with a slight perturbation [30, 32, 37, 47], or preserved some important information by cross-domain alignment [33, 49], or performed data augmentation [45, 53, 55]. + +For the zero-shot generative model adaptation task, NADA [8] first proposed to introduce a pre-trained CLIP model for supplying necessary prior knowledge. It only required textual domain labels, and encoded the domain gap as a text-guided adaptation direction in CLIP space. To enhance the identity-preserving capability of real-world image translation, Kim et al. further proposed DiffusionCLIP [22] which utilized diffusion models [42] instead of StyleGANs [18-21] in NADA. Nevertheless, these existing works all adopt a fixed adaptation direction which only contains the basic domain knowledge but no image-specific features. In this paper, we argue that this shared fixed adaptation direction may lead to the mode collapse issue. To produce more accurate and adaptive adaptation directions, we propose to learn diverse and specific prompt vectors for each image. + +Prompt learning. 
Prompt engineering is first introduced as a knowledge probing approach [34]. Given cloze-style prompts, it induces pre-trained language models to generate the corresponding answers. However, manually designed prompts may be sub-optimal and provide imprecise guidance. To tackle this issue, prompt learning [9, 16, 23, 25, 28, 40, 56] has been widely studied in natural language processing to automatically explore the optimal set of prompts. With the unprecedented development of vision-language models [15, 36] in recent years, researchers begin to apply prompt learning to computer vision tasks [7, 10, 17, 24, 57, 58]. In specific, Zhou et al. [57, 58] first adopted context optimization in image classification tasks by modeling context words with continuous vectors in the word embedding space. Subsequently, many downstream tasks in computer vision were also explored, e.g., object detection [7], visual grounding [24], video understanding [17] and transfer learning [10]. As far as we know, this is the first work to propose an adaptive prompt learning scheme for generative model adaptation. Different from previous prompt learning schemes, our method introduces a latent mapper to learn a specific set of prompt vectors for each image. When training the target-domain generator, the learned image-specific prompt vectors could produce more precise adaptation directions to provide better supervision signals. + +# 3. Methodology + +The goal of zero-shot generative model adaptation is to adapt a pre-trained source-domain generator $G_{\mathrm{s}}$ to an unseen target domain, and get the target-domain generator $G_{\mathrm{t}}$ . The source domain with the domain label $\mathrm{Y_s}$ , e.g., "Human", can obtain plentiful high-quality images by $G_{\mathrm{s}}$ . The target domain is described only through the domain label $\mathrm{Y_t}$ , e.g., "Tolkien elf", with no images. Following [8, 22], a pre-trained CLIP model [36] including an image encoder $E_{\mathrm{I}}$ and a text encoder $E_{\mathrm{T}}$ is introduced. + +We propose a two-stage method named Image-specific Prompt Learning (IPL). Its framework is shown in Fig.3. In Stage 1, a latent mapper $F$ is trained to produce a set of image-specific prompt vectors $\{[\mathbf{V}]_1^i, [\mathbf{V}]_2^i, \dots, [\mathbf{V}]_m^i\}$ for each latent code $w^i$ of a source-domain image. Each prompt vector has the same dimension with word embeddings in CLIP space. The training loss consists of a contrastive learning loss $\mathcal{L}_{\mathrm{contr}}$ and a domain regularization loss $\mathcal{L}_{\mathrm{domain}}$ . The former aims to preserve the image-specific features of each source domain image in the learned prompt vectors. The latter constrains the image-specific features to be suitable to the target domain, which means the learned features should not conflict with the target domain. For example, the features of prompts like "round ear" should not be contained in the ideal prompt vectors if the target domain is "Tolkien elf". In Stage 2, the trained latent mapper $F$ is plugged into the training process of the target-domain generator $G_{\mathrm{t}}$ , and produces more precise and diversified adaptation directions for cross-domain image pairs. This training stage follows [8] except that learned prompt vectors produced by the latent mapper $F$ replace the fixed prompt vectors. The final textual supervision information includes shared learned prompt vectors and respective embeddings of the original domain labels. + +# 3.1. 
Image-specific prompt learning + +General prompts. The previous methods [8, 22] compute a fixed adaptation direction produced by two embeddings of manually designed prompts, e.g., "a photo of a human" and "a photo of a Tolkien elf", then constrain the directions of all cross-domain pairs to be parallel with the adaptation direction. In contrast to manually designed prompts, prompt learning [58] aims to find the optimal set of prompt vectors for a domain by directly tuning the embeddings of prompts. Formally, we define a general prompt matrix $\mathrm{M_d}$ to represent a given domain d. $\mathrm{M_d}$ consists of the prompt vectors $[\mathbf{V}]_1, [\mathbf{V}]_2, \dots, [\mathbf{V}]_m$ and the embedding of the domain label $[\mathrm{Y_d}]$ as below: + +$$ +\mathrm {M} _ {\mathrm {d}} = [ \mathbf {V} ] _ {1} [ \mathbf {V} ] _ {2} \dots [ \mathbf {V} ] _ {m} [ \mathrm {Y} _ {\mathrm {d}} ], \tag {1} +$$ + +where $m$ is the number of prompts. Suppose the dimension of each embedding is $k$ . Then the dimension of $\mathbf{M}_{\mathrm{d}}$ should be $(m + 1) \times k$ . In [8, 22], the prompt vectors + +![](images/fff8e843236ad10f7f82005b31182998855786026da78fc0b4f6652c2ce03917.jpg) +Stage 1: Training latent mapper for prompt learning + +![](images/8d9c252ea1b985f5bf7a741110cdaaa4f4d7a769b1958bd4678937ff495d9bd4.jpg) +Stage 2: Training generator for image synthesis +Figure 3. The framework of our method. In Stage 1, a latent mapper $F$ is trained for prompt learning by a contrastive learning loss $\mathcal{L}_{\mathrm{contr}}$ and a domain regularization loss $\mathcal{L}_{\mathrm{domain}}$ . The image encoder $E_{\mathrm{I}}$ and the text encoder $E_{\mathrm{T}}$ are from the CLIP model [36]. In Stage 2, the target-domain generator $G_{\mathrm{t}}$ is trained for image synthesis by the improved Directional CLIP Loss $\mathcal{L}_{\mathrm{adapt}}$ in which the adaptive prompts produced by the latent mapper are applied. In two stages, the locked modules are fixed while the unlocked modules are trained. For simplicity, we replace $E_{\mathrm{I}}(G_{\mathrm{s}}(w^{i}))$ and $E_{\mathrm{T}}(\mathrm{M}_{\mathrm{s}}^{i})$ with $\Gamma^i$ and $\mathrm{T_s}^i$ , respectively. + +$[\mathbf{V}]_1, [\mathbf{V}]_2, \dots, [\mathbf{V}]_m$ are fixed embeddings of manually designed prompts. For prompt learning [58], the prompt vectors are learned by encoding each training image of the domain $d$ with $E_{\mathrm{I}}$ and the prompt matrix $\mathrm{M_d}$ with $E_{\mathrm{T}}$ , and then maximizing the cosine similarity between them. + +Inspired by prompt learning, in the zero-shot generative model adaptation task, a natural idea is to learn an optimal set of prompt vectors instead of the manually designed prompts in NADA [8]. Although the adaptation direction calculated by the learned prompt vectors seems to be more reasonable than that of the manually designed prompts, it is still fixed and shared for all cross-domain image pairs. These fixed learned prompt vectors can not solve the mode collapse issue (Experimental validations can be seen in Sec. 4.4). To obtain more flexible and diversified adaptation directions, we further propose to learn a set of image-specific prompt vectors for each image, which can be regarded as an improved version of prompt learning. + +Image-specific prompts. Utilizing the source-domain generator $G_{\mathrm{s}}$ , we train a latent mapper $F$ as shown in Fig.3 (Stage 1). Through the mapper, each image of the source domain can be matched to an optimal set of prompt vectors. 
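Before the formal definition below, the following is a minimal PyTorch-style sketch of such a latent mapper. It is an illustration only, not the released implementation: the 512-dimensional latent space, the 512-dimensional CLIP word-embedding space, the hidden width and the choice of $m = 4$ prompt vectors are assumptions made for this sketch.

```python
import torch
import torch.nn as nn

class LatentMapper(nn.Module):
    """Illustrative sketch of the latent mapper F: it maps a latent code w of a
    source-domain image to m image-specific prompt vectors in the CLIP
    word-embedding space. All dimensions here are assumed, not prescribed."""

    def __init__(self, w_dim: int = 512, prompt_dim: int = 512, m: int = 4, hidden: int = 512):
        super().__init__()
        self.m = m
        self.prompt_dim = prompt_dim
        # A plain four-layer fully-connected network, as described in the text.
        self.net = nn.Sequential(
            nn.Linear(w_dim, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, m * prompt_dim),
        )

    def forward(self, w: torch.Tensor) -> torch.Tensor:
        # w: (batch, w_dim) -> prompt vectors: (batch, m, prompt_dim)
        return self.net(w).view(-1, self.m, self.prompt_dim)

# The returned vectors are then concatenated with the embedding of the domain
# label to form the prompt matrix of Eq. (2) before entering the CLIP text encoder.
mapper = LatentMapper()
prompts = mapper(torch.randn(2, 512))  # -> torch.Size([2, 4, 512])
```
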
Formally, given a latent code $w^{i}$ , corresponding to + +the $i^{\mathrm{th}}$ image in the source domain, the image-specific set of prompt vectors $\{[\mathbf{V}]_1^i,[\mathbf{V}]_2^i,\dots ,[\mathbf{V}]_m^i\}$ can be obtained by $F(w^{i},\theta)$ , where $\theta$ denotes the parameters of the latent mapper $F$ . Following the definition of the prompt matrix in Eq.(1), we define an image-specific prompt matrix of the $i^{\mathrm{th}}$ source-domain image as: + +$$ +\mathrm {M} _ {\mathrm {s}} ^ {i} = F \left(w ^ {i}, \theta\right) \left[ \mathrm {Y} _ {\mathrm {s}} \right] = \left[ \mathrm {V} \right] _ {1} ^ {i} \left[ \mathrm {V} \right] _ {2} ^ {i} \dots \left[ \mathrm {V} \right] _ {m} ^ {i} \left[ \mathrm {Y} _ {\mathrm {s}} \right]. \tag {2} +$$ + +In this paper, $F$ is a common four-layer fully-connected network. Next, we show how to train it. + +Contrastive training scheme. Given a batch of latent codes $\{w^1, w^2, \dots, w^n\}$ , we can produce a batch of sets of prompt matrices $\{\mathrm{M_s^1}, \mathrm{M_s^2}, \dots, \mathrm{M_s^n}\}$ by $F$ and a batch of images $\{G_{\mathrm{s}}(w^{1}), G_{\mathrm{s}}(w^{2}), \dots, G_{\mathrm{s}}(w^{n})\}$ by $G_{\mathrm{s}}$ . Then $n \times n$ pairs $< G_{\mathrm{s}}(w^{i}), \mathrm{M}_{\mathrm{s}}^{j} >$ , $i, j \in \{1, 2, \dots, n\}$ have been obtained. Then, we take the pairs of $i = j$ as positive samples, and the pairs of $i \neq j$ as negative samples for contrastive training. Specifically, we compute the similarity between embeddings of the $i^{\text{th}}$ image and the $j^{\text{th}}$ prompt matrix in CLIP space as: + +$$ +\operatorname {S i m} _ {i j} = \operatorname {C o s} \left(\operatorname {N o r m} \left(E _ {\mathrm {I}} \left(G _ {\mathrm {s}} \left(w ^ {i}\right)\right)\right), \operatorname {N o r m} \left(E _ {\mathrm {T}} \left(\mathrm {M} _ {\mathrm {s}} ^ {j}\right)\right)\right), \tag {3} +$$ + +where $\mathrm{Norm}(\cdot)$ and $\mathrm{Cos}(\cdot)$ represent $L_{2}$ normalization and the cosine function, respectively. The similarities of pos + +itive samples are maximized while the similarities of negative samples are minimized. The contrastive loss is expressed as: + +$$ +\mathcal {L} _ {\text {c o n t r}} = \mathbb {E} _ {w \in \mathcal {W}} \left(\sum_ {i \neq j} \left(\operatorname {S i m} _ {i j}\right) - \sum_ {i = j} \left(\operatorname {S i m} _ {i j}\right)\right). \tag {4} +$$ + +Domain regularization loss. For the target domain without any prior knowledge except the domain label $\mathrm{Y_t}$ , we can simply share the learned prompt vectors between the source and target domains following [8]. However, the shared prompt vectors may lead to the risk of generating unrealistic images for the target domain, because some learned prompt vectors may contain strongly relevant features to the source domain, leading to conflict with the target domain. For example, an image of "Human" domain is matched to prompt vectors of "round ear", but a corresponding image of "Tolkien elf" domain should not contain the features of "round ear". Sharing these prompt vectors is harmful to the target-domain image generation. Therefore, we further propose a domain regularization loss. Specifically, we constrain the angles between the embeddings of the image-specific prompt matrix $\mathbf{M}_{\mathrm{t}}^{i}$ and the target-domain label $\mathrm{Y_t}$ in CLIP space to be small, to avoid the learned prompt vectors conflicting with the target domain. 
Formally, the domain regularization loss is described as: + +$$ +\mathcal {L} _ {\text {d o m a i n}} = - \mathbb {E} _ {w ^ {i} \in \mathcal {W}} \sum_ {i = 1} ^ {n} \left(\operatorname {C o s} \left(E _ {\mathrm {T}} \left(\mathrm {M} _ {\mathrm {t}} ^ {i}\right), E _ {\mathrm {T}} \left(\mathrm {Y} _ {\mathrm {t}}\right)\right)\right), \tag {5} +$$ + +where $\mathrm{M}_{\mathrm{t}}^{i}$ is calculated by Eq.(2) except replacing the domain label, $\mathrm{Cos}(\cdot)$ represents the cosine similarity. + +As a summary, the whole training loss function of the latent mapper $F$ is: + +$$ +\mathcal {L} = \mathcal {L} _ {\text {c o n s t r}} + \lambda \mathcal {L} _ {\text {d o m a i n}}, \tag {6} +$$ + +where $\lambda$ is the ratio parameter. Optimized by $\mathcal{L}$ , the learned prompt vectors can not only reflect the features of the source-domain images, but also adapt to the target domain. + +# 3.2. Latent mapper guided generator training + +After training the latent mapper $F$ , we conduct the second stage: training the target-domain generator $G_{\mathrm{t}}$ as shown in Fig.3 (Stage 2). In specific, we plug in the trained latent mapper, and train $G_{\mathrm{t}}$ with an improved Directional CLIP Loss $\mathcal{L}_{\mathrm{adapt}}$ . Its main difference with [8] is using the image-specific prompt vectors that are produced on-the-fly by $F$ instead of the fixed ones of manually designed prompts. Formally, given a latent code $w^{i}$ , we calculate the direction of the $i^{\mathrm{th}}$ source and target image pair as below: + +$$ +\Delta \mathrm {I} _ {i} = \operatorname {N o r m} \left(E _ {\mathrm {I}} \left(G _ {\mathrm {t}} \left(w ^ {i}\right)\right) - \operatorname {N o r m} \left(E _ {\mathrm {I}} \left(G _ {\mathrm {s}} \left(w ^ {i}\right)\right), \right. \right. \tag {7} +$$ + +where $\mathrm{Norm}(\cdot)$ represents $L_{2}$ normalization. The image-specific adaptation direction is calculated as below: + +$$ +\Delta \mathrm {T} _ {i} = \operatorname {N o r m} \left(E _ {\mathrm {T}} \left(\mathrm {M} _ {\mathrm {t}} ^ {i}\right)\right) - \operatorname {N o r m} \left(E _ {\mathrm {T}} \left(\mathrm {M} _ {\mathrm {s}} ^ {i}\right)\right). \tag {8} +$$ + +The improved Directional CLIP Loss $\mathcal{L}_{\mathrm{adapt}}$ is: + +$$ +\mathcal {L} _ {\mathrm {a d a p t}} = \mathbb {E} _ {w ^ {i} \in \mathcal {W}} \sum_ {i = 1} ^ {n} \left(1 - \frac {\Delta \mathrm {I} _ {i} \cdot \Delta \mathrm {T} _ {i}}{| \Delta \mathrm {I} _ {i} | | \Delta \mathrm {T} _ {i} |}\right), \tag {9} +$$ + +where $n$ is the batch size of latent codes. $\mathcal{L}_{\mathrm{adapt}}$ constrains the direction of each image pair $\Delta \mathrm{I}_i$ with an image-specific adaptation direction $\Delta \mathrm{T}_i$ . + +# 4. Experiments + +In this section, we evaluate our method qualitatively and quantitatively. The experimental setup is firstly presented in Sec. 4.1. Then we show image synthesis results across various domains in Sec. 4.2. Utilizing a GAN inversion model and diffusion models, results of real-world image translation are provided in Sec. 4.3. Finally, we carefully conduct ablation studies on prompt designing schemes and loss term ratios in Sec. 4.4. + +# 4.1. Experimental setup + +Baselines and settings. Two strong methods are chosen as our competitors. For zero-shot image synthesis, NADA [8] is the state-of-the-art method. 
Following NADA [8], we adapt the pre-trained StyleGANv2 [21] generators on (i) Flickr-Faces-HQ (FFHQ) [8] and (ii) Animal FacesHQ (AFHQ) [3], utilize the same pre-trained CLIP [36] built on ViT-B/32 [6]. For zero-shot real-world image translation, we utilize Restyle [1] with e4e [44] encoder to invert a real image into the latent space $\mathcal{W}$ for StyleGANs. DiffusionCLIP (Diff-CLIP for short) [22] is the state-of-the-art method. We follow the setting of [22] except replacing denoising diffusion implicit models (DDIM) [42] with diffusion autoencoders [35]. The training process includes 300 iterations for prompt learning and 300 iterations for generator adaptation using a single NVIDIA RTX 3090 GPU. The batch size is set to 32 for prompt learning and 2 for generator adaptation. The number of learned prompt vectors $m$ is set to 4. For each domain, the ratio parameter $\lambda$ in Eq.(6) is selected among [1, 10], according to the best Inception Score [38] of adapted generators. The whole training process requires about $10\sim 20$ minutes. More implementation details can be seen in supplementary materials. + +Evaluation metrics. The ideal generated images should have: 1) high quality and diversity, 2) correct target-domain style, and 3) necessary source-domain information preservation (e.g., structure or identity). For a comprehensive evaluation, we utilize the popular Inception Score (IS) [38] to evaluate the image quality and diversity, the Single Image Fréchet Inception Distance (SIFID) [39] to evaluate the target-domain style, the Structural Consistency Score (SCS) [49] to evaluate the structure preservation, the identity similarity (ID) [5, 12] to evaluate the identity preservation. More details can be seen in supplementary materials. + +![](images/29fb6b7f34be5303901b7120fd27c50add553887e1983dc4e092eba95845498f.jpg) + +![](images/08418b9a6f0b65e8dec972b97a56e626c27bc7f5e4dc21961dfc895d8a67c363.jpg) + +![](images/d74e7911721bc05f5b888638160c73bd8faa6ecd3bda79ab6cc2324278edb17d.jpg) + +![](images/2dc87c38b88cb36faeb6cd7945f5e3b66de331822eba924bd546c0e624226e44.jpg) +Figure 4. Image synthesis comparison results. For FFHQ [21], the source domain is "Human" and the target domains are "Pixar character", "Tolkien elf", and "Werewolf". For AFHQ-Dog [3], the source domain is "Photo" and the target domains are "Cartoon", "Pointillism", and "Cubism". The yellow box areas show the mode collapse problem of NADA [8]. + +![](images/d6df7d3195dc9f738872649e3d2812238e7b48f155c54a226759d7ccd9a05f95.jpg) + +![](images/ed555e54f9be07325dd42f45c54854a4745e971370d6a0517d61066e98e8ce7a.jpg) + +Table 1. Quantitative evaluation results. US denotes user study. The best results are bold. + +
| Dataset | Source→Target | IS [38] (↑) NADA | IS (↑) IPL | SCS [49] (↑) NADA | SCS (↑) IPL | ID [5,12] (↑) NADA | ID (↑) IPL | SIFID [39] (↓) NADA, R1/R2/R3 | SIFID (↓) IPL, R1/R2/R3 | US (↑) |
|---|---|---|---|---|---|---|---|---|---|---|
| FFHQ [8] | Photo→Disney | 2.721 | 3.089 | 0.407 | 0.448 | 0.782 | 0.801 | 2.776 / 3.136 / 3.670 | 2.517 / 2.930 / 3.497 | 82.6% |
| | Photo→Anime painting | 2.450 | 3.051 | 0.324 | 0.518 | 0.666 | 0.776 | 2.956 / 1.811 / 1.242 | 2.845 / 1.595 / 1.021 | 79.3% |
| | Photo→Wall painting | 2.183 | 2.676 | 0.439 | 0.487 | 0.594 | 0.637 | 1.944 / 1.220 / 1.331 | 1.930 / 1.183 / 1.274 | 80.9% |
| | Photo→Ukiyo-e | 2.205 | 2.974 | 0.420 | 0.506 | 0.775 | 0.632 | 1.954 / 1.990 / 1.326 | 1.165 / 1.255 / 0.878 | 85.9% |
| | Human→Pixar character | 2.703 | 2.785 | 0.379 | 0.461 | 0.757 | 0.853 | 0.793 / 0.932 / 0.865 | 0.638 / 0.821 / 1.092 | 86.7% |
| | Human→Tolkien elf | 2.479 | 2.778 | 0.416 | 0.491 | 0.711 | 0.772 | 0.632 / 1.495 / 1.452 | 0.690 / 0.637 / 0.701 | 76.8% |
| | Human→Werewolf | 2.619 | 2.809 | 0.399 | 0.417 | 0.642 | 0.747 | 1.969 / 1.846 / 1.967 | 1.734 / 1.688 / 1.911 | 72.7% |
| AFHQ [3] | Photo→Cartoon | 6.505 | 8.658 | 0.407 | 0.563 | 0.925 | 0.941 | 2.708 / 2.672 / 3.870 | 2.517 / 2.477 / 3.278 | 87.6% |
| | Photo→Pointillism | 5.419 | 6.913 | 0.224 | 0.542 | 0.775 | 0.881 | 7.081 / 5.288 / 7.142 | 4.818 / 3.089 / 4.074 | 78.5% |
| | Photo→Cubism | 4.165 | 6.450 | 0.386 | 0.463 | 0.934 | 0.943 | 2.779 / 2.938 / 3.199 | 2.431 / 2.956 / 2.284 | 74.3% |
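
For reference when reading Table 1, the Inception Score follows the standard definition introduced in [38],

$$
\mathrm{IS} = \exp \left(\mathbb{E}_{x \sim p_{g}} \, D_{\mathrm{KL}}\big(p(y \mid x) \,\|\, p(y)\big)\right),
$$

where $p(y \mid x)$ is the class posterior of a pre-trained Inception network for a generated image $x$ and $p(y)$ is the marginal over generated samples; higher values indicate better quality and diversity. The exact evaluation protocols for all four metrics are given in the supplementary materials.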
+ +# 4.2. Generative model adaptation + +Qualitative comparison. In addition to Fig.1, we conduct extensive experiments across a wide range of domains as shown in Fig.4. All results indicate that our proposed approach outperforms NADA consistently. The yellow box areas in the figures denote the main different features between NADA and our IPL. From the quality of the gener + +ated images, the results of NADA have more incorrect features and noise, such as green mussy noise on hairs (Tolkien elf), ruined noses (Werewolf) and unshaped necks (Pointillism), while the results of IPL are more clear and correct. From the mode collapse perspective of the generated images, NADA is prone to collapse to some similar facial features for different images, such as depressed emotions (Pixar character), folded ears (Cartoon) and blue noses (Cu + +"Photo" $\rightarrow$ "Wall painting" + +![](images/0268edaa23f642103bd1f705e64ee50c57d5519fc6cbf5a435497c24df1f5acd.jpg) + +![](images/70ae5f6ce32ee7b7a22ca816cecd35cef0f9b1fd65ad245e20525bb713fea231.jpg) + +![](images/269fc30244248f9c7014d3a679e18bd2ad6f633c4ebe0d4a4847ebd42467e94a.jpg) + +![](images/dadb6e2a11a8a7cd0a9aeca2c8fa601789afb8b890b353e11e32e2601826a1c5.jpg) + +![](images/4687422010a3419ad8aa9635ccde0f89c1a9bdfe3517bd6651caa42d74798c4b.jpg) + +![](images/fabe97722fa861eaa5d6afd08724ae22e7fb11443d4695cd3bb126ae86b5446d.jpg) + +![](images/e7af42432eb1da68e9e7d6b1894a86454d2c738c0ec1b4d399de583cf7332101.jpg) + +![](images/79042054a2ae262e18ffe0fc1d954021da116f3308a780ba49b49623567cc96f.jpg) + +![](images/cd443fa96b2fde1eed83d4bfb736c84c63d44686ba3fa96b3cd7f41101a7129f.jpg) + +"Human" $\rightarrow$ "Tolkien elf" + +![](images/25c38f9cca72c89ddbb5ffad71a66b9291ff39396201b148a82ba074a5b641ee.jpg) +Real Images +Figure 5. Real-world image translation comparison results. Baselines are NADA [8], Diff-CLIP [22] and Diff-CLIP+ (an improved version of Diff-CLIP). Recon1, Recon2 and Recon3 refer to inversion results via Restyle [1], DDIM and diffusion autoencoders, respectively. GAN-IPL and Diff-IPL denote integrating IPL with NADA and Diff-CLIP+, respectively. Real images are from CelebA-HQ dataset [29] and translated into two styles of images, "Wall painting" and "Tolkien elf". The yellow boxes show the key observation areas. + +![](images/3554ec76e38e70f52b8788d4d92390f422f2f746cc296883976998f13ded151b.jpg) +Recon1 + +![](images/1a3fcd2901d98d2b5a9334bce46342c1e92c6ca58ebd207841b67e6cb52b34b3.jpg) +NADA + +![](images/f5b419a0a98122ff4b831e60890433270dd62a07fed5fc62bae54df243de7bbb.jpg) +GAN-IPL + +![](images/fd02d4ef7fcca9b1bff27811a8aec50c12890e34cd577549341714e42c83056a.jpg) +Recon2 + +![](images/520fd4a0ab9340aa229fd044e9cfd1ee7b585aab42909b89c04c0361f58f73bc.jpg) +Diff-CLIP + +![](images/37386bea78b0c8edf5a9d57d86167dae92e5f7e3622d12f72063ae790a8bbaf6.jpg) +Recon3 + +![](images/716b93708d5f65e373def0309008655580349ad226402db625a51cac6be3c994.jpg) +Diff-CLIP+ + +![](images/78f9acc720855cb593279202034e32f89e677c92a2db8a81fdba192bdde14f2a.jpg) +Diff-IPL + +bism), while IPL presents consistently higher diversity and solve the mode collapse issue well. Our advantages mainly come from the fact that the latent mapper preserves sufficient image-specific and target-domain friendly features from the source-domain images. The produced prompt vectors provide more precise and diversified adaptation directions for the target-domain generator adaptation. + +Quantitative comparison. 
To quantify the performance improvement of IPL compared to NADA [8], IS, SCS, ID and SIFID are evaluated. As reported in Tab.1, for IS, IPL outperforms NADA on all 10 settings, indicating our method achieves better image quality and diversity. For SCS and ID, IPL outperforms NADA on most of the 10 settings except "Human $\rightarrow$ Ukiyo-e". It is mainly because that "Ukiyo-e" naturally favors humans with narrow eyes and pale skin, which encourages identity changes during training. For SIFID, we collect 3 reference images $(\mathbb{R}_1,\mathbb{R}_2,$ and $\mathbb{R}_3)$ on the internet for each target domain. Tab.1 shows that IPL outperforms NADA in most cases, indicating our superiority in generating precise target-domain styles. + +User studies. For each target domain, 32 images generated by NADA and our method are provided to human observers, together with their corresponding source images and textual labels of target domains. Human observers are required to choose better synthesized images which are semantically more consistent with the target domain labels and preserve the useful source-domain information better. We collect 1210 responses from 121 people using a survey platform. As reported in the last column of Tab.1, $80.5\%$ of + +users prefer our approach to NADA on average. + +# 4.3. Real-world image translation + +This task first inverts a real-world image to the latent code by a pre-trained inversion model and then feeds it to the trained target-domain generator to get the translated target-domain image. For GAN-based generators, we compare our method (GAN-IPL) with NADA by connecting the inversion model Restyle [1]. For diffusion model generators, we compare our method (Diff-IPL) with Diff-CLIP [22] and Diff-CLIP+ which is an improved version of Diff-CLIP [22] by replacing the original DDIM [42] with a diffusion autoencoder [35]. For these diffusion models, a deterministic inversion process is naturally provided. + +As shown in Fig.5, comparing the results of NADA and GAN-IPL, IPL's superiority of alleviating mode collapse over NADA can still be observed. Comparing the results of Recon1, Recon2 and Recon3, diffusion models (Recon2 and Recon3) consistently perform better identity preservation than Restyle (Recon1) for real image inversion, especially for some uncommon stuffs in a human face photo, e.g., the hats, hands and tattoos in Fig.5. However, this property is not well inherited in the target domain generators with a fixed adaptation direction (see the results of Diff-CLIP and Diff-CLIP+). Our proposed IPL could help preserve the details in source images better and present the target-domain styles correctly (see the results of Diff-IPL). Quantitative evaluation results of Diff-CLIP, Diff-CLIP+ and Diff-IPL can be seen in supplementary materials. + +![](images/32128f89966999a701937ed1a444c461eece8cf86d9b70eedbb719c0633a1b18.jpg) +Figure 6. Ablation results of prompt designing schemes. + +# 4.4. Ablation studies + +Prompt designing schemes. We investigate four different prompt designing schemes: 1) manually fixed prompts (NADA), 2) learned fixed prompts, 3) random prompts and 4) adaptive prompts (Ours). Manually fixed prompts mean simply utilizing the manually designed prompts as NADA [8]. Learned fixed prompts denote unified prompt vectors produced by common prompt learning strategy [58] and shared for all images. Random prompts refer to prompt vectors produced by a randomly initialized latent mapper. 
Adaptive prompts denote the learned image-specific prompt vectors produced by our IPL method. + +As illustrated in Fig.6, synthesized images with manually fixed prompts and learned fixed prompts show some similar mode collapse issues, e.g., blue eyebrows (Ukiyo-e) and depressed emotions (Pixar character). They both produce a fixed adaptation direction, which leads to identical supervision signals for all image pairs. Synthesized images with random prompts present more photo-realistic results but lack the desired target-domain style. A possible reason is that the random prompts contain some features conflicting with the target domain and impede the learning of the target domain style. Our adaptive prompts perform best since the prompts contain more image-specific and target-domain friendly features from the source-domain images. + +Loss term ratios. We compare different values of the ratio parameter $\lambda$ in Eq.(6), which is used to adjust the intensity of the domain regularization loss. Visual results are shown in Fig.7. In specific, when we set $\lambda$ to a small value ( $\lambda = 0$ as an extreme case), there is almost no constraint from the target domain. The learned prompts would excessively preserve the source-domain features. Thus the synthesized images are similar to their corresponding source + +![](images/047ec3af94a9ec2da033b34d28cf97a94320a17fb212f632782bbafa48bb1f72.jpg) +Figure 7. Ablation results of loss term ratios. + +images. In contrast, if $\lambda$ is set to a large value ( $\lambda = 20$ as an example), a strong target-domain constraint will limit the diversity of the learned prompts. As a result, the synthesized images would slightly show some similar undesired patterns as images generated via fixed prompts. Therefore, in practical applications, $\lambda$ should be a trade-off value (i.e., between 1 and 10). + +# 5. Conclusion + +In this paper, we have proposed a novel zero-shot generative model adaptation approach called Image-specific Prompt Learning (IPL). In specific, we build a projection from latent codes to image-specific sets of prompt vectors via a latent mapper. With a contrastive learning scheme and a domain regularization constraint, the learned prompt vectors represent image-specific but target-domain-friendly features, producing more precise and diversified adaptation directions for target domain generator training. Compared with the state-of-the-art approaches, IPL consistently improves the quality of synthesized images and alleviates the mode collapse issue. Furthermore, IPL is independent of the type of generator and works well with both GANs and diffusion models, which exhibits good universality and adaptability. In the future, we will try to apply the proposed image-specific prompt learning strategy in other downstream tasks, such as unsupervised image captioning. + +# Acknowledgements + +This work is supported in part by the National Key R&D Program of China (2019YFC1408703), the National Natural Science Foundation of China (62022048, 62276150), Guoqiang Institute of Tsinghua University and Beijing Academy of Artificial Intelligence. + +# References + +[1] Yuval Alaluf, Or Patashnik, and Daniel Cohen-Or. ReStyle: A residual-based StyleGAN encoder via iterative refinement. In ICCV, 2021. 5, 7 +[2] Sergey Bartunov and Dmitry Vetrov. Few-shot generative modelling with generative matching networks. In AISTATS, 2018. 3 +[3] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. StarGAN v2: Diverse image synthesis for multiple domains. 
In CVPR, 2020. 5, 6 +[4] Louis Clouatre and Marc Demers. FIGR: Few-shot image generation with reptile. arXiv preprint arXiv:1901.02199, 2019.3 +[5] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. ArcFace: Additive angular margin loss for deep face recognition. In CVPR, 2019. 5, 6 +[6] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2020. 5 +[7] Yu Du, Fangyun Wei, Zihe Zhang, Miaojing Shi, Yue Gao, and Guoqi Li. Learning to prompt for open-vocabulary object detection with vision-language model. In CVPR, 2022. 3 +[8] Rinon Gal, Or Patashnik, Haggai Maron, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. StyleGAN-NADA: CLIP-guided domain adaptation of image generators. In SIGGRAPH, 2022. 2, 3, 4, 5, 6, 7, 8 +[9] Tianyu Gao, Adam Fisch, and Danqi Chen. Making pre-trained language models better few-shot learners. In ACL/IJCNLP, 2021. 3 +[10] Chunjiang Ge, Rui Huang, Mixue Xie, Zihang Lai, Shiji Song, Shuang Li, and Gao Huang. Domain adaptation via prompt learning. arXiv preprint arXiv:2202.06687, 2022. 3 +[11] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In NeurIPS, 2014. 1 +[12] Ju He, Jie-Neng Chen, Shuai Liu, Adam Kortylewski, Cheng Yang, Yutong Bai, and Changhu Wang. TransFG: A transformer architecture for fine-grained recognition. In AAAI, 2022. 5, 6 +[13] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In NeurIPS, 2020. 2 +[14] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In CVPR, 2017. 2 +[15] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021. 3 +[16] Zhengbao Jiang, Frank F Xu, Jun Araki, and Graham Neubig. How can we know what language models know? TACL, 2020. 3 + +[17] Chen Ju, Tengda Han, Kunhao Zheng, Ya Zhang, and Weidi Xie. Prompting visual-language models for efficient video understanding. In ECCV, 2021. 3 +[18] Tero Karras, Miika Aittala, Janne Hellsten, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Training generative adversarial networks with limited data. In NeurIPS, 2020. 2, 3 +[19] Tero Karras, Miika Aittala, Samuli Laine, Erik Härkönen, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Alias-free generative adversarial networks. In NeurIPS, 2021. 3 +[20] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In CVPR, 2019. 2, 3 +[21] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of StyleGAN. In CVPR, 2020. 1, 2, 3, 5, 6 +[22] Gwanghyun Kim, Taesung Kwon, and Jong Chul Ye. DiffusionCLIP: Text-guided diffusion models for robust image manipulation. In CVPR, 2022. 2, 3, 5, 7 +[23] Brian Lester, Rami Al-Rfou, and Noah Constant. The power of scale for parameter-efficient prompt tuning. In EMNLP, 2021. 3 +[24] Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu Yuan, Lei Zhang, Jenq-Neng Hwang, Kai-Wei Chang, and Jianfeng Gao. 
Grounded language-image pre-training. In CVPR, 2022. 3 +[25] Xiang Lisa Li and Percy Liang. Prefix-Tuning: Optimizing continuous prompts for generation. In ACL/IJCNLP, 2021. 3 +[26] Weixin Liang, Zixuan Liu, and Can Liu. DAWSON: A domain adaptive few shot generation framework. arXiv preprint arXiv:2001.00576, 2020. 3 +[27] Xihui Liu, Dong Huk Park, Samaneh Azadi, Gong Zhang, Arman Chopikyan, Yuxiao Hu, Humphrey Shi, Anna Rohrbach, and Trevor Darrell. More control for free! image synthesis with semantic diffusion guidance. In WACV, 2023. 2 +[28] Xiao Liu, Yanan Zheng, Zhengxiao Du, Ming Ding, Yujie Qian, Zhilin Yang, and Jie Tang. GPT understands, too. arXiv preprint arXiv:2103.10385, 2021. 3 +[29] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaoou Tang. Deep learning face attributes in the wild. In ICCV, 2015. 7 +[30] Sangwoo Mo, Minsu Cho, and Jinwoo Shin. Freeze the discriminator: a simple baseline for fine-tuning GANs. In CVPR Workshops, 2020. 2, 3 +[31] Alexander Quinn Nichol and Prafulla Dhariwal. Improved denoising diffusion probabilistic models. In ICML, 2021. 2 +[32] Atsuhiro Noguchi and Tatsuya Harada. Image generation from small datasets via batch statistics adaptation. In ICCV, 2019. 3 +[33] Utkarsh Ojha, Yijun Li, Jingwan Lu, Alexei A Efros, Yong Jae Lee, Eli Shechtman, and Richard Zhang. Few-shot image generation via cross-domain correspondence. In CVPR, 2021. 2, 3 +[34] Fabio Petroni, Tim Rocktäschel, Patrick Lewis, Anton Bakhtin, Yuxiang Wu, Alexander H Miller, and Sebastian + +Riedel. Language models as knowledge bases? In EMNLP-IJCNLP, 2019. 3 +[35] Konpat Preechakul, Nattanat Chathee, Suttisak Wizadwongsa, and Supasorn Suwajanakorn. Diffusion Autoencoders: Toward a meaningful and decodable representation. In CVPR, 2022. 2, 5, 7 +[36] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In ICML, 2021. 3, 4, 5 +[37] Esther Robb, Wen-Sheng Chu, Abhishek Kumar, and Jia-Bin Huang. Few-shot adaptation of generative adversarial networks. arXiv preprint arXiv:2010.11943, 2020. 3 +[38] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training GANs. In NeurIPS, 2016. 5, 6 +[39] Tamar Rott Shaham, Tali Dekel, and Tomer Michaeli. SinGAN: Learning a generative model from a single natural image. In ICCV, 2019. 5, 6 +[40] Taylor Shin, Yasaman Razeghi, Robert L Logan IV, Eric Wallace, and Sameer Singh. AutoPrompt: Eliciting knowledge from language models with automatically generated prompts. In EMNLP, 2020. 3 +[41] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In ICML, 2015. 2 +[42] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. In ICLR, 2020. 2, 3, 5, 7 +[43] Yang Song and Stefano Ermon. Generative modeling by estimating gradients of the data distribution. In NeurIPS, 2019. 2 +[44] Omer Tov, Yuval Alaluf, Yotam Nitzan, Or Patashnik, and Daniel Cohen-Or. Designing an encoder for StyleGAN image manipulation. TOG, 2021. 5 +[45] Ngoc-Trung Tran, Viet-Hung Tran, Ngoc-Bao Nguyen, Trung-Kien Nguyen, and Ngai-Man Cheung. On data augmentation for GAN training. TIP, 2021. 3 +[46] Steven Walton, Ali Hassani, Xingqian Xu, Zhangyang Wang, and Humphrey Shi. StyleNAT: Giving each head a new perspective.
arXiv preprint arXiv:2211.05770, 2022. 2 +[47] Yaxing Wang, Abel Gonzalez-Garcia, David Berga, Luis Herranz, Fahad Shahbaz Khan, and Joost van de Weijer. MineGAN: effective knowledge transfer from GANs to target domains with few images. In CVPR, 2020. 2, 3 +[48] Yaxing Wang, Chenshen Wu, Luis Herranz, Joost van de Weijer, Abel Gonzalez-Garcia, and Bogdan Raducanu. Transferring GANs: generating images from limited data. In ECCV, 2018. 2, 3 +[49] Jiayu Xiao, Liang Li, Chaofei Wang, Zheng-Jun Zha, and Qingming Huang. Few shot generative model adaption via relaxed spatial structural alignment. In CVPR, 2022. 2, 3, 5, 6 +[50] Xingqian Xu, Shant Navasardyan, Vahram Tadevosyan, Andranik Sargsyan, Yadong Mu, and Humphrey Shi. Image completion with heterogeneously filtered spectral hints. In WACV, 2023. 2 + +[51] Xingqian Xu, Zhangyang Wang, Eric Zhang, Kai Wang, and Humphrey Shi. Versatile Diffusion: Text, images and variations all in one diffusion model. arXiv preprint arXiv:2211.08332, 2022. 2 +[52] Han Zhang, Zizhao Zhang, Augustus Odena, and Honglak Lee. Consistency regularization for generative adversarial networks. In ICLR, 2019. 3 +[53] Shengyu Zhao, Zhijian Liu, Ji Lin, Jun-Yan Zhu, and Song Han. Differentiable augmentation for data-efficient GAN training. In NeurIPS, 2020. 2, 3 +[54] Zhengli Zhao, Sameer Singh, Honglak Lee, Zizhao Zhang, Augustus Odena, and Han Zhang. Improved consistency regularization for GANs. In AAAI, 2021. 3 +[55] Zhengli Zhao, Zizhao Zhang, Ting Chen, Sameer Singh, and Han Zhang. Image augmentations for GAN training. arXiv preprint arXiv:2006.02595, 2020. 3 +[56] Zexuan Zhong, Dan Friedman, and Danqi Chen. Factual probing is [MASK]: Learning vs. learning to recall. In NAACL-HLT, 2021. 3 +[57] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Conditional prompt learning for vision-language models. In CVPR, 2022. 3 +[58] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. IJCV, 2022.
3, 4, 8 \ No newline at end of file diff --git a/2023/Zero-Shot Generative Model Adaptation via Image-Specific Prompt Learning/images.zip b/2023/Zero-Shot Generative Model Adaptation via Image-Specific Prompt Learning/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..0fe893015a8f760826db66668eecc10eda62c3fb --- /dev/null +++ b/2023/Zero-Shot Generative Model Adaptation via Image-Specific Prompt Learning/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3aa0f45dba721a9c2bfb1fba0d7c14b57caa06be5d9972e12209813566902ae4 +size 1090498 diff --git a/2023/Zero-Shot Generative Model Adaptation via Image-Specific Prompt Learning/layout.json b/2023/Zero-Shot Generative Model Adaptation via Image-Specific Prompt Learning/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..38ec34052aaeec7884d1a21e28e5c183bc77f8bc --- /dev/null +++ b/2023/Zero-Shot Generative Model Adaptation via Image-Specific Prompt Learning/layout.json @@ -0,0 +1,9457 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 60, + 103, + 534, + 123 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 103, + 534, + 123 + ], + "spans": [ + { + "bbox": [ + 60, + 103, + 534, + 123 + ], + "type": "text", + "content": "Zero-shot Generative Model Adaptation via Image-specific Prompt Learning" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 61, + 141, + 523, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 141, + 523, + 171 + ], + "spans": [ + { + "bbox": [ + 61, + 141, + 523, + 171 + ], + "type": "text", + "content": "Jiayi Guo" + }, + { + "bbox": [ + 61, + 141, + 523, + 171 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 61, + 141, + 523, + 171 + ], + "type": "text", + "content": " Chaofei Wang" + }, + { + "bbox": [ + 61, + 141, + 523, + 171 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 61, + 141, + 523, + 171 + ], + "type": "text", + "content": " You Wu" + }, + { + "bbox": [ + 61, + 141, + 523, + 171 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 61, + 141, + 523, + 171 + ], + "type": "text", + "content": " Eric Zhang" + }, + { + "bbox": [ + 61, + 141, + 523, + 171 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 61, + 141, + 523, + 171 + ], + "type": "text", + "content": " Kai Wang" + }, + { + "bbox": [ + 61, + 141, + 523, + 171 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 61, + 141, + 523, + 171 + ], + "type": "text", + "content": " Xingqian Xu" + }, + { + "bbox": [ + 61, + 141, + 523, + 171 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 61, + 141, + 523, + 171 + ], + "type": "text", + "content": " Shiji Song" + }, + { + "bbox": [ + 61, + 141, + 523, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 61, + 141, + 523, + 171 + ], + "type": "text", + "content": " Humphrey Shi" + }, + { + "bbox": [ + 61, + 141, + 523, + 171 + ], + "type": "inline_equation", + "content": "^{3,4\\dagger}" + }, + { + "bbox": [ + 61, + 141, + 523, + 171 + ], + "type": "text", + "content": " Gao Huang" + }, + { + "bbox": [ + 61, + 141, + 523, + 171 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 77, + 172, + 514, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 172, + 514, + 200 + ], + "spans": [ + { + 
"bbox": [ + 77, + 172, + 514, + 200 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 77, + 172, + 514, + 200 + ], + "type": "text", + "content": "Tsinghua University, BNRist " + }, + { + "bbox": [ + 77, + 172, + 514, + 200 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 77, + 172, + 514, + 200 + ], + "type": "text", + "content": "UCAS " + }, + { + "bbox": [ + 77, + 172, + 514, + 200 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 77, + 172, + 514, + 200 + ], + "type": "text", + "content": "SHI Labs @ Oregon & UIUC " + }, + { + "bbox": [ + 77, + 172, + 514, + 200 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 77, + 172, + 514, + 200 + ], + "type": "text", + "content": "Picsart AI Research (PAIR) https://github.com/Picsart-AI-Research/IPL-Zero-Shot-Generative-Model-Adaptation" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 55, + 209, + 542, + 450 + ], + "blocks": [ + { + "bbox": [ + 55, + 209, + 542, + 450 + ], + "lines": [ + { + "bbox": [ + 55, + 209, + 542, + 450 + ], + "spans": [ + { + "bbox": [ + 55, + 209, + 542, + 450 + ], + "type": "image", + "image_path": "d738b2a13791d5c547a3216529ad819d26214393a5987c93bbfd8295911f861c.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 455, + 546, + 500 + ], + "lines": [ + { + "bbox": [ + 46, + 455, + 546, + 500 + ], + "spans": [ + { + "bbox": [ + 46, + 455, + 546, + 500 + ], + "type": "text", + "content": "Figure 1. The mode collapse issue. For NADA [21] and our method, the same generator pre-trained on the source domain of \"Photo\" is adapted to the unseen target domains of \"Disney\", \"Anime painting\", \"Wall painting\" and \"Ukiyo-e\" only with the domain labels. The images above the dotted line are some examples from the internet. The generated images of NADA exhibit some similar unseen patterns (yellow box areas) which are undesired in terms of quality and diversity. This issue is largely addressed by our method." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 143, + 515, + 192, + 528 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 515, + 192, + 528 + ], + "spans": [ + { + "bbox": [ + 143, + 515, + 192, + 528 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 541, + 288, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 541, + 288, + 685 + ], + "spans": [ + { + "bbox": [ + 45, + 541, + 288, + 685 + ], + "type": "text", + "content": "Recently, CLIP-guided image synthesis has shown appealing performance on adapting a pre-trained source-domain generator to an unseen target domain. It does not require any target-domain samples but only the textual domain labels. The training is highly efficient, e.g., a few minutes. However, existing methods still have some limitations in the quality of generated images and may suffer from the mode collapse issue. A key reason is that a fixed adaptation direction is applied for all cross-domain image pairs, which leads to identical supervision signals. 
To address this issue, we propose an Image-specific Prompt Learning (IPL) method, which learns specific prompt vec" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 517, + 545, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 517, + 545, + 648 + ], + "spans": [ + { + "bbox": [ + 306, + 517, + 545, + 648 + ], + "type": "text", + "content": "tors for each source-domain image. This produces a more precise adaptation direction for every cross-domain image pair, endowing the target-domain generator with greatly enhanced flexibility. Qualitative and quantitative evaluations on various domains demonstrate that IPL effectively improves the quality and diversity of synthesized images and alleviates the mode collapse. Moreover, IPL is independent of the structure of the generative model, such as generative adversarial networks or diffusion models. Code is available at https://github.com/Picsart-AI-Research/IPL-Zero-Shot-Generative-Model-Adaptation." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 670, + 386, + 682 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 670, + 386, + 682 + ], + "spans": [ + { + "bbox": [ + 306, + 670, + 386, + 682 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 690, + 545, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 690, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 306, + 690, + 545, + 715 + ], + "type": "text", + "content": "In recent years, image synthesis using generative adversarial networks (GANs) [11] has been rapidly developed." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 694, + 125, + 704 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 694, + 125, + 704 + ], + "spans": [ + { + "bbox": [ + 58, + 694, + 125, + 704 + ], + "type": "text", + "content": "*Equal contribution." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 59, + 704, + 138, + 714 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 704, + 138, + 714 + ], + "spans": [ + { + "bbox": [ + 59, + 704, + 138, + 714 + ], + "type": "text", + "content": "† Corresponding authors." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11494" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 193 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 193 + ], + "type": "text", + "content": "The state-of-the-art methods can generate images that are hard to be distinguished from real data [14, 20, 21, 46, 50]. However, the GAN-based methods heavily rely on vast quantities of training examples, and adopt a cumbersome adversarial training scheme which generally costs many hours of training time. Unfortunately, in many real-world scenarios, data acquisition is difficult or expensive. For example, in the artistic domains, it is impossible to have artists make thousands of creations. The high training cost is also unacceptable on some embedded devices, e.g., cellphones." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 194, + 289, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 194, + 289, + 469 + ], + "spans": [ + { + "bbox": [ + 46, + 194, + 289, + 469 + ], + "type": "text", + "content": "To address these issues, researchers begin to focus on the generative model adaptation. The goal of this task is to adapt a pre-trained source-domain generator to a target domain with limited data. Many few-shot GAN-based methods are proposed, such as TGAN [48], FreezeD [30], MinGAN [47], ADA [18], DiffAug [53], IDC [33] and RSSA [49], etc. However, these methods still require some training images of the target domain and follow the adversarial training scheme. As a pioneer work, StyleGAN-NADA [8] (NADA for short) proposes a zero-shot adaptation method, which only requires textual domain labels and discards the cumbersome adversarial training scheme by introducing a pre-trained CLIP model. Although efficient, it still has obvious deficiencies, i.e., the limited quality and mode collapse of generated images. As shown in Fig.1, we adapt a pretrained generator of \"Photo\" domain to \"Disney\", \"Anime painting\", \"Wall painting\" and \"Ukiyo-e\" domains. For the results of NADA [8], we notice that the generated images of the same target domain always show some homogeneous patterns which degrade the image quality and diversity, such as deep nasolabial folds in \"Disney\", squinting eyes in \"Anime painting\", red cheeks in \"Wall painting\" and blue eyebrows in \"Ukiyo-e\" (yellow box areas)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 472, + 287, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 472, + 287, + 628 + ], + "spans": [ + { + "bbox": [ + 46, + 472, + 287, + 628 + ], + "type": "text", + "content": "By exploring the factors behind this phenomenon, we find that the key factor is the fixed adaptation direction produced by manually designed prompts. Sharing the direction for all cross-domain image pairs leads to identical supervision signals for the model adaptation. Consider the example, adapting a generator of \"Human\" domain to \"Tolkien elf\" domain as shown in Fig.2. 
The previous works [8, 22] adopt manually designed prompts (e.g., \"A photo of a\") plus the domain label to produce a fixed adaptation direction, which is shared by all cross-domain image pairs (Fig.2 (a)) in the adaptation process. We argue that the constraint is too restrictive and suppresses the image-specific features, leading to homogeneous generated patterns." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 630, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 630, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 630, + 288, + 715 + ], + "type": "text", + "content": "In this paper, we propose an Image-specific Prompt Learning (IPL) method to address the above issue. The motivation is setting more precise and diversified adaptation directions by customizing more image-specific prompts, for instance \"Asian girl\", \"Curly hair lady\" and \"Elder glass man\" (Fig.2 (b)). These adaptation directions endow the target-domain generator with high flexibility to synthesize" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 307, + 72, + 545, + 259 + ], + "blocks": [ + { + "bbox": [ + 307, + 72, + 545, + 259 + ], + "lines": [ + { + "bbox": [ + 307, + 72, + 545, + 259 + ], + "spans": [ + { + "bbox": [ + 307, + 72, + 545, + 259 + ], + "type": "image", + "image_path": "7d3df592624051881c753925364c9354fc57843906aafa0d6691c58435607553.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 270, + 547, + 324 + ], + "lines": [ + { + "bbox": [ + 304, + 270, + 547, + 324 + ], + "spans": [ + { + "bbox": [ + 304, + 270, + 547, + 324 + ], + "type": "text", + "content": "Figure 2. An illustration of our motivation. The previous methods adopt manual prompts to compute a fixed adaptation direction for all cross-domain image pairs, while our method learns image-specific prompts for producing more precise and diversified adaptation directions." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 335, + 545, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 335, + 545, + 562 + ], + "spans": [ + { + "bbox": [ + 304, + 335, + 545, + 562 + ], + "type": "text", + "content": "more diversified images. The proposed IPL is a two-stage method. In Stage 1, a latent mapper is trained to produce an image-specific set of prompt vectors conditioned on each source-domain image by a contrastive training scheme. The learned prompt vectors contain more specific and diversified features of the source-domain images than the fixed prompt vectors. We further propose a domain regularization loss to ensure that the learned prompt vectors are compatible with the target domain. In Stage 2, we compute more precise and diversified adaptation directions for each cross-domain image pair, and train the target-domain generator with an adaptive directional CLIP loss, which can be viewed as an improved version of the Directional CLIP Loss [8]. As shown in Fig.1, our method alleviates the mode collapse issue well. Extensive experiments across a wide range of domains demonstrate that the proposed IPL effectively improves the quality of synthesized images and overcomes the mode collapse issue. User studies and ablation studies are also conducted to validate the effectiveness of our method." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 562, + 545, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 562, + 545, + 647 + ], + "spans": [ + { + "bbox": [ + 304, + 562, + 545, + 647 + ], + "type": "text", + "content": "It is worth noting that our proposed IPL method is independent of the structure of the generative model, and can be applied to the recent diffusion models [13,27,31,35,41-43, 51]. Thus we also combine IPL with diffusion models and get a more robust and stronger generative capacity, especially on complex images, which shows the high effectiveness and adaptability of our approach." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 657, + 392, + 670 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 657, + 392, + 670 + ], + "spans": [ + { + "bbox": [ + 306, + 657, + 392, + 670 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": "Generative model adaptation. Generative model adaptation is the task of adapting a generative model trained on a large-scale source domain to a data-limited target domain." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11495" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "content": "According to the size of the training dataset of the target domain, it can be directly divided into two main categories: few-shot generative model adaptation and zero-shot generative model adaptation. For the few-shot generative model adaptation task, the most natural approach is to fine-tune a pre-trained GAN [2, 4, 26, 48]. However, fine-tuning the entire network weights used to result in overfitting. Subsequently, many methods were proposed to alleviate the overfitting issue. They either imposed strong regularization [52, 54], or modified the network parameters with a slight perturbation [30, 32, 37, 47], or preserved some important information by cross-domain alignment [33, 49], or performed data augmentation [45, 53, 55]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 232, + 289, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 232, + 289, + 412 + ], + "spans": [ + { + "bbox": [ + 46, + 232, + 289, + 412 + ], + "type": "text", + "content": "For the zero-shot generative model adaptation task, NADA [8] first proposed to introduce a pre-trained CLIP model for supplying necessary prior knowledge. It only required textual domain labels, and encoded the domain gap as a text-guided adaptation direction in CLIP space. To enhance the identity-preserving capability of real-world image translation, Kim et al. further proposed DiffusionCLIP [22] which utilized diffusion models [42] instead of StyleGANs [18-21] in NADA. 
Nevertheless, these existing works all adopt a fixed adaptation direction which only contains the basic domain knowledge but no image-specific features. In this paper, we argue that this shared fixed adaptation direction may lead to the mode collapse issue. To produce more accurate and adaptive adaptation directions, we propose to learn diverse and specific prompt vectors for each image." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 415, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 415, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 415, + 289, + 715 + ], + "type": "text", + "content": "Prompt learning. Prompt engineering is first introduced as a knowledge probing approach [34]. Given cloze-style prompts, it induces pre-trained language models to generate the corresponding answers. However, manually designed prompts may be sub-optimal and provide imprecise guidance. To tackle this issue, prompt learning [9, 16, 23, 25, 28, 40, 56] has been widely studied in natural language processing to automatically explore the optimal set of prompts. With the unprecedented development of vision-language models [15, 36] in recent years, researchers begin to apply prompt learning to computer vision tasks [7, 10, 17, 24, 57, 58]. In specific, Zhou et al. [57, 58] first adopted context optimization in image classification tasks by modeling context words with continuous vectors in the word embedding space. Subsequently, many downstream tasks in computer vision were also explored, e.g., object detection [7], visual grounding [24], video understanding [17] and transfer learning [10]. As far as we know, this is the first work to propose an adaptive prompt learning scheme for generative model adaptation. Different from previous prompt learning schemes, our method introduces a latent mapper to learn a specific set of prompt vectors for each image. When training the target-domain generator, the learned image-specific prompt vectors could produce more precise adaptation directions to provide better supervision signals." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 305, + 71, + 389, + 86 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 71, + 389, + 86 + ], + "spans": [ + { + "bbox": [ + 305, + 71, + 389, + 86 + ], + "type": "text", + "content": "3. Methodology" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 91, + 547, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 91, + 547, + 198 + ], + "spans": [ + { + "bbox": [ + 304, + 91, + 547, + 198 + ], + "type": "text", + "content": "The goal of zero-shot generative model adaptation is to adapt a pre-trained source-domain generator " + }, + { + "bbox": [ + 304, + 91, + 547, + 198 + ], + "type": "inline_equation", + "content": "G_{\\mathrm{s}}" + }, + { + "bbox": [ + 304, + 91, + 547, + 198 + ], + "type": "text", + "content": " to an unseen target domain, and get the target-domain generator " + }, + { + "bbox": [ + 304, + 91, + 547, + 198 + ], + "type": "inline_equation", + "content": "G_{\\mathrm{t}}" + }, + { + "bbox": [ + 304, + 91, + 547, + 198 + ], + "type": "text", + "content": ". 
The source domain with the domain label " + }, + { + "bbox": [ + 304, + 91, + 547, + 198 + ], + "type": "inline_equation", + "content": "\\mathrm{Y_s}" + }, + { + "bbox": [ + 304, + 91, + 547, + 198 + ], + "type": "text", + "content": ", e.g., \"Human\", can obtain plentiful high-quality images by " + }, + { + "bbox": [ + 304, + 91, + 547, + 198 + ], + "type": "inline_equation", + "content": "G_{\\mathrm{s}}" + }, + { + "bbox": [ + 304, + 91, + 547, + 198 + ], + "type": "text", + "content": ". The target domain is described only through the domain label " + }, + { + "bbox": [ + 304, + 91, + 547, + 198 + ], + "type": "inline_equation", + "content": "\\mathrm{Y_t}" + }, + { + "bbox": [ + 304, + 91, + 547, + 198 + ], + "type": "text", + "content": ", e.g., \"Tolkien elf\", with no images. Following [8, 22], a pre-trained CLIP model [36] including an image encoder " + }, + { + "bbox": [ + 304, + 91, + 547, + 198 + ], + "type": "inline_equation", + "content": "E_{\\mathrm{I}}" + }, + { + "bbox": [ + 304, + 91, + 547, + 198 + ], + "type": "text", + "content": " and a text encoder " + }, + { + "bbox": [ + 304, + 91, + 547, + 198 + ], + "type": "inline_equation", + "content": "E_{\\mathrm{T}}" + }, + { + "bbox": [ + 304, + 91, + 547, + 198 + ], + "type": "text", + "content": " is introduced." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 199, + 547, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 199, + 547, + 486 + ], + "spans": [ + { + "bbox": [ + 304, + 199, + 547, + 486 + ], + "type": "text", + "content": "We propose a two-stage method named Image-specific Prompt Learning (IPL). Its framework is shown in Fig.3. In Stage 1, a latent mapper " + }, + { + "bbox": [ + 304, + 199, + 547, + 486 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 304, + 199, + 547, + 486 + ], + "type": "text", + "content": " is trained to produce a set of image-specific prompt vectors " + }, + { + "bbox": [ + 304, + 199, + 547, + 486 + ], + "type": "inline_equation", + "content": "\\{[\\mathbf{V}]_1^i, [\\mathbf{V}]_2^i, \\dots, [\\mathbf{V}]_m^i\\}" + }, + { + "bbox": [ + 304, + 199, + 547, + 486 + ], + "type": "text", + "content": " for each latent code " + }, + { + "bbox": [ + 304, + 199, + 547, + 486 + ], + "type": "inline_equation", + "content": "w^i" + }, + { + "bbox": [ + 304, + 199, + 547, + 486 + ], + "type": "text", + "content": " of a source-domain image. Each prompt vector has the same dimension with word embeddings in CLIP space. The training loss consists of a contrastive learning loss " + }, + { + "bbox": [ + 304, + 199, + 547, + 486 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{contr}}" + }, + { + "bbox": [ + 304, + 199, + 547, + 486 + ], + "type": "text", + "content": " and a domain regularization loss " + }, + { + "bbox": [ + 304, + 199, + 547, + 486 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{domain}}" + }, + { + "bbox": [ + 304, + 199, + 547, + 486 + ], + "type": "text", + "content": ". The former aims to preserve the image-specific features of each source domain image in the learned prompt vectors. The latter constrains the image-specific features to be suitable to the target domain, which means the learned features should not conflict with the target domain. For example, the features of prompts like \"round ear\" should not be contained in the ideal prompt vectors if the target domain is \"Tolkien elf\". 
In Stage 2, the trained latent mapper " + }, + { + "bbox": [ + 304, + 199, + 547, + 486 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 304, + 199, + 547, + 486 + ], + "type": "text", + "content": " is plugged into the training process of the target-domain generator " + }, + { + "bbox": [ + 304, + 199, + 547, + 486 + ], + "type": "inline_equation", + "content": "G_{\\mathrm{t}}" + }, + { + "bbox": [ + 304, + 199, + 547, + 486 + ], + "type": "text", + "content": ", and produces more precise and diversified adaptation directions for cross-domain image pairs. This training stage follows [8] except that learned prompt vectors produced by the latent mapper " + }, + { + "bbox": [ + 304, + 199, + 547, + 486 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 304, + 199, + 547, + 486 + ], + "type": "text", + "content": " replace the fixed prompt vectors. The final textual supervision information includes shared learned prompt vectors and respective embeddings of the original domain labels." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 493, + 476, + 506 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 493, + 476, + 506 + ], + "spans": [ + { + "bbox": [ + 305, + 493, + 476, + 506 + ], + "type": "text", + "content": "3.1. Image-specific prompt learning" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 512, + 547, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 512, + 547, + 655 + ], + "spans": [ + { + "bbox": [ + 304, + 512, + 547, + 655 + ], + "type": "text", + "content": "General prompts. The previous methods [8, 22] compute a fixed adaptation direction produced by two embeddings of manually designed prompts, e.g., \"a photo of a human\" and \"a photo of a Tolkien elf\", then constrain the directions of all cross-domain pairs to be parallel with the adaptation direction. In contrast to manually designed prompts, prompt learning [58] aims to find the optimal set of prompt vectors for a domain by directly tuning the embeddings of prompts. Formally, we define a general prompt matrix " + }, + { + "bbox": [ + 304, + 512, + 547, + 655 + ], + "type": "inline_equation", + "content": "\\mathrm{M_d}" + }, + { + "bbox": [ + 304, + 512, + 547, + 655 + ], + "type": "text", + "content": " to represent a given domain d. 
" + }, + { + "bbox": [ + 304, + 512, + 547, + 655 + ], + "type": "inline_equation", + "content": "\\mathrm{M_d}" + }, + { + "bbox": [ + 304, + 512, + 547, + 655 + ], + "type": "text", + "content": " consists of the prompt vectors " + }, + { + "bbox": [ + 304, + 512, + 547, + 655 + ], + "type": "inline_equation", + "content": "[\\mathbf{V}]_1, [\\mathbf{V}]_2, \\dots, [\\mathbf{V}]_m" + }, + { + "bbox": [ + 304, + 512, + 547, + 655 + ], + "type": "text", + "content": " and the embedding of the domain label " + }, + { + "bbox": [ + 304, + 512, + 547, + 655 + ], + "type": "inline_equation", + "content": "[\\mathrm{Y_d}]" + }, + { + "bbox": [ + 304, + 512, + 547, + 655 + ], + "type": "text", + "content": " as below:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 365, + 659, + 545, + 673 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 659, + 545, + 673 + ], + "spans": [ + { + "bbox": [ + 365, + 659, + 545, + 673 + ], + "type": "interline_equation", + "content": "\\mathrm {M} _ {\\mathrm {d}} = [ \\mathbf {V} ] _ {1} [ \\mathbf {V} ] _ {2} \\dots [ \\mathbf {V} ] _ {m} [ \\mathrm {Y} _ {\\mathrm {d}} ], \\tag {1}", + "image_path": "bc52d27a69eed1c26f45271095b7a178f2c9450a0dd60ad6145f6cf7455a2925.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 677, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 547, + 715 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 677, + 547, + 715 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 304, + 677, + 547, + 715 + ], + "type": "text", + "content": " is the number of prompts. Suppose the dimension of each embedding is " + }, + { + "bbox": [ + 304, + 677, + 547, + 715 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 677, + 547, + 715 + ], + "type": "text", + "content": ". Then the dimension of " + }, + { + "bbox": [ + 304, + 677, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{M}_{\\mathrm{d}}" + }, + { + "bbox": [ + 304, + 677, + 547, + 715 + ], + "type": "text", + "content": " should be " + }, + { + "bbox": [ + 304, + 677, + 547, + 715 + ], + "type": "inline_equation", + "content": "(m + 1) \\times k" + }, + { + "bbox": [ + 304, + 677, + 547, + 715 + ], + "type": "text", + "content": ". 
In [8, 22], the prompt vectors" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "11496" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 78, + 83, + 504, + 232 + ], + "blocks": [ + { + "bbox": [ + 80, + 72, + 251, + 83 + ], + "lines": [ + { + "bbox": [ + 80, + 72, + 251, + 83 + ], + "spans": [ + { + "bbox": [ + 80, + 72, + 251, + 83 + ], + "type": "text", + "content": "Stage 1: Training latent mapper for prompt learning" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 78, + 83, + 504, + 232 + ], + "lines": [ + { + "bbox": [ + 78, + 83, + 504, + 232 + ], + "spans": [ + { + "bbox": [ + 78, + 83, + 504, + 232 + ], + "type": "image", + "image_path": "fff8e843236ad10f7f82005b31182998855786026da78fc0b4f6652c2ce03917.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 94, + 246, + 460, + 348 + ], + "blocks": [ + { + "bbox": [ + 83, + 236, + 237, + 246 + ], + "lines": [ + { + "bbox": [ + 83, + 236, + 237, + 246 + ], + "spans": [ + { + "bbox": [ + 83, + 236, + 237, + 246 + ], + "type": "text", + "content": "Stage 2: Training generator for image synthesis" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 94, + 246, + 460, + 348 + ], + "lines": [ + { + "bbox": [ + 94, + 246, + 460, + 348 + ], + "spans": [ + { + "bbox": [ + 94, + 246, + 460, + 348 + ], + "type": "image", + "image_path": "8d9c252ea1b985f5bf7a741110cdaaa4f4d7a769b1958bd4678937ff495d9bd4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "lines": [ + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "spans": [ + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "type": "text", + "content": "Figure 3. The framework of our method. In Stage 1, a latent mapper " + }, + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "type": "text", + "content": " is trained for prompt learning by a contrastive learning loss " + }, + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{contr}}" + }, + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "type": "text", + "content": " and a domain regularization loss " + }, + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{domain}}" + }, + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "type": "text", + "content": ". The image encoder " + }, + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "type": "inline_equation", + "content": "E_{\\mathrm{I}}" + }, + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "type": "text", + "content": " and the text encoder " + }, + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "type": "inline_equation", + "content": "E_{\\mathrm{T}}" + }, + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "type": "text", + "content": " are from the CLIP model [36]. 
In Stage 2, the target-domain generator " + }, + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "type": "inline_equation", + "content": "G_{\\mathrm{t}}" + }, + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "type": "text", + "content": " is trained for image synthesis by the improved Directional CLIP Loss " + }, + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{adapt}}" + }, + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "type": "text", + "content": " in which the adaptive prompts produced by the latent mapper are applied. In two stages, the locked modules are fixed while the unlocked modules are trained. For simplicity, we replace " + }, + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "type": "inline_equation", + "content": "E_{\\mathrm{I}}(G_{\\mathrm{s}}(w^{i}))" + }, + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "type": "inline_equation", + "content": "E_{\\mathrm{T}}(\\mathrm{M}_{\\mathrm{s}}^{i})" + }, + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "type": "inline_equation", + "content": "\\Gamma^i" + }, + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "type": "inline_equation", + "content": "\\mathrm{T_s}^i" + }, + { + "bbox": [ + 46, + 357, + 545, + 413 + ], + "type": "text", + "content": ", respectively." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 427, + 287, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 427, + 287, + 487 + ], + "spans": [ + { + "bbox": [ + 46, + 427, + 287, + 487 + ], + "type": "inline_equation", + "content": "[\\mathbf{V}]_1, [\\mathbf{V}]_2, \\dots, [\\mathbf{V}]_m" + }, + { + "bbox": [ + 46, + 427, + 287, + 487 + ], + "type": "text", + "content": " are fixed embeddings of manually designed prompts. For prompt learning [58], the prompt vectors are learned by encoding each training image of the domain " + }, + { + "bbox": [ + 46, + 427, + 287, + 487 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 46, + 427, + 287, + 487 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 46, + 427, + 287, + 487 + ], + "type": "inline_equation", + "content": "E_{\\mathrm{I}}" + }, + { + "bbox": [ + 46, + 427, + 287, + 487 + ], + "type": "text", + "content": " and the prompt matrix " + }, + { + "bbox": [ + 46, + 427, + 287, + 487 + ], + "type": "inline_equation", + "content": "\\mathrm{M_d}" + }, + { + "bbox": [ + 46, + 427, + 287, + 487 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 46, + 427, + 287, + 487 + ], + "type": "inline_equation", + "content": "E_{\\mathrm{T}}" + }, + { + "bbox": [ + 46, + 427, + 287, + 487 + ], + "type": "text", + "content": ", and then maximizing the cosine similarity between them." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 492, + 287, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 492, + 287, + 649 + ], + "spans": [ + { + "bbox": [ + 46, + 492, + 287, + 649 + ], + "type": "text", + "content": "Inspired by prompt learning, in the zero-shot generative model adaptation task, a natural idea is to learn an optimal set of prompt vectors instead of the manually designed prompts in NADA [8]. 
Although the adaptation direction calculated by the learned prompt vectors seems to be more reasonable than that of the manually designed prompts, it is still fixed and shared for all cross-domain image pairs. These fixed learned prompt vectors can not solve the mode collapse issue (Experimental validations can be seen in Sec. 4.4). To obtain more flexible and diversified adaptation directions, we further propose to learn a set of image-specific prompt vectors for each image, which can be regarded as an improved version of prompt learning." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 654, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 654, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 654, + 287, + 715 + ], + "type": "text", + "content": "Image-specific prompts. Utilizing the source-domain generator " + }, + { + "bbox": [ + 46, + 654, + 287, + 715 + ], + "type": "inline_equation", + "content": "G_{\\mathrm{s}}" + }, + { + "bbox": [ + 46, + 654, + 287, + 715 + ], + "type": "text", + "content": ", we train a latent mapper " + }, + { + "bbox": [ + 46, + 654, + 287, + 715 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 46, + 654, + 287, + 715 + ], + "type": "text", + "content": " as shown in Fig.3 (Stage 1). Through the mapper, each image of the source domain can be matched to an optimal set of prompt vectors. Formally, given a latent code " + }, + { + "bbox": [ + 46, + 654, + 287, + 715 + ], + "type": "inline_equation", + "content": "w^{i}" + }, + { + "bbox": [ + 46, + 654, + 287, + 715 + ], + "type": "text", + "content": ", corresponding to" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 427, + 545, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 427, + 545, + 499 + ], + "spans": [ + { + "bbox": [ + 304, + 427, + 545, + 499 + ], + "type": "text", + "content": "the " + }, + { + "bbox": [ + 304, + 427, + 545, + 499 + ], + "type": "inline_equation", + "content": "i^{\\mathrm{th}}" + }, + { + "bbox": [ + 304, + 427, + 545, + 499 + ], + "type": "text", + "content": " image in the source domain, the image-specific set of prompt vectors " + }, + { + "bbox": [ + 304, + 427, + 545, + 499 + ], + "type": "inline_equation", + "content": "\\{[\\mathbf{V}]_1^i,[\\mathbf{V}]_2^i,\\dots ,[\\mathbf{V}]_m^i\\}" + }, + { + "bbox": [ + 304, + 427, + 545, + 499 + ], + "type": "text", + "content": " can be obtained by " + }, + { + "bbox": [ + 304, + 427, + 545, + 499 + ], + "type": "inline_equation", + "content": "F(w^{i},\\theta)" + }, + { + "bbox": [ + 304, + 427, + 545, + 499 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 427, + 545, + 499 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 304, + 427, + 545, + 499 + ], + "type": "text", + "content": " denotes the parameters of the latent mapper " + }, + { + "bbox": [ + 304, + 427, + 545, + 499 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 304, + 427, + 545, + 499 + ], + "type": "text", + "content": ". 
Following the definition of the prompt matrix in Eq.(1), we define an image-specific prompt matrix of the " + }, + { + "bbox": [ + 304, + 427, + 545, + 499 + ], + "type": "inline_equation", + "content": "i^{\\mathrm{th}}" + }, + { + "bbox": [ + 304, + 427, + 545, + 499 + ], + "type": "text", + "content": " source-domain image as:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 333, + 505, + 545, + 519 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 505, + 545, + 519 + ], + "spans": [ + { + "bbox": [ + 333, + 505, + 545, + 519 + ], + "type": "interline_equation", + "content": "\\mathrm {M} _ {\\mathrm {s}} ^ {i} = F \\left(w ^ {i}, \\theta\\right) \\left[ \\mathrm {Y} _ {\\mathrm {s}} \\right] = \\left[ \\mathrm {V} \\right] _ {1} ^ {i} \\left[ \\mathrm {V} \\right] _ {2} ^ {i} \\dots \\left[ \\mathrm {V} \\right] _ {m} ^ {i} \\left[ \\mathrm {Y} _ {\\mathrm {s}} \\right]. \\tag {2}", + "image_path": "a71c1c6ef81a0886b758c8c057320d5f12415234367baa9aa29986a55480ff64.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 524, + 545, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 524, + 545, + 547 + ], + "spans": [ + { + "bbox": [ + 304, + 524, + 545, + 547 + ], + "type": "text", + "content": "In this paper, " + }, + { + "bbox": [ + 304, + 524, + 545, + 547 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 304, + 524, + 545, + 547 + ], + "type": "text", + "content": " is a common four-layer fully-connected network. Next, we show how to train it." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "spans": [ + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "text", + "content": "Contrastive training scheme. Given a batch of latent codes " + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "inline_equation", + "content": "\\{w^1, w^2, \\dots, w^n\\}" + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "text", + "content": ", we can produce a batch of sets of prompt matrices " + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "inline_equation", + "content": "\\{\\mathrm{M_s^1}, \\mathrm{M_s^2}, \\dots, \\mathrm{M_s^n}\\}" + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "text", + "content": " and a batch of images " + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "inline_equation", + "content": "\\{G_{\\mathrm{s}}(w^{1}), G_{\\mathrm{s}}(w^{2}), \\dots, G_{\\mathrm{s}}(w^{n})\\}" + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "inline_equation", + "content": "G_{\\mathrm{s}}" + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "text", + "content": ". 
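A minimal sketch of the latent mapper F and of Eq. (2), assuming a 512-dimensional latent code and a CLIP word-embedding dimension k = 512; the class name, hidden width, and ReLU activation are assumptions, since the text only states that F is a four-layer fully-connected network:

```python
import torch
import torch.nn as nn

class LatentMapper(nn.Module):
    # Sketch of F(w, theta): maps a latent code w^i to m image-specific prompt
    # vectors [V]^i_1 ... [V]^i_m. Layer widths and activation are assumptions.
    def __init__(self, w_dim: int = 512, m: int = 4, k: int = 512, hidden: int = 512):
        super().__init__()
        self.m, self.k = m, k
        self.net = nn.Sequential(
            nn.Linear(w_dim, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, m * k),
        )

    def forward(self, w: torch.Tensor) -> torch.Tensor:
        # w: (n, w_dim) -> (n, m, k) image-specific prompt vectors
        return self.net(w).view(-1, self.m, self.k)

def image_specific_prompt_matrices(prompt_vectors: torch.Tensor,
                                   src_label_emb: torch.Tensor) -> torch.Tensor:
    # Eq. (2): M_s^i = F(w^i, theta) [Y_s]; append the source-domain label
    # embedding to each image-specific set of prompt vectors.
    n = prompt_vectors.shape[0]
    label = src_label_emb.reshape(1, 1, -1).expand(n, 1, -1)   # (n, 1, k)
    return torch.cat([prompt_vectors, label], dim=1)           # (n, m + 1, k)
```

A typical call would be `M_s = image_specific_prompt_matrices(LatentMapper()(w_batch), y_s_emb)`, one prompt matrix per latent code in the batch.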
Then " + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "inline_equation", + "content": "n \\times n" + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "text", + "content": " pairs " + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "inline_equation", + "content": "< G_{\\mathrm{s}}(w^{i}), \\mathrm{M}_{\\mathrm{s}}^{j} >" + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "inline_equation", + "content": "i, j \\in \\{1, 2, \\dots, n\\}" + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "text", + "content": " have been obtained. Then, we take the pairs of " + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "inline_equation", + "content": "i = j" + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "text", + "content": " as positive samples, and the pairs of " + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "inline_equation", + "content": "i \\neq j" + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "text", + "content": " as negative samples for contrastive training. Specifically, we compute the similarity between embeddings of the " + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "inline_equation", + "content": "i^{\\text{th}}" + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "text", + "content": " image and the " + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "inline_equation", + "content": "j^{\\text{th}}" + }, + { + "bbox": [ + 304, + 548, + 545, + 668 + ], + "type": "text", + "content": " prompt matrix in CLIP space as:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 318, + 671, + 545, + 685 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 671, + 545, + 685 + ], + "spans": [ + { + "bbox": [ + 318, + 671, + 545, + 685 + ], + "type": "interline_equation", + "content": "\\operatorname {S i m} _ {i j} = \\operatorname {C o s} \\left(\\operatorname {N o r m} \\left(E _ {\\mathrm {I}} \\left(G _ {\\mathrm {s}} \\left(w ^ {i}\\right)\\right)\\right), \\operatorname {N o r m} \\left(E _ {\\mathrm {T}} \\left(\\mathrm {M} _ {\\mathrm {s}} ^ {j}\\right)\\right)\\right), \\tag {3}", + "image_path": "7f7ca7d5888a2f189edaedc9319f34266c4eae8a9d966293c71b1b17d29ec135.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\mathrm{Norm}(\\cdot)" + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\mathrm{Cos}(\\cdot)" + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "text", + "content": " represent " + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "text", + "content": " normalization and the cosine function, respectively. 
The similarities of pos" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "11497" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 107 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 107 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 107 + ], + "type": "text", + "content": "itive samples are maximized while the similarities of negative samples are minimized. The contrastive loss is expressed as:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 75, + 109, + 287, + 134 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 109, + 287, + 134 + ], + "spans": [ + { + "bbox": [ + 75, + 109, + 287, + 134 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {c o n t r}} = \\mathbb {E} _ {w \\in \\mathcal {W}} \\left(\\sum_ {i \\neq j} \\left(\\operatorname {S i m} _ {i j}\\right) - \\sum_ {i = j} \\left(\\operatorname {S i m} _ {i j}\\right)\\right). \\tag {4}", + "image_path": "b278c4ea9159673c6578c44d32b82b5c5f4d1b757d2cfd15c614c60883980497.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 136, + 289, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 136, + 289, + 363 + ], + "spans": [ + { + "bbox": [ + 46, + 136, + 289, + 363 + ], + "type": "text", + "content": "Domain regularization loss. For the target domain without any prior knowledge except the domain label " + }, + { + "bbox": [ + 46, + 136, + 289, + 363 + ], + "type": "inline_equation", + "content": "\\mathrm{Y_t}" + }, + { + "bbox": [ + 46, + 136, + 289, + 363 + ], + "type": "text", + "content": ", we can simply share the learned prompt vectors between the source and target domains following [8]. However, the shared prompt vectors may lead to the risk of generating unrealistic images for the target domain, because some learned prompt vectors may contain strongly relevant features to the source domain, leading to conflict with the target domain. For example, an image of \"Human\" domain is matched to prompt vectors of \"round ear\", but a corresponding image of \"Tolkien elf\" domain should not contain the features of \"round ear\". Sharing these prompt vectors is harmful to the target-domain image generation. Therefore, we further propose a domain regularization loss. Specifically, we constrain the angles between the embeddings of the image-specific prompt matrix " + }, + { + "bbox": [ + 46, + 136, + 289, + 363 + ], + "type": "inline_equation", + "content": "\\mathbf{M}_{\\mathrm{t}}^{i}" + }, + { + "bbox": [ + 46, + 136, + 289, + 363 + ], + "type": "text", + "content": " and the target-domain label " + }, + { + "bbox": [ + 46, + 136, + 289, + 363 + ], + "type": "inline_equation", + "content": "\\mathrm{Y_t}" + }, + { + "bbox": [ + 46, + 136, + 289, + 363 + ], + "type": "text", + "content": " in CLIP space to be small, to avoid the learned prompt vectors conflicting with the target domain. 
Formally, the domain regularization loss is described as:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 369, + 287, + 399 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 369, + 287, + 399 + ], + "spans": [ + { + "bbox": [ + 56, + 369, + 287, + 399 + ], + "type": "interline_equation", + "content": "\mathcal {L} _ {\text {d o m a i n}} = - \mathbb {E} _ {w ^ {i} \in \mathcal {W}} \sum_ {i = 1} ^ {n} \left(\operatorname {C o s} \left(E _ {\mathrm {T}} \left(\mathrm {M} _ {\mathrm {t}} ^ {i}\right), E _ {\mathrm {T}} \left(\mathrm {Y} _ {\mathrm {t}}\right)\right)\right), \tag {5}", + "image_path": "8b4817d3a11a0074807ba14933ed6fb7f781e538bd476b823a8e165a98ade9b4.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 405, + 287, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 405, + 287, + 430 + ], + "spans": [ + { + "bbox": [ + 47, + 405, + 287, + 430 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 405, + 287, + 430 + ], + "type": "inline_equation", + "content": "\mathrm{M}_{\mathrm{t}}^{i}" + }, + { + "bbox": [ + 47, + 405, + 287, + 430 + ], + "type": "text", + "content": " is calculated by Eq.(2) except that the domain label is replaced, and " + }, + { + "bbox": [ + 47, + 405, + 287, + 430 + ], + "type": "inline_equation", + "content": "\mathrm{Cos}(\cdot)" + }, + { + "bbox": [ + 47, + 405, + 287, + 430 + ], + "type": "text", + "content": " represents the cosine similarity." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 430, + 287, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 430, + 287, + 453 + ], + "spans": [ + { + "bbox": [ + 47, + 430, + 287, + 453 + ], + "type": "text", + "content": "In summary, the whole training loss function of the latent mapper " + }, + { + "bbox": [ + 47, + 430, + 287, + 453 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 47, + 430, + 287, + 453 + ], + "type": "text", + "content": " is:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 114, + 460, + 287, + 472 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 460, + 287, + 472 + ], + "spans": [ + { + "bbox": [ + 114, + 460, + 287, + 472 + ], + "type": "interline_equation", + "content": "\mathcal {L} = \mathcal {L} _ {\text {c o n t r}} + \lambda \mathcal {L} _ {\text {d o m a i n}}, \tag {6}", + "image_path": "d69a944ccfc9c1f7857044f7f4c635ac27e18ef4372ac75f76f5e418b79c5cf5.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 478, + 287, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 478, + 287, + 515 + ], + "spans": [ + { + "bbox": [ + 47, + 478, + 287, + 515 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 478, + 287, + 515 + ], + "type": "inline_equation", + "content": "\lambda" + }, + { + "bbox": [ + 47, + 478, + 287, + 515 + ], + "type": "text", + "content": " is the ratio parameter. Optimized by " + }, + { + "bbox": [ + 47, + 478, + 287, + 515 + ], + "type": "inline_equation", + "content": "\mathcal{L}" + }, + { + "bbox": [ + 47, + 478, + 287, + 515 + ], + "type": "text", + "content": ", the learned prompt vectors can not only reflect the features of the source-domain images, but also adapt to the target domain."
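To make the latent-mapper objective of Eqs. (3)-(6) concrete, below is a minimal PyTorch-style sketch of the two loss terms. It operates on pre-computed CLIP embeddings; the tensor names, shapes and the helper signature are illustrative assumptions, not the authors' released implementation.

```python
import torch
import torch.nn.functional as F_nn  # "F" is reserved for the latent mapper in the paper


def mapper_losses(img_emb, src_prompt_emb, tgt_prompt_emb, tgt_label_emb, lam=1.0):
    """Sketch of Eqs. (3)-(6) for one batch of n latent codes.

    img_emb:        (n, d) CLIP image embeddings E_I(G_s(w^i))
    src_prompt_emb: (n, d) CLIP text embeddings E_T(M_s^j) of the source prompts
    tgt_prompt_emb: (n, d) CLIP text embeddings E_T(M_t^i) of the target prompts
    tgt_label_emb:  (d,)   CLIP text embedding E_T(Y_t) of the target domain label
    lam:            ratio parameter lambda of Eq. (6)
    """
    # Eq. (3): cosine similarity between the i-th image and the j-th prompt matrix.
    sim = F_nn.normalize(img_emb, dim=-1) @ F_nn.normalize(src_prompt_emb, dim=-1).T  # (n, n)

    # Eq. (4): maximize matched (i == j) similarities, minimize mismatched ones.
    eye = torch.eye(sim.shape[0], dtype=torch.bool, device=sim.device)
    loss_contr = sim[~eye].sum() - sim[eye].sum()

    # Eq. (5): keep each target prompt close to the target domain label in CLIP space.
    loss_domain = -F_nn.cosine_similarity(
        tgt_prompt_emb, tgt_label_emb.unsqueeze(0).expand_as(tgt_prompt_emb), dim=-1
    ).sum()

    # Eq. (6): total training loss of the latent mapper.
    return loss_contr + lam * loss_domain
```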
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 521, + 264, + 533 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 521, + 264, + 533 + ], + "spans": [ + { + "bbox": [ + 47, + 521, + 264, + 533 + ], + "type": "text", + "content": "3.2. Latent mapper guided generator training" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 539, + 287, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 539, + 287, + 646 + ], + "spans": [ + { + "bbox": [ + 46, + 539, + 287, + 646 + ], + "type": "text", + "content": "After training the latent mapper " + }, + { + "bbox": [ + 46, + 539, + 287, + 646 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 46, + 539, + 287, + 646 + ], + "type": "text", + "content": ", we conduct the second stage: training the target-domain generator " + }, + { + "bbox": [ + 46, + 539, + 287, + 646 + ], + "type": "inline_equation", + "content": "G_{\mathrm{t}}" + }, + { + "bbox": [ + 46, + 539, + 287, + 646 + ], + "type": "text", + "content": " as shown in Fig.3 (Stage 2). Specifically, we plug in the trained latent mapper and train " + }, + { + "bbox": [ + 46, + 539, + 287, + 646 + ], + "type": "inline_equation", + "content": "G_{\mathrm{t}}" + }, + { + "bbox": [ + 46, + 539, + 287, + 646 + ], + "type": "text", + "content": " with an improved Directional CLIP Loss " + }, + { + "bbox": [ + 46, + 539, + 287, + 646 + ], + "type": "inline_equation", + "content": "\mathcal{L}_{\mathrm{adapt}}" + }, + { + "bbox": [ + 46, + 539, + 287, + 646 + ], + "type": "text", + "content": ". Its main difference from [8] is using the image-specific prompt vectors that are produced on-the-fly by " + }, + { + "bbox": [ + 46, + 539, + 287, + 646 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 46, + 539, + 287, + 646 + ], + "type": "text", + "content": " instead of the fixed ones of manually designed prompts. Formally, given a latent code " + }, + { + "bbox": [ + 46, + 539, + 287, + 646 + ], + "type": "inline_equation", + "content": "w^{i}" + }, + { + "bbox": [ + 46, + 539, + 287, + 646 + ], + "type": "text", + "content": ", we calculate the direction of the " + }, + { + "bbox": [ + 46, + 539, + 287, + 646 + ], + "type": "inline_equation", + "content": "i^{\mathrm{th}}" + }, + { + "bbox": [ + 46, + 539, + 287, + 646 + ], + "type": "text", + "content": " source and target image pair as below:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 61, + 651, + 287, + 666 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 651, + 287, + 666 + ], + "spans": [ + { + "bbox": [ + 61, + 651, + 287, + 666 + ], + "type": "interline_equation", + "content": "\Delta \mathrm {I} _ {i} = \operatorname {N o r m} \left(E _ {\mathrm {I}} \left(G _ {\mathrm {t}} \left(w ^ {i}\right)\right)\right) - \operatorname {N o r m} \left(E _ {\mathrm {I}} \left(G _ {\mathrm {s}} \left(w ^ {i}\right)\right)\right),
\\tag {7}", + "image_path": "418998d485bc016dc5a7a6e664315530c6693b469f39fd03987d3c5eaf0a333b.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 671, + 287, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 671, + 287, + 694 + ], + "spans": [ + { + "bbox": [ + 47, + 671, + 287, + 694 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 671, + 287, + 694 + ], + "type": "inline_equation", + "content": "\\mathrm{Norm}(\\cdot)" + }, + { + "bbox": [ + 47, + 671, + 287, + 694 + ], + "type": "text", + "content": " represents " + }, + { + "bbox": [ + 47, + 671, + 287, + 694 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 47, + 671, + 287, + 694 + ], + "type": "text", + "content": " normalization. The image-specific adaptation direction is calculated as below:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 75, + 700, + 287, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 700, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 75, + 700, + 287, + 715 + ], + "type": "interline_equation", + "content": "\\Delta \\mathrm {T} _ {i} = \\operatorname {N o r m} \\left(E _ {\\mathrm {T}} \\left(\\mathrm {M} _ {\\mathrm {t}} ^ {i}\\right)\\right) - \\operatorname {N o r m} \\left(E _ {\\mathrm {T}} \\left(\\mathrm {M} _ {\\mathrm {s}} ^ {i}\\right)\\right). \\tag {8}", + "image_path": "6882510dc2baa71fb8cb98b9fba779dd9daeebab6a0b438df861f3ff6fda4cb2.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 72, + 499, + 85 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 499, + 85 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 499, + 85 + ], + "type": "text", + "content": "The improved Directional CLIP Loss " + }, + { + "bbox": [ + 306, + 72, + 499, + 85 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{adapt}}" + }, + { + "bbox": [ + 306, + 72, + 499, + 85 + ], + "type": "text", + "content": " is:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 343, + 93, + 545, + 124 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 343, + 93, + 545, + 124 + ], + "spans": [ + { + "bbox": [ + 343, + 93, + 545, + 124 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {a d a p t}} = \\mathbb {E} _ {w ^ {i} \\in \\mathcal {W}} \\sum_ {i = 1} ^ {n} \\left(1 - \\frac {\\Delta \\mathrm {I} _ {i} \\cdot \\Delta \\mathrm {T} _ {i}}{| \\Delta \\mathrm {I} _ {i} | | \\Delta \\mathrm {T} _ {i} |}\\right), \\tag {9}", + "image_path": "bf0254f808a0ed4874fdf6785b69706b93fac54afc2ff0704684f9ecfea30cdc.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 131, + 545, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 131, + 545, + 167 + ], + "spans": [ + { + "bbox": [ + 305, + 131, + 545, + 167 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 131, + 545, + 167 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 305, + 131, + 545, + 167 + ], + "type": "text", + "content": " is the batch size of latent codes. 
" + }, + { + "bbox": [ + 305, + 131, + 545, + 167 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{adapt}}" + }, + { + "bbox": [ + 305, + 131, + 545, + 167 + ], + "type": "text", + "content": " constrains the direction of each image pair " + }, + { + "bbox": [ + 305, + 131, + 545, + 167 + ], + "type": "inline_equation", + "content": "\\Delta \\mathrm{I}_i" + }, + { + "bbox": [ + 305, + 131, + 545, + 167 + ], + "type": "text", + "content": " with an image-specific adaptation direction " + }, + { + "bbox": [ + 305, + 131, + 545, + 167 + ], + "type": "inline_equation", + "content": "\\Delta \\mathrm{T}_i" + }, + { + "bbox": [ + 305, + 131, + 545, + 167 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 178, + 388, + 190 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 178, + 388, + 190 + ], + "spans": [ + { + "bbox": [ + 306, + 178, + 388, + 190 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 198, + 545, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 198, + 545, + 292 + ], + "spans": [ + { + "bbox": [ + 305, + 198, + 545, + 292 + ], + "type": "text", + "content": "In this section, we evaluate our method qualitatively and quantitatively. The experimental setup is firstly presented in Sec. 4.1. Then we show image synthesis results across various domains in Sec. 4.2. Utilizing a GAN inversion model and diffusion models, results of real-world image translation are provided in Sec. 4.3. Finally, we carefully conduct ablation studies on prompt designing schemes and loss term ratios in Sec. 4.4." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 300, + 421, + 313 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 300, + 421, + 313 + ], + "spans": [ + { + "bbox": [ + 306, + 300, + 421, + 313 + ], + "type": "text", + "content": "4.1. Experimental setup" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 319, + 545, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 319, + 545, + 581 + ], + "spans": [ + { + "bbox": [ + 305, + 319, + 545, + 581 + ], + "type": "text", + "content": "Baselines and settings. Two strong methods are chosen as our competitors. For zero-shot image synthesis, NADA [8] is the state-of-the-art method. Following NADA [8], we adapt the pre-trained StyleGANv2 [21] generators on (i) Flickr-Faces-HQ (FFHQ) [8] and (ii) Animal FacesHQ (AFHQ) [3], utilize the same pre-trained CLIP [36] built on ViT-B/32 [6]. For zero-shot real-world image translation, we utilize Restyle [1] with e4e [44] encoder to invert a real image into the latent space " + }, + { + "bbox": [ + 305, + 319, + 545, + 581 + ], + "type": "inline_equation", + "content": "\\mathcal{W}" + }, + { + "bbox": [ + 305, + 319, + 545, + 581 + ], + "type": "text", + "content": " for StyleGANs. DiffusionCLIP (Diff-CLIP for short) [22] is the state-of-the-art method. We follow the setting of [22] except replacing denoising diffusion implicit models (DDIM) [42] with diffusion autoencoders [35]. The training process includes 300 iterations for prompt learning and 300 iterations for generator adaptation using a single NVIDIA RTX 3090 GPU. The batch size is set to 32 for prompt learning and 2 for generator adaptation. 
The number of learned prompt vectors " + }, + { + "bbox": [ + 305, + 319, + 545, + 581 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 305, + 319, + 545, + 581 + ], + "type": "text", + "content": " is set to 4. For each domain, the ratio parameter " + }, + { + "bbox": [ + 305, + 319, + 545, + 581 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 305, + 319, + 545, + 581 + ], + "type": "text", + "content": " in Eq.(6) is selected among [1, 10], according to the best Inception Score [38] of adapted generators. The whole training process requires about " + }, + { + "bbox": [ + 305, + 319, + 545, + 581 + ], + "type": "inline_equation", + "content": "10\\sim 20" + }, + { + "bbox": [ + 305, + 319, + 545, + 581 + ], + "type": "text", + "content": " minutes. More implementation details can be seen in supplementary materials." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "content": "Evaluation metrics. The ideal generated images should have: 1) high quality and diversity, 2) correct target-domain style, and 3) necessary source-domain information preservation (e.g., structure or identity). For a comprehensive evaluation, we utilize the popular Inception Score (IS) [38] to evaluate the image quality and diversity, the Single Image Fréchet Inception Distance (SIFID) [39] to evaluate the target-domain style, the Structural Consistency Score (SCS) [49] to evaluate the structure preservation, the identity similarity (ID) [5, 12] to evaluate the identity preservation. More details can be seen in supplementary materials." 
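For readers who want to connect Eqs. (7)-(9) with the setup above, here is a minimal PyTorch-style sketch of one stage-2 adaptation step on a batch of latent codes, with the frozen CLIP encoders, frozen source generator and trained latent mapper passed in as callables. The handles `E_I`, `E_T`, `latent_mapper`, `G_s` and `G_t` are hypothetical placeholders, not the released training code.

```python
import torch
import torch.nn.functional as F_nn


def adaptation_step(w, G_s, G_t, latent_mapper, E_I, E_T, optimizer):
    """One generator-adaptation step following Eqs. (7)-(9).

    w: (n, w_dim) batch of latent codes (n = 2 in the setup described above).
    G_s is frozen; only the parameters of G_t are updated.
    """
    with torch.no_grad():
        src_img = F_nn.normalize(E_I(G_s(w)), dim=-1)   # frozen source branch
        M_s, M_t = latent_mapper(w)                      # image-specific prompts (plus Y_s / Y_t)
        # Eq. (8): image-specific adaptation direction in CLIP text space.
        dT = F_nn.normalize(E_T(M_t), dim=-1) - F_nn.normalize(E_T(M_s), dim=-1)

    # Eq. (7): direction of the i-th source/target image pair in CLIP image space.
    dI = F_nn.normalize(E_I(G_t(w)), dim=-1) - src_img

    # Eq. (9): align each image-pair direction with its own text direction.
    loss_adapt = (1.0 - F_nn.cosine_similarity(dI, dT, dim=-1)).sum()

    optimizer.zero_grad()
    loss_adapt.backward()
    optimizer.step()
    return loss_adapt.item()
```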
+ } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11498" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 55, + 73, + 212, + 245 + ], + "blocks": [ + { + "bbox": [ + 55, + 73, + 212, + 245 + ], + "lines": [ + { + "bbox": [ + 55, + 73, + 212, + 245 + ], + "spans": [ + { + "bbox": [ + 55, + 73, + 212, + 245 + ], + "type": "image", + "image_path": "29fb6b7f34be5303901b7120fd27c50add553887e1983dc4e092eba95845498f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 216, + 74, + 372, + 244 + ], + "blocks": [ + { + "bbox": [ + 216, + 74, + 372, + 244 + ], + "lines": [ + { + "bbox": [ + 216, + 74, + 372, + 244 + ], + "spans": [ + { + "bbox": [ + 216, + 74, + 372, + 244 + ], + "type": "image", + "image_path": "08418b9a6f0b65e8dec972b97a56e626c27bc7f5e4dc21961dfc895d8a67c363.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 376, + 74, + 533, + 245 + ], + "blocks": [ + { + "bbox": [ + 376, + 74, + 533, + 245 + ], + "lines": [ + { + "bbox": [ + 376, + 74, + 533, + 245 + ], + "spans": [ + { + "bbox": [ + 376, + 74, + 533, + 245 + ], + "type": "image", + "image_path": "d74e7911721bc05f5b888638160c73bd8faa6ecd3bda79ab6cc2324278edb17d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 55, + 255, + 211, + 415 + ], + "blocks": [ + { + "bbox": [ + 55, + 255, + 211, + 415 + ], + "lines": [ + { + "bbox": [ + 55, + 255, + 211, + 415 + ], + "spans": [ + { + "bbox": [ + 55, + 255, + 211, + 415 + ], + "type": "image", + "image_path": "2dc87c38b88cb36faeb6cd7945f5e3b66de331822eba924bd546c0e624226e44.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 418, + 547, + 452 + ], + "lines": [ + { + "bbox": [ + 46, + 418, + 547, + 452 + ], + "spans": [ + { + "bbox": [ + 46, + 418, + 547, + 452 + ], + "type": "text", + "content": "Figure 4. Image synthesis comparison results. For FFHQ [21], the source domain is \"Human\" and the target domains are \"Pixar character\", \"Tolkien elf\", and \"Werewolf\". For AFHQ-Dog [3], the source domain is \"Photo\" and the target domains are \"Cartoon\", \"Pointillism\", and \"Cubism\". The yellow box areas show the mode collapse problem of NADA [8]." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 216, + 255, + 373, + 415 + ], + "blocks": [ + { + "bbox": [ + 216, + 255, + 373, + 415 + ], + "lines": [ + { + "bbox": [ + 216, + 255, + 373, + 415 + ], + "spans": [ + { + "bbox": [ + 216, + 255, + 373, + 415 + ], + "type": "image", + "image_path": "d6df7d3195dc9f738872649e3d2812238e7b48f155c54a226759d7ccd9a05f95.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 376, + 255, + 533, + 415 + ], + "blocks": [ + { + "bbox": [ + 376, + 255, + 533, + 415 + ], + "lines": [ + { + "bbox": [ + 376, + 255, + 533, + 415 + ], + "spans": [ + { + "bbox": [ + 376, + 255, + 533, + 415 + ], + "type": "image", + "image_path": "ed555e54f9be07325dd42f45c54854a4745e971370d6a0517d61066e98e8ce7a.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 48, + 473, + 549, + 606 + ], + "blocks": [ + { + "bbox": [ + 134, + 457, + 457, + 468 + ], + "lines": [ + { + "bbox": [ + 134, + 457, + 457, + 468 + ], + "spans": [ + { + "bbox": [ + 134, + 457, + 457, + 468 + ], + "type": "text", + "content": "Table 1. Quantitative evaluation results. US denotes user study. The best results are bold." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 473, + 549, + 606 + ], + "lines": [ + { + "bbox": [ + 48, + 473, + 549, + 606 + ], + "spans": [ + { + "bbox": [ + 48, + 473, + 549, + 606 + ], + "type": "table", + "html": "
<table><tr><td rowspan="3">Dataset</td><td rowspan="3">Source→Target</td><td colspan="2">IS [38] (↑)</td><td colspan="2">SCS [49] (↑)</td><td colspan="2">ID [5,12] (↑)</td><td colspan="6">SIFID [39] (↓)</td><td rowspan="3">US (↑)</td></tr>
<tr><td rowspan="2">NADA</td><td rowspan="2">IPL</td><td rowspan="2">NADA</td><td rowspan="2">IPL</td><td rowspan="2">NADA</td><td rowspan="2">IPL</td><td colspan="3">NADA</td><td colspan="3">IPL</td></tr>
<tr><td>R1</td><td>R2</td><td>R3</td><td>R1</td><td>R2</td><td>R3</td></tr>
<tr><td rowspan="7">FFHQ [8]</td><td>Photo→Disney</td><td>2.721</td><td>3.089</td><td>0.407</td><td>0.448</td><td>0.782</td><td>0.801</td><td>2.776</td><td>3.136</td><td>3.670</td><td>2.517</td><td>2.930</td><td>3.497</td><td>82.6%</td></tr>
<tr><td>Photo→Anime painting</td><td>2.450</td><td>3.051</td><td>0.324</td><td>0.518</td><td>0.666</td><td>0.776</td><td>2.956</td><td>1.811</td><td>1.242</td><td>2.845</td><td>1.595</td><td>1.021</td><td>79.3%</td></tr>
<tr><td>Photo→Wall painting</td><td>2.183</td><td>2.676</td><td>0.439</td><td>0.487</td><td>0.594</td><td>0.637</td><td>1.944</td><td>1.220</td><td>1.331</td><td>1.930</td><td>1.183</td><td>1.274</td><td>80.9%</td></tr>
<tr><td>Photo→Ukiyo-e</td><td>2.205</td><td>2.974</td><td>0.420</td><td>0.506</td><td>0.775</td><td>0.632</td><td>1.954</td><td>1.990</td><td>1.326</td><td>1.165</td><td>1.255</td><td>0.878</td><td>85.9%</td></tr>
<tr><td>Human→Pixar character</td><td>2.703</td><td>2.785</td><td>0.379</td><td>0.461</td><td>0.757</td><td>0.853</td><td>0.793</td><td>0.932</td><td>0.865</td><td>0.638</td><td>0.821</td><td>1.092</td><td>86.7%</td></tr>
<tr><td>Human→Tolkien elf</td><td>2.479</td><td>2.778</td><td>0.416</td><td>0.491</td><td>0.711</td><td>0.772</td><td>0.632</td><td>1.495</td><td>1.452</td><td>0.690</td><td>0.637</td><td>0.701</td><td>76.8%</td></tr>
<tr><td>Human→Werewolf</td><td>2.619</td><td>2.809</td><td>0.399</td><td>0.417</td><td>0.642</td><td>0.747</td><td>1.969</td><td>1.846</td><td>1.967</td><td>1.734</td><td>1.688</td><td>1.911</td><td>72.7%</td></tr>
<tr><td rowspan="3">AFHQ [3]</td><td>Photo→Cartoon</td><td>6.505</td><td>8.658</td><td>0.407</td><td>0.563</td><td>0.925</td><td>0.941</td><td>2.708</td><td>2.672</td><td>3.870</td><td>2.517</td><td>2.477</td><td>3.278</td><td>87.6%</td></tr>
<tr><td>Photo→Pointillism</td><td>5.419</td><td>6.913</td><td>0.224</td><td>0.542</td><td>0.775</td><td>0.881</td><td>7.081</td><td>5.288</td><td>7.142</td><td>4.818</td><td>3.089</td><td>4.074</td><td>78.5%</td></tr>
<tr><td>Photo→Cubism</td><td>4.165</td><td>6.450</td><td>0.386</td><td>0.463</td><td>0.934</td><td>0.943</td><td>2.779</td><td>2.938</td><td>3.199</td><td>2.431</td><td>2.956</td><td>2.284</td><td>74.3%</td></tr></table>
", + "image_path": "697dd8ceb06d0b29c8fb6f2e43ffa527abdeb6f8c975d2daf907724941da19c6.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 616, + 207, + 628 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 616, + 207, + 628 + ], + "spans": [ + { + "bbox": [ + 47, + 616, + 207, + 628 + ], + "type": "text", + "content": "4.2. Generative model adaptation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "type": "text", + "content": "Qualitative comparison. In addition to Fig.1, we conduct extensive experiments across a wide range of domains as shown in Fig.4. All results indicate that our proposed approach outperforms NADA consistently. The yellow box areas in the figures denote the main different features between NADA and our IPL. From the quality of the gener" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 616, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 616, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 616, + 545, + 712 + ], + "type": "text", + "content": "ated images, the results of NADA have more incorrect features and noise, such as green mussy noise on hairs (Tolkien elf), ruined noses (Werewolf) and unshaped necks (Pointillism), while the results of IPL are more clear and correct. From the mode collapse perspective of the generated images, NADA is prone to collapse to some similar facial features for different images, such as depressed emotions (Pixar character), folded ears (Cartoon) and blue noses (Cu" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "11499" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 252, + 73, + 341, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 252, + 73, + 341, + 83 + ], + "spans": [ + { + "bbox": [ + 252, + 73, + 341, + 83 + ], + "type": "text", + "content": "\"Photo\" " + }, + { + "bbox": [ + 252, + 73, + 341, + 83 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 252, + 73, + 341, + 83 + ], + "type": "text", + "content": " \"Wall painting\"" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 52, + 83, + 106, + 189 + ], + "blocks": [ + { + "bbox": [ + 52, + 83, + 106, + 189 + ], + "lines": [ + { + "bbox": [ + 52, + 83, + 106, + 189 + ], + "spans": [ + { + "bbox": [ + 52, + 83, + 106, + 189 + ], + "type": "image", + "image_path": "0268edaa23f642103bd1f705e64ee50c57d5519fc6cbf5a435497c24df1f5acd.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 108, + 83, + 162, + 189 + ], + "blocks": [ + { + "bbox": [ + 108, + 83, + 162, + 189 + ], + "lines": [ + { + "bbox": [ + 108, + 83, + 162, + 189 + ], + "spans": [ + { + "bbox": [ + 108, + 83, + 162, + 189 + ], + "type": "image", + "image_path": "70ae5f6ce32ee7b7a22ca816cecd35cef0f9b1fd65ad245e20525bb713fea231.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": 
"image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 162, + 83, + 214, + 189 + ], + "blocks": [ + { + "bbox": [ + 162, + 83, + 214, + 189 + ], + "lines": [ + { + "bbox": [ + 162, + 83, + 214, + 189 + ], + "spans": [ + { + "bbox": [ + 162, + 83, + 214, + 189 + ], + "type": "image", + "image_path": "269fc30244248f9c7014d3a679e18bd2ad6f633c4ebe0d4a4847ebd42467e94a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 214, + 83, + 268, + 189 + ], + "blocks": [ + { + "bbox": [ + 214, + 83, + 268, + 189 + ], + "lines": [ + { + "bbox": [ + 214, + 83, + 268, + 189 + ], + "spans": [ + { + "bbox": [ + 214, + 83, + 268, + 189 + ], + "type": "image", + "image_path": "dadb6e2a11a8a7cd0a9aeca2c8fa601789afb8b890b353e11e32e2601826a1c5.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 268, + 83, + 323, + 189 + ], + "blocks": [ + { + "bbox": [ + 268, + 83, + 323, + 189 + ], + "lines": [ + { + "bbox": [ + 268, + 83, + 323, + 189 + ], + "spans": [ + { + "bbox": [ + 268, + 83, + 323, + 189 + ], + "type": "image", + "image_path": "4687422010a3419ad8aa9635ccde0f89c1a9bdfe3517bd6651caa42d74798c4b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 323, + 83, + 376, + 189 + ], + "blocks": [ + { + "bbox": [ + 323, + 83, + 376, + 189 + ], + "lines": [ + { + "bbox": [ + 323, + 83, + 376, + 189 + ], + "spans": [ + { + "bbox": [ + 323, + 83, + 376, + 189 + ], + "type": "image", + "image_path": "fabe97722fa861eaa5d6afd08724ae22e7fb11443d4695cd3bb126ae86b5446d.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 378, + 83, + 432, + 189 + ], + "blocks": [ + { + "bbox": [ + 378, + 83, + 432, + 189 + ], + "lines": [ + { + "bbox": [ + 378, + 83, + 432, + 189 + ], + "spans": [ + { + "bbox": [ + 378, + 83, + 432, + 189 + ], + "type": "image", + "image_path": "e7af42432eb1da68e9e7d6b1894a86454d2c738c0ec1b4d399de583cf7332101.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 432, + 83, + 485, + 189 + ], + "blocks": [ + { + "bbox": [ + 432, + 83, + 485, + 189 + ], + "lines": [ + { + "bbox": [ + 432, + 83, + 485, + 189 + ], + "spans": [ + { + "bbox": [ + 432, + 83, + 485, + 189 + ], + "type": "image", + "image_path": "79042054a2ae262e18ffe0fc1d954021da116f3308a780ba49b49623567cc96f.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 485, + 83, + 537, + 189 + ], + "blocks": [ + { + "bbox": [ + 485, + 83, + 537, + 189 + ], + "lines": [ + { + "bbox": [ + 485, + 83, + 537, + 189 + ], + "spans": [ + { + "bbox": [ + 485, + 83, + 537, + 189 + ], + "type": "image", + "image_path": "cd443fa96b2fde1eed83d4bfb736c84c63d44686ba3fa96b3cd7f41101a7129f.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 254, + 190, + 338, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 254, + 190, + 338, + 198 + ], + "spans": [ + { + "bbox": [ + 254, + 190, + 338, + 198 + ], + "type": "text", + "content": "\"Human\" " + }, + { + "bbox": [ + 254, + 190, + 338, + 198 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 
254, + 190, + 338, + 198 + ], + "type": "text", + "content": " \"Tolkien elf\"" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 52, + 198, + 106, + 304 + ], + "blocks": [ + { + "bbox": [ + 52, + 198, + 106, + 304 + ], + "lines": [ + { + "bbox": [ + 52, + 198, + 106, + 304 + ], + "spans": [ + { + "bbox": [ + 52, + 198, + 106, + 304 + ], + "type": "image", + "image_path": "25c38f9cca72c89ddbb5ffad71a66b9291ff39396201b148a82ba074a5b641ee.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 59, + 304, + 99, + 312 + ], + "lines": [ + { + "bbox": [ + 59, + 304, + 99, + 312 + ], + "spans": [ + { + "bbox": [ + 59, + 304, + 99, + 312 + ], + "type": "text", + "content": "Real Images" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 315, + 545, + 360 + ], + "lines": [ + { + "bbox": [ + 46, + 315, + 545, + 360 + ], + "spans": [ + { + "bbox": [ + 46, + 315, + 545, + 360 + ], + "type": "text", + "content": "Figure 5. Real-world image translation comparison results. Baselines are NADA [8], Diff-CLIP [22] and Diff-CLIP+ (an improved version of Diff-CLIP). Recon1, Recon2 and Recon3 refer to inversion results via Restyle [1], DDIM and diffusion autoencoders, respectively. GAN-IPL and Diff-IPL denote integrating IPL with NADA and Diff-CLIP+, respectively. Real images are from CelebA-HQ dataset [29] and translated into two styles of images, \"Wall painting\" and \"Tolkien elf\". The yellow boxes show the key observation areas." + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 108, + 198, + 162, + 304 + ], + "blocks": [ + { + "bbox": [ + 108, + 198, + 162, + 304 + ], + "lines": [ + { + "bbox": [ + 108, + 198, + 162, + 304 + ], + "spans": [ + { + "bbox": [ + 108, + 198, + 162, + 304 + ], + "type": "image", + "image_path": "3554ec76e38e70f52b8788d4d92390f422f2f746cc296883976998f13ded151b.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 123, + 304, + 147, + 312 + ], + "lines": [ + { + "bbox": [ + 123, + 304, + 147, + 312 + ], + "spans": [ + { + "bbox": [ + 123, + 304, + 147, + 312 + ], + "type": "text", + "content": "Recon1" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 162, + 198, + 214, + 304 + ], + "blocks": [ + { + "bbox": [ + 162, + 198, + 214, + 304 + ], + "lines": [ + { + "bbox": [ + 162, + 198, + 214, + 304 + ], + "spans": [ + { + "bbox": [ + 162, + 198, + 214, + 304 + ], + "type": "image", + "image_path": "1a3fcd2901d98d2b5a9334bce46342c1e92c6ca58ebd207841b67e6cb52b34b3.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 177, + 304, + 200, + 312 + ], + "lines": [ + { + "bbox": [ + 177, + 304, + 200, + 312 + ], + "spans": [ + { + "bbox": [ + 177, + 304, + 200, + 312 + ], + "type": "text", + "content": "NADA" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 214, + 198, + 268, + 304 + ], + "blocks": [ + { + "bbox": [ + 214, + 198, + 268, + 304 + ], + "lines": [ + { + "bbox": [ + 214, + 198, + 268, + 304 + ], + "spans": [ + { + "bbox": [ + 214, + 198, + 268, + 304 + ], + "type": "image", + "image_path": "f5b419a0a98122ff4b831e60890433270dd62a07fed5fc62bae54df243de7bbb.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": 
"image_body" + }, + { + "bbox": [ + 226, + 304, + 257, + 312 + ], + "lines": [ + { + "bbox": [ + 226, + 304, + 257, + 312 + ], + "spans": [ + { + "bbox": [ + 226, + 304, + 257, + 312 + ], + "type": "text", + "content": "GAN-IPL" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 269, + 198, + 323, + 304 + ], + "blocks": [ + { + "bbox": [ + 269, + 198, + 323, + 304 + ], + "lines": [ + { + "bbox": [ + 269, + 198, + 323, + 304 + ], + "spans": [ + { + "bbox": [ + 269, + 198, + 323, + 304 + ], + "type": "image", + "image_path": "fd02d4ef7fcca9b1bff27811a8aec50c12890e34cd577549341714e42c83056a.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 282, + 304, + 307, + 312 + ], + "lines": [ + { + "bbox": [ + 282, + 304, + 307, + 312 + ], + "spans": [ + { + "bbox": [ + 282, + 304, + 307, + 312 + ], + "type": "text", + "content": "Recon2" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 323, + 198, + 376, + 304 + ], + "blocks": [ + { + "bbox": [ + 323, + 198, + 376, + 304 + ], + "lines": [ + { + "bbox": [ + 323, + 198, + 376, + 304 + ], + "spans": [ + { + "bbox": [ + 323, + 198, + 376, + 304 + ], + "type": "image", + "image_path": "520fd4a0ab9340aa229fd044e9cfd1ee7b585aab42909b89c04c0361f58f73bc.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 333, + 304, + 367, + 312 + ], + "lines": [ + { + "bbox": [ + 333, + 304, + 367, + 312 + ], + "spans": [ + { + "bbox": [ + 333, + 304, + 367, + 312 + ], + "type": "text", + "content": "Diff-CLIP" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 378, + 198, + 432, + 304 + ], + "blocks": [ + { + "bbox": [ + 378, + 198, + 432, + 304 + ], + "lines": [ + { + "bbox": [ + 378, + 198, + 432, + 304 + ], + "spans": [ + { + "bbox": [ + 378, + 198, + 432, + 304 + ], + "type": "image", + "image_path": "37386bea78b0c8edf5a9d57d86167dae92e5f7e3622d12f72063ae790a8bbaf6.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 392, + 304, + 417, + 312 + ], + "lines": [ + { + "bbox": [ + 392, + 304, + 417, + 312 + ], + "spans": [ + { + "bbox": [ + 392, + 304, + 417, + 312 + ], + "type": "text", + "content": "Recon3" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 432, + 198, + 485, + 304 + ], + "blocks": [ + { + "bbox": [ + 432, + 198, + 485, + 304 + ], + "lines": [ + { + "bbox": [ + 432, + 198, + 485, + 304 + ], + "spans": [ + { + "bbox": [ + 432, + 198, + 485, + 304 + ], + "type": "image", + "image_path": "716b93708d5f65e373def0309008655580349ad226402db625a51cac6be3c994.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 440, + 304, + 477, + 312 + ], + "lines": [ + { + "bbox": [ + 440, + 304, + 477, + 312 + ], + "spans": [ + { + "bbox": [ + 440, + 304, + 477, + 312 + ], + "type": "text", + "content": "Diff-CLIP+" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 485, + 198, + 537, + 304 + ], + "blocks": [ + { + "bbox": [ + 485, + 198, + 537, + 304 + ], + "lines": [ + { + "bbox": [ + 485, + 198, + 537, + 304 + ], + "spans": [ + { + "bbox": [ + 485, + 198, + 537, + 304 + ], + 
"type": "image", + "image_path": "78f9acc720855cb593279202034e32f89e677c92a2db8a81fdba192bdde14f2a.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 498, + 304, + 526, + 312 + ], + "lines": [ + { + "bbox": [ + 498, + 304, + 526, + 312 + ], + "spans": [ + { + "bbox": [ + 498, + 304, + 526, + 312 + ], + "type": "text", + "content": "Diff-IPL" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "bbox": [ + 46, + 366, + 287, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 366, + 287, + 450 + ], + "spans": [ + { + "bbox": [ + 46, + 366, + 287, + 450 + ], + "type": "text", + "content": "bism), while IPL presents consistently higher diversity and solve the mode collapse issue well. Our advantages mainly come from the fact that the latent mapper preserves sufficient image-specific and target-domain friendly features from the source-domain images. The produced prompt vectors provide more precise and diversified adaptation directions for the target-domain generator adaptation." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 46, + 450, + 287, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 450, + 287, + 605 + ], + "spans": [ + { + "bbox": [ + 46, + 450, + 287, + 605 + ], + "type": "text", + "content": "Quantitative comparison. To quantify the performance improvement of IPL compared to NADA [8], IS, SCS, ID and SIFID are evaluated. As reported in Tab.1, for IS, IPL outperforms NADA on all 10 settings, indicating our method achieves better image quality and diversity. For SCS and ID, IPL outperforms NADA on most of the 10 settings except \"Human " + }, + { + "bbox": [ + 46, + 450, + 287, + 605 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 46, + 450, + 287, + 605 + ], + "type": "text", + "content": " Ukiyo-e\". It is mainly because that \"Ukiyo-e\" naturally favors humans with narrow eyes and pale skin, which encourages identity changes during training. For SIFID, we collect 3 reference images " + }, + { + "bbox": [ + 46, + 450, + 287, + 605 + ], + "type": "inline_equation", + "content": "(\\mathbb{R}_1,\\mathbb{R}_2," + }, + { + "bbox": [ + 46, + 450, + 287, + 605 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 450, + 287, + 605 + ], + "type": "inline_equation", + "content": "\\mathbb{R}_3)" + }, + { + "bbox": [ + 46, + 450, + 287, + 605 + ], + "type": "text", + "content": " on the internet for each target domain. Tab.1 shows that IPL outperforms NADA in most cases, indicating our superiority in generating precise target-domain styles." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 46, + 606, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 606, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 606, + 287, + 713 + ], + "type": "text", + "content": "User studies. For each target domain, 32 images generated by NADA and our method are provided to human observers, together with their corresponding source images and textual labels of target domains. Human observers are required to choose better synthesized images which are semantically more consistent with the target domain labels and preserve the useful source-domain information better. We collect 1210 responses from 121 people using a survey platform. 
As reported in the last column of Tab.1, " + }, + { + "bbox": [ + 46, + 606, + 287, + 713 + ], + "type": "inline_equation", + "content": "80.5\\%" + }, + { + "bbox": [ + 46, + 606, + 287, + 713 + ], + "type": "text", + "content": " of" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 306, + 367, + 498, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 367, + 498, + 379 + ], + "spans": [ + { + "bbox": [ + 306, + 367, + 498, + 379 + ], + "type": "text", + "content": "users prefer our approach to NADA on average." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 306, + 384, + 465, + 396 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 384, + 465, + 396 + ], + "spans": [ + { + "bbox": [ + 306, + 384, + 465, + 396 + ], + "type": "text", + "content": "4.3. Real-world image translation" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 304, + 403, + 545, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 403, + 545, + 534 + ], + "spans": [ + { + "bbox": [ + 304, + 403, + 545, + 534 + ], + "type": "text", + "content": "This task first inverts a real-world image to the latent code by a pre-trained inversion model and then feeds it to the trained target-domain generator to get the translated target-domain image. For GAN-based generators, we compare our method (GAN-IPL) with NADA by connecting the inversion model Restyle [1]. For diffusion model generators, we compare our method (Diff-IPL) with Diff-CLIP [22] and Diff-CLIP+ which is an improved version of Diff-CLIP [22] by replacing the original DDIM [42] with a diffusion autoencoder [35]. For these diffusion models, a deterministic inversion process is naturally provided." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 304, + 534, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 534, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 534, + 545, + 713 + ], + "type": "text", + "content": "As shown in Fig.5, comparing the results of NADA and GAN-IPL, IPL's superiority of alleviating mode collapse over NADA can still be observed. Comparing the results of Recon1, Recon2 and Recon3, diffusion models (Recon2 and Recon3) consistently perform better identity preservation than Restyle (Recon1) for real image inversion, especially for some uncommon stuffs in a human face photo, e.g., the hats, hands and tattoos in Fig.5. However, this property is not well inherited in the target domain generators with a fixed adaptation direction (see the results of Diff-CLIP and Diff-CLIP+). Our proposed IPL could help preserve the details in source images better and present the target-domain styles correctly (see the results of Diff-IPL). Quantitative evaluation results of Diff-CLIP, Diff-CLIP+ and Diff-IPL can be seen in supplementary materials." 
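Structurally, the real-world translation pipeline of this subsection is just "invert, then re-synthesize with the adapted generator". A minimal sketch, assuming a hypothetical `restyle_encoder` handle for the Restyle/e4e inversion model and an already-adapted target-domain generator `G_t` (the diffusion variant would instead use the diffusion model's own deterministic inversion):

```python
import torch


@torch.no_grad()
def translate_real_image(real_image, restyle_encoder, G_t):
    """Zero-shot real-world image translation: GAN inversion into the latent
    space W, followed by decoding with the adapted target-domain generator."""
    w = restyle_encoder(real_image)  # invert the real photo into a latent code
    return G_t(w)                    # synthesize the target-domain counterpart
```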
+ } + ] + } + ], + "index": 36 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11500" + } + ] + } + ], + "index": 37 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 74, + 278, + 289 + ], + "blocks": [ + { + "bbox": [ + 53, + 74, + 278, + 289 + ], + "lines": [ + { + "bbox": [ + 53, + 74, + 278, + 289 + ], + "spans": [ + { + "bbox": [ + 53, + 74, + 278, + 289 + ], + "type": "image", + "image_path": "32128f89966999a701937ed1a444c461eece8cf86d9b70eedbb719c0633a1b18.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 64, + 292, + 269, + 303 + ], + "lines": [ + { + "bbox": [ + 64, + 292, + 269, + 303 + ], + "spans": [ + { + "bbox": [ + 64, + 292, + 269, + 303 + ], + "type": "text", + "content": "Figure 6. Ablation results of prompt designing schemes." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 312, + 146, + 323 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 312, + 146, + 323 + ], + "spans": [ + { + "bbox": [ + 47, + 312, + 146, + 323 + ], + "type": "text", + "content": "4.4. Ablation studies" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 331, + 287, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 331, + 287, + 461 + ], + "spans": [ + { + "bbox": [ + 46, + 331, + 287, + 461 + ], + "type": "text", + "content": "Prompt designing schemes. We investigate four different prompt designing schemes: 1) manually fixed prompts (NADA), 2) learned fixed prompts, 3) random prompts and 4) adaptive prompts (Ours). Manually fixed prompts mean simply utilizing the manually designed prompts as NADA [8]. Learned fixed prompts denote unified prompt vectors produced by common prompt learning strategy [58] and shared for all images. Random prompts refer to prompt vectors produced by a randomly initialized latent mapper. Adaptive prompts denote the learned image-specific prompt vectors produced by our IPL method." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 463, + 287, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 463, + 287, + 617 + ], + "spans": [ + { + "bbox": [ + 46, + 463, + 287, + 617 + ], + "type": "text", + "content": "As illustrated in Fig.6, synthesized images with manually fixed prompts and learned fixed prompts show some similar mode collapse issues, e.g., blue eyebrows (Ukiyo-e) and depressed emotions (Pixar character). They both produce a fixed adaptation direction, which leads to identical supervision signals for all image pairs. Synthesized images with random prompts present more photo-realistic results but lack the desired target-domain style. A possible reason is that the random prompts contain some features conflicting with the target domain and impede the learning of the target domain style. Our adaptive prompts perform best since the prompts contain more image-specific and target-domain friendly features from the source-domain images." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "type": "text", + "content": "Loss term ratios. We compare different values of the ratio parameter " + }, + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "type": "text", + "content": " in Eq.(6), which is used to adjust the intensity of the domain regularization loss. Visual results are shown in Fig.7. In specific, when we set " + }, + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "type": "text", + "content": " to a small value (" + }, + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\lambda = 0" + }, + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "type": "text", + "content": " as an extreme case), there is almost no constraint from the target domain. The learned prompts would excessively preserve the source-domain features. Thus the synthesized images are similar to their corresponding source" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 313, + 74, + 541, + 280 + ], + "blocks": [ + { + "bbox": [ + 313, + 74, + 541, + 280 + ], + "lines": [ + { + "bbox": [ + 313, + 74, + 541, + 280 + ], + "spans": [ + { + "bbox": [ + 313, + 74, + 541, + 280 + ], + "type": "image", + "image_path": "047ec3af94a9ec2da033b34d28cf97a94320a17fb212f632782bbafa48bb1f72.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 343, + 293, + 508, + 304 + ], + "lines": [ + { + "bbox": [ + 343, + 293, + 508, + 304 + ], + "spans": [ + { + "bbox": [ + 343, + 293, + 508, + 304 + ], + "type": "text", + "content": "Figure 7. Ablation results of loss term ratios." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 315, + 545, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 315, + 545, + 398 + ], + "spans": [ + { + "bbox": [ + 304, + 315, + 545, + 398 + ], + "type": "text", + "content": "images. In contrast, if " + }, + { + "bbox": [ + 304, + 315, + 545, + 398 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 304, + 315, + 545, + 398 + ], + "type": "text", + "content": " is set to a large value (" + }, + { + "bbox": [ + 304, + 315, + 545, + 398 + ], + "type": "inline_equation", + "content": "\\lambda = 20" + }, + { + "bbox": [ + 304, + 315, + 545, + 398 + ], + "type": "text", + "content": " as an example), a strong target-domain constraint will limit the diversity of the learned prompts. As a result, the synthesized images would slightly show some similar undesired patterns as images generated via fixed prompts. Therefore, in practical applications, " + }, + { + "bbox": [ + 304, + 315, + 545, + 398 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 304, + 315, + 545, + 398 + ], + "type": "text", + "content": " should be a trade-off value (i.e., between 1 and 10)." 
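One way to operationalize this trade-off, consistent with the setup of Sec. 4.1 (λ chosen per domain from [1, 10] by the best Inception Score), is a simple sweep. The candidate grid and the two callables below are illustrative assumptions rather than the authors' exact protocol:

```python
def select_lambda(train_fn, score_fn, candidates=(1, 2, 5, 10)):
    """Pick the ratio parameter of Eq. (6) by the best Inception Score.

    train_fn(lam) -> adapted generator (hypothetical full two-stage training run)
    score_fn(G)   -> Inception Score of images sampled from G (hypothetical)
    """
    best_lam, best_score = None, float("-inf")
    for lam in candidates:
        score = score_fn(train_fn(lam))
        if score > best_score:
            best_lam, best_score = lam, score
    return best_lam
```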
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 407, + 378, + 418 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 407, + 378, + 418 + ], + "spans": [ + { + "bbox": [ + 306, + 407, + 378, + 418 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 424, + 545, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 424, + 545, + 628 + ], + "spans": [ + { + "bbox": [ + 304, + 424, + 545, + 628 + ], + "type": "text", + "content": "In this paper, we have proposed a novel zero-shot generative model adaptation approach called Image-specific Prompt Learning (IPL). In specific, we build a projection from latent codes to image-specific sets of prompt vectors via a latent mapper. With a contrastive learning scheme and a domain regularization constraint, the learned prompt vectors represent image-specific but target-domain-friendly features, producing more precise and diversified adaptation directions for target domain generator training. Compared with the state-of-the-art approaches, IPL consistently improves the quality of synthesized images and alleviates the mode collapse issue. Furthermore, IPL is independent of the type of generator and works well with both GANs and diffusion models, which exhibits good universality and adaptability. In the future, we will try to apply the proposed image-specific prompt learning strategy in other downstream tasks, such as unsupervised image captioning." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 636, + 408, + 649 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 636, + 408, + 649 + ], + "spans": [ + { + "bbox": [ + 306, + 636, + 408, + 649 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "text", + "content": "This work is supported in part by the National Key R&D Program of China (2019YFC1408703), the National Natural Science Foundation of China (62022048, 62276150), Guoqiang Institute of Tsinghua University and Beijing Academy of Artificial Intelligence." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11501" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 286, + 712 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 53, + 91, + 286, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 286, + 123 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 286, + 123 + ], + "type": "text", + "content": "[1] Yuval Alaluf, Or Patashnik, and Daniel Cohen-Or. ReStyle: A residual-based StyleGAN encoder via iterative refinement. In ICCV, 2021. 
5, 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 125, + 286, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 125, + 286, + 157 + ], + "spans": [ + { + "bbox": [ + 53, + 125, + 286, + 157 + ], + "type": "text", + "content": "[2] Sergey Bartunov and Dmitry Vetrov. Few-shot generative modelling with generative matching networks. In AISTATS, 2018. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 160, + 286, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 160, + 286, + 191 + ], + "spans": [ + { + "bbox": [ + 54, + 160, + 286, + 191 + ], + "type": "text", + "content": "[3] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. StarGAN v2: Diverse image synthesis for multiple domains. In CVPR, 2020. 5, 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 194, + 286, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 194, + 286, + 225 + ], + "spans": [ + { + "bbox": [ + 53, + 194, + 286, + 225 + ], + "type": "text", + "content": "[4] Louis Clouatre and Marc Demers. FIGR: Few-shot image generation with reptile. arXiv preprint arXiv:1901.02199, 2019.3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 228, + 286, + 260 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 228, + 286, + 260 + ], + "spans": [ + { + "bbox": [ + 54, + 228, + 286, + 260 + ], + "type": "text", + "content": "[5] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. ArcFace: Additive angular margin loss for deep face recognition. In CVPR, 2019. 5, 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 54, + 262, + 286, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 262, + 286, + 327 + ], + "spans": [ + { + "bbox": [ + 54, + 262, + 286, + 327 + ], + "type": "text", + "content": "[6] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2020. 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 54, + 330, + 286, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 330, + 286, + 371 + ], + "spans": [ + { + "bbox": [ + 54, + 330, + 286, + 371 + ], + "type": "text", + "content": "[7] Yu Du, Fangyun Wei, Zihe Zhang, Miaojing Shi, Yue Gao, and Guoqi Li. Learning to prompt for open-vocabulary object detection with vision-language model. In CVPR, 2022. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 54, + 375, + 286, + 417 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 375, + 286, + 417 + ], + "spans": [ + { + "bbox": [ + 54, + 375, + 286, + 417 + ], + "type": "text", + "content": "[8] Rinon Gal, Or Patashnik, Haggai Maron, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. StyleGAN-NADA: CLIP-guided domain adaptation of image generators. In SIGGRAPH, 2022. 2, 3, 4, 5, 6, 7, 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 420, + 286, + 451 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 420, + 286, + 451 + ], + "spans": [ + { + "bbox": [ + 53, + 420, + 286, + 451 + ], + "type": "text", + "content": "[9] Tianyu Gao, Adam Fisch, and Danqi Chen. Making pre-trained language models better few-shot learners. In ACL/IJCNLP, 2021. 
3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 454, + 286, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 454, + 286, + 486 + ], + "spans": [ + { + "bbox": [ + 48, + 454, + 286, + 486 + ], + "type": "text", + "content": "[10] Chunjiang Ge, Rui Huang, Mixue Xie, Zihang Lai, Shiji Song, Shuang Li, and Gao Huang. Domain adaptation via prompt learning. arXiv preprint arXiv:2202.06687, 2022. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 488, + 286, + 530 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 488, + 286, + 530 + ], + "spans": [ + { + "bbox": [ + 48, + 488, + 286, + 530 + ], + "type": "text", + "content": "[11] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In NeurIPS, 2014. 1" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 533, + 286, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 533, + 286, + 575 + ], + "spans": [ + { + "bbox": [ + 48, + 533, + 286, + 575 + ], + "type": "text", + "content": "[12] Ju He, Jie-Neng Chen, Shuai Liu, Adam Kortylewski, Cheng Yang, Yutong Bai, and Changhu Wang. TransFG: A transformer architecture for fine-grained recognition. In AAAI, 2022. 5, 6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 578, + 286, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 578, + 286, + 599 + ], + "spans": [ + { + "bbox": [ + 48, + 578, + 286, + 599 + ], + "type": "text", + "content": "[13] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In NeurIPS, 2020. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 601, + 286, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 601, + 286, + 633 + ], + "spans": [ + { + "bbox": [ + 48, + 601, + 286, + 633 + ], + "type": "text", + "content": "[14] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In CVPR, 2017. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 635, + 286, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 286, + 678 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 286, + 678 + ], + "type": "text", + "content": "[15] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 681, + 286, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 681, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 681, + 286, + 712 + ], + "type": "text", + "content": "[16] Zhengbao Jiang, Frank F Xu, Jun Araki, and Graham Neubig. How can we know what language models know? TACL, 2020. 3" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 36, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "text", + "content": "[17] Chen Ju, Tengda Han, Kunhao Zheng, Ya Zhang, and Weidi Xie. 
Prompting visual-language models for efficient video understanding. In ECCV, 2021. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 108, + 545, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 108, + 545, + 138 + ], + "spans": [ + { + "bbox": [ + 308, + 108, + 545, + 138 + ], + "type": "text", + "content": "[18] Tero Karras, Miika Aittala, Janne Hellsten, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Training generative adversarial networks with limited data. In NeurIPS, 2020. 2, 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 141, + 545, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 141, + 545, + 173 + ], + "spans": [ + { + "bbox": [ + 308, + 141, + 545, + 173 + ], + "type": "text", + "content": "[19] Tero Karras, Miika Aittala, Samuli Laine, Erik Härkönen, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Alias-free generative adversarial networks. In NeurIPS, 2021. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 175, + 545, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 175, + 545, + 206 + ], + "spans": [ + { + "bbox": [ + 308, + 175, + 545, + 206 + ], + "type": "text", + "content": "[20] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In CVPR, 2019. 2, 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 209, + 545, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 209, + 545, + 240 + ], + "spans": [ + { + "bbox": [ + 308, + 209, + 545, + 240 + ], + "type": "text", + "content": "[21] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of StyleGAN. In CVPR, 2020. 1, 2, 3, 5, 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 243, + 545, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 243, + 545, + 274 + ], + "spans": [ + { + "bbox": [ + 308, + 243, + 545, + 274 + ], + "type": "text", + "content": "[22] Gwanghyun Kim, Taesung Kwon, and Jong Chul Ye. DiffusionCLIP: Text-guided diffusion models for robust image manipulation. In CVPR, 2022. 2, 3, 5, 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 277, + 545, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 277, + 545, + 307 + ], + "spans": [ + { + "bbox": [ + 308, + 277, + 545, + 307 + ], + "type": "text", + "content": "[23] Brian Lester, Rami Al-Rfou, and Noah Constant. The power of scale for parameter-efficient prompt tuning. In EMNLP, 2021. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 310, + 545, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 310, + 545, + 363 + ], + "spans": [ + { + "bbox": [ + 308, + 310, + 545, + 363 + ], + "type": "text", + "content": "[24] Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu Yuan, Lei Zhang, Jenq-Neng Hwang, Kai-Wei Chang, and Jianfeng Gao. Grounded language-image pre-training. In CVPR, 2022. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 366, + 545, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 366, + 545, + 396 + ], + "spans": [ + { + "bbox": [ + 308, + 366, + 545, + 396 + ], + "type": "text", + "content": "[25] Xiang Lisa Li and Percy Liang. 
Prefix-Tuning: Optimizing continuous prompts for generation. In ACL/JCNLP, 2021. 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 399, + 545, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 399, + 545, + 431 + ], + "spans": [ + { + "bbox": [ + 308, + 399, + 545, + 431 + ], + "type": "text", + "content": "[26] Weixin Liang, Zixuan Liu, and Can Liu. DAWSON: A domain adaptive few shot generation framework. arXiv preprint arXiv:2001.00576, 2020.3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 434, + 545, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 434, + 545, + 486 + ], + "spans": [ + { + "bbox": [ + 308, + 434, + 545, + 486 + ], + "type": "text", + "content": "[27] Xihui Liu, Dong Huk Park, Samaneh Azadi, Gong Zhang, Arman Chopikyan, Yuxiao Hu, Humphrey Shi, Anna Rohrbach, and Trevor Darrell. More control for free! image synthesis with semantic diffusion guidance. In WACV, 2023. 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 489, + 545, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 489, + 545, + 521 + ], + "spans": [ + { + "bbox": [ + 308, + 489, + 545, + 521 + ], + "type": "text", + "content": "[28] Xiao Liu, Yanan Zheng, Zhengxiao Du, Ming Ding, Yujie Qian, Zhilin Yang, and Jie Tang. GPT understands, too. arXiv preprint arXiv:2103.10385, 2021. 3" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 523, + 545, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 523, + 545, + 553 + ], + "spans": [ + { + "bbox": [ + 308, + 523, + 545, + 553 + ], + "type": "text", + "content": "[29] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In ICCV, 2015. 7" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 308, + 557, + 545, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 557, + 545, + 588 + ], + "spans": [ + { + "bbox": [ + 308, + 557, + 545, + 588 + ], + "type": "text", + "content": "[30] Sangwoo Mo, Minsu Cho, and Jinwoo Shin. Freeze the discriminator: a simple baseline for fine-tuning GANs. In CVPR Workshops, 2020. 2, 3" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 308, + 591, + 545, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 591, + 545, + 611 + ], + "spans": [ + { + "bbox": [ + 308, + 591, + 545, + 611 + ], + "type": "text", + "content": "[31] Alexander Quinn Nichol and Prafulla Dhariwal. Improved denoising diffusion probabilistic models. In ICML, 2021. 2" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 308, + 614, + 545, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 614, + 545, + 644 + ], + "spans": [ + { + "bbox": [ + 308, + 614, + 545, + 644 + ], + "type": "text", + "content": "[32] Atsuhiro Noguchi and Tatsuya Harada. Image generation from small datasets via batch statistics adaptation. In ICCV, 2019. 3" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 308, + 647, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 647, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 308, + 647, + 545, + 689 + ], + "type": "text", + "content": "[33] Utkarsh Ojha, Yijun Li, Jingwan Lu, Alexei A Efros, Yong Jae Lee, Eli Shechtman, and Richard Zhang. Few-shot image generation via cross-domain correspondence. In CVPR, 2021. 
2, 3" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 308, + 692, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 692, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 692, + 545, + 712 + ], + "type": "text", + "content": "[34] Fabio Petroni, Tim Rocktäschel, Patrick Lewis, Anton Bakhtin, Yuxiang Wu, Alexander H Miller, and Sebastian" + } + ] + } + ], + "index": 35 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "11502" + } + ] + } + ], + "index": 37 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 67, + 72, + 286, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 286, + 94 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 286, + 94 + ], + "type": "text", + "content": "Riedel. Language models as knowledge bases? In EMNLP-IJCNLP, 2019. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 95, + 287, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 95, + 287, + 138 + ], + "spans": [ + { + "bbox": [ + 48, + 95, + 287, + 138 + ], + "type": "text", + "content": "[35] Konpat Preechakul, Nattanat Chathee, Suttisak Wizadwongsa, and Supasorn Suwajanakorn. Diffusion Autoencoders: Toward a meaningful and decodable representation. In CVPR, 2022. 2, 5, 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 140, + 287, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 140, + 287, + 204 + ], + "spans": [ + { + "bbox": [ + 48, + 140, + 287, + 204 + ], + "type": "text", + "content": "[36] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In ICML, 2021. 3, 4, 5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 205, + 287, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 205, + 287, + 237 + ], + "spans": [ + { + "bbox": [ + 48, + 205, + 287, + 237 + ], + "type": "text", + "content": "[37] Esther Robb, Wen-Sheng Chu, Abhishek Kumar, and Jia-Bin Huang. Few-shot adaptation of generative adversarial networks. arXiv preprint arXiv:2010.11943, 2020. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 239, + 287, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 239, + 287, + 270 + ], + "spans": [ + { + "bbox": [ + 48, + 239, + 287, + 270 + ], + "type": "text", + "content": "[38] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training GANs. In NeurIPS, 2016. 5, 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 272, + 287, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 272, + 287, + 304 + ], + "spans": [ + { + "bbox": [ + 48, + 272, + 287, + 304 + ], + "type": "text", + "content": "[39] Tamar Rott Shaham, Tali Dekel, and Tomer Michaeli. SinGAN: Learning a generative model from a single natural image. In ICCV, 2019. 
5, 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 305, + 287, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 305, + 287, + 348 + ], + "spans": [ + { + "bbox": [ + 48, + 305, + 287, + 348 + ], + "type": "text", + "content": "[40] Taylor Shin, Yasaman Razeghi, Robert L Logan IV, Eric Wallace, and Sameer Singh. AutoPrompt: Eliciting knowledge from language models with automatically generated prompts. In EMNLP, 2020. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 350, + 287, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 350, + 287, + 381 + ], + "spans": [ + { + "bbox": [ + 48, + 350, + 287, + 381 + ], + "type": "text", + "content": "[41] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In ICML, 2015. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 383, + 287, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 383, + 287, + 403 + ], + "spans": [ + { + "bbox": [ + 48, + 383, + 287, + 403 + ], + "type": "text", + "content": "[42] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. In ICLR, 2020. 2, 3, 5, 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 405, + 287, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 405, + 287, + 435 + ], + "spans": [ + { + "bbox": [ + 48, + 405, + 287, + 435 + ], + "type": "text", + "content": "[43] Yang Song and Stefano Ermon. Generative modeling by estimating gradients of the data distribution. NeurIPS, 2019. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 437, + 287, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 437, + 287, + 469 + ], + "spans": [ + { + "bbox": [ + 48, + 437, + 287, + 469 + ], + "type": "text", + "content": "[44] Omer Tov, Yuval Alaluf, Yotam Nitzan, Or Patashnik, and Daniel Cohen-Or. Designing an encoder for StyleGAN image manipulation. TOG, 2021. 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 472, + 287, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 472, + 287, + 503 + ], + "spans": [ + { + "bbox": [ + 48, + 472, + 287, + 503 + ], + "type": "text", + "content": "[45] Ngoc-Trung Tran, Viet-Hung Tran, Ngoc-Bao Nguyen, Trung-Kien Nguyen, and Ngai-Man Cheung. On data augmentation for GAN training. TIP, 2021. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 505, + 287, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 505, + 287, + 536 + ], + "spans": [ + { + "bbox": [ + 48, + 505, + 287, + 536 + ], + "type": "text", + "content": "[46] Steven Walton, Ali Hassani, Xingqian Xu, Zhangyang Wang, and Humphrey Shi. StyleNAT: Giving each head a new perspective. arXiv preprint arXiv:2211.05770, 2022. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 537, + 287, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 537, + 287, + 580 + ], + "spans": [ + { + "bbox": [ + 48, + 537, + 287, + 580 + ], + "type": "text", + "content": "[47] Yaxing Wang, Abel Gonzalez-Garcia, David Berga, Luis Herranz, Fahad Shahbaz Khan, and Joost van de Weijer. MineGAN: effective knowledge transfer from GANs to target domains with few images. In CVPR, 2020. 
2, 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 582, + 287, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 582, + 287, + 624 + ], + "spans": [ + { + "bbox": [ + 48, + 582, + 287, + 624 + ], + "type": "text", + "content": "[48] Yaxing Wang, Chenshen Wu, Luis Herranz, Joost van de Weijer, Abel Gonzalez-Garcia, and Bogdan Raducanu. Transferring GANs: generating images from limited data. In ECCV, 2018. 2, 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 625, + 287, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 625, + 287, + 666 + ], + "spans": [ + { + "bbox": [ + 48, + 625, + 287, + 666 + ], + "type": "text", + "content": "[49] Jiayu Xiao, Liang Li, Chaofei Wang, Zheng-Jun Zha, and Qingming Huang. Few shot generative model adaption via relaxed spatial structural alignment. In CVPR, 2022. 2, 3, 5, 6" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "type": "text", + "content": "[50] Xingqian Xu, Shant Navasardyan, Vahram Tadevosyan, Andranik Sargsyan, Yadong Mu, and Humphrey Shi. Image completion with heterogeneously filtered spectral hints. In WACV, 2023. 2" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 353 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 115 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 115 + ], + "type": "text", + "content": "[51] Xingqian Xu, Zhangyang Wang, Eric Zhang, Kai Wang, and Humphrey Shi. Versatile Diffusion: Text, images and variations all in one diffusion model. arXiv preprint arXiv:2211.08332, 2022. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 118, + 545, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 149 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 149 + ], + "type": "text", + "content": "[52] Han Zhang, Zizhao Zhang, Augustus Odena, and Honglak Lee. Consistency regularization for generative adversarial networks. In ICLR, 2019. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 152, + 545, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 545, + 184 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 545, + 184 + ], + "type": "text", + "content": "[53] Shengyu Zhao, Zhijian Liu, Ji Lin, Jun-Yan Zhu, and Song Han. Differentiable augmentation for data-efficient GAN training. In NeurIPS, 2020. 2, 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 186, + 545, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 186, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 545, + 217 + ], + "type": "text", + "content": "[54] Zhengli Zhao, Sameer Singh, Honglak Lee, Zizhao Zhang, Augustus Odena, and Han Zhang. Improved consistency regularization for GANs. In AAAI, 2021. 
3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 220, + 545, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 220, + 545, + 251 + ], + "spans": [ + { + "bbox": [ + 307, + 220, + 545, + 251 + ], + "type": "text", + "content": "[55] Zhengli Zhao, Zizhao Zhang, Ting Chen, Sameer Singh, and Han Zhang. Image augmentations for GAN training. arXiv preprint arXiv:2006.02595, 2020. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 254, + 545, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 254, + 545, + 285 + ], + "spans": [ + { + "bbox": [ + 307, + 254, + 545, + 285 + ], + "type": "text", + "content": "[56] Zexuan Zhong, Dan Friedman, and Danqi Chen. Factual probing is [MASK]: Learning vs. learning to recall. In NAACL-HLT, 2021. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 287, + 545, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 287, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 307, + 287, + 545, + 319 + ], + "type": "text", + "content": "[57] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Zwei Liu. Conditional prompt learning for vision-language models. In CVPR, 2022. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 321, + 545, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 321, + 545, + 353 + ], + "spans": [ + { + "bbox": [ + 307, + 321, + 545, + 353 + ], + "type": "text", + "content": "[58] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. IJCV, 2022. 3, 4, 8" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11503" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/Zero-Shot Model Diagnosis/549384d4-f244-4966-8076-15abf189c0a7_content_list.json b/2023/Zero-Shot Model Diagnosis/549384d4-f244-4966-8076-15abf189c0a7_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..453b062e3a817a8be1b0b1e2849a14a0a5601e19 --- /dev/null +++ b/2023/Zero-Shot Model Diagnosis/549384d4-f244-4966-8076-15abf189c0a7_content_list.json @@ -0,0 +1,2560 @@ +[ + { + "type": "text", + "text": "Zero-shot Model Diagnosis", + "text_level": 1, + "bbox": [ + 346, + 130, + 622, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jinqi Luo*", + "bbox": [ + 96, + 180, + 183, + 199 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhaoning Wang*", + "bbox": [ + 215, + 180, + 351, + 199 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Chen Henry Wu", + "bbox": [ + 383, + 181, + 514, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Dong Huang", + "bbox": [ + 555, + 181, + 660, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Fernando De la Torre", + "bbox": [ + 700, + 181, + 870, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Carnegie Mellon University", + "bbox": [ + 372, + 200, + 594, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{jinqil, zhaoning, chenwu2, dghuang, ftorre}@cs.cmu.edu", + "bbox": [ + 
238, + 219, + 723, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 268, + 313, + 284 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "When it comes to deploying deep vision models, the behavior of these systems must be explicable to ensure confidence in their reliability and fairness. A common approach to evaluate deep learning models is to build a labeled test set with attributes of interest and assess how well it performs. However, creating a balanced test set (i.e., one that is uniformly sampled over all the important traits) is often time-consuming, expensive, and prone to mistakes. The question we try to address is: can we evaluate the sensitivity of deep learning models to arbitrary visual attributes without an annotated test set?", + "bbox": [ + 73, + 289, + 468, + 455 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This paper argues the case that Zero-shot Model Diagnosis (ZOOM) is possible without the need for a test set nor labeling. To avoid the need for test sets, our system relies on a generative model and CLIP. The key idea is enabling the user to select a set of prompts (relevant to the problem) and our system will automatically search for semantic counterfactual images (i.e., synthesized images that flip the prediction in the case of a binary classifier) using the generative model. We evaluate several visual tasks (classification, key-point detection, and segmentation) in multiple visual domains to demonstrate the viability of our methodology. Extensive experiments demonstrate that our method is capable of producing counterfactual images and offering sensitivity analysis for model diagnosis without the need for a test set.", + "bbox": [ + 73, + 455, + 470, + 681 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 699, + 207, + 714 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Deep learning models inherit data biases, which can be accentuated or downplayed depending on the model's architecture and optimization strategy. Deploying a computer vision deep learning model requires extensive testing and evaluation, with a particular focus on features with potentially dire social consequences (e.g., non-uniform behavior across gender or ethnicity). Given the importance of the problem, it is common to collect and label large-scale datasets to evaluate the behavior of these models across attributes of interest. Unfortunately, collecting these test", + "bbox": [ + 75, + 724, + 468, + 876 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/3c343307fb18c4ea29ef25ad4087d7b7663b31d9277a7b39fe95b2f82af49943.jpg", + "image_caption": [ + "Figure 1. Given a differentiable deep learning model (e.g., a cat/dog classifier) and user-defined text attributes, how can we determine the model's sensitivity to specific attributes without using labeled test data? Our system generates counterfactual images (bottom right) based on the textual directions provided by the user, while also computing the sensitivity histogram (top right)." + ], + "image_footnote": [], + "bbox": [ + 504, + 268, + 880, + 455 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "datasets is extremely time-consuming, error-prone, and expensive. Moreover, a balanced dataset, that is uniformly distributed across all attributes of interest, is also typically impractical to acquire due to its combinatorial nature. 
Even with careful metric analysis in this test set, no robustness nor fairness can be guaranteed since there can be a mismatch between the real and test distributions [25]. This research will explore model diagnosis without relying on a test set in an effort to democratize model diagnosis and lower the associated cost.", + "bbox": [ + 496, + 566, + 890, + 715 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Counterfactual explainability as a means of model diagnosis is drawing the community's attention [5,20]. Counterfactual images visualize the sensitive factors of an input image that can influence the model's outputs. In other words, counterfactuals answer the question: \"How can we modify the input image $\\mathbf{x}$ (while fixing the ground truth) so that the model prediction would diverge from $\\mathbf{y}$ to $\\hat{\\mathbf{y}}$ ?\". The parameterization of such counterfactuals will provide insights into identifying key factors of where the model fails. Unlike existing image-space adversary techniques [4,18], counterfactuals provide semantic perturbations that are interpretable by humans. However, existing counterfactual studies re", + "bbox": [ + 496, + 719, + 892, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal contribution.", + "bbox": [ + 94, + 886, + 205, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "11631", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "require the user to either collect uniform test sets [10], annotate discovered bias [15], or train a model-specific explanation every time the user wants to diagnose a new model [13].", + "bbox": [ + 75, + 90, + 467, + 136 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "On the other hand, recent advances in Contrastive Language-Image Pretraining (CLIP) [24] can help to overcome the above challenges. CLIP enables text-driven applications that map user text representations to visual manifolds for downstream tasks such as avatar generation [7], motion generation [37] or neural rendering [22, 30]. In the domain of image synthesis, StyleCLIP [21] reveals that text-conditioned optimization in the StyleGAN [12] latent space can decompose latent directions for image editing, allowing for the mutation of a specific attribute without disturbing others. With such capability, users can freely edit semantic attributes conditioned on text inputs. This paper further explores its use in the scope of model diagnosis.", + "bbox": [ + 75, + 136, + 467, + 330 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The central concept of the paper is depicted in Fig. 1. Consider a user interested in evaluating which factors contribute to the lack of robustness in a cat/dog classifier (target model). 
By selecting a list of keyword attributes, the user is able to (1) see counterfactual images where semantic variations flip the target model predictions (see the classifier score in the top-right corner of the counterfactual images) and (2) quantify the sensitivity of each attribute for the target model (see sensitivity histogram on the top). Instead of using a test set, we propose using a StyleGAN generator as the picture engine for sampling counterfactual images. CLIP transforms user's text input, and enables model diagnosis in an open-vocabulary setting. This is a major advantage since there is no need for collecting and annotating images and minimal user expert knowledge. In addition, we are not tied to a particular annotation from datasets (e.g., specific attributes in CelebA [16]).", + "bbox": [ + 75, + 332, + 467, + 588 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To summarize, our proposed work offers three major improvements over earlier efforts:", + "bbox": [ + 76, + 589, + 467, + 619 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The user requires neither a labeled, balanced test dataset, and minimal expert knowledge in order to evaluate where a model fails (i.e., model diagnosis). In addition, the method provides a sensitivity histogram across the attributes of interest.", + "- When a different target model or a new user-defined attribute space is introduced, it is not necessary to retrain our system, allowing for practical use.", + "- The target model fine-tuned with counterfactual images not only slightly improves the classification performance, but also greatly increases the distributional robustness against counterfactual images." + ], + "bbox": [ + 94, + 625, + 467, + 825 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 76, + 844, + 217, + 859 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This section reviews prior work on attribute editing with generative models and recent efforts on model diagnosis.", + "bbox": [ + 76, + 869, + 467, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Attribute Editing with Generative Models", + "text_level": 1, + "bbox": [ + 500, + 90, + 856, + 106 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "With recent progress in generative models, GANs supports high-quality image synthesis, as well as semantic attributes editing [35]. [1, 6] edit the images by perturbing the intermediate latent space encoded from the original images. These methods rely on images to be encoded to latent vectors to perform attribute editing. On the contrary, StyleGAN [12] can produce images by sampling the latent space. Many works have explored ways to edit attributes in the latent space of StyleGAN, either by relying on image annotations [27] or in an unsupervised manner [8, 28]. StyleSpace [34] further disentangles the latent space of StyleGAN and can perform specific attribute edits by disentangled style vectors. Based upon StyleSpace, StyleCLIP [21] builds the connection between the CLIP language space and StyleGAN latent space to enable arbitrary edits specified by the text. Our work adopts this concept for fine-grained attribute editing.", + "bbox": [ + 496, + 109, + 890, + 367 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. 
Model Diagnosis", + "text_level": 1, + "bbox": [ + 500, + 375, + 663, + 390 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To the best of our knowledge, model diagnosis without a test set is a relatively unexplored problem. In the adversarial learning literature, it is common to find methods that show how image-space perturbations [4, 18] flip the model prediction; however, such perturbations lack visual interpretability. [36] pioneers in synthesizing adversaries by GANs. More recently, [9, 23, 26] propose generative methods to synthesize semantically perturbed images to visualize where the target model fails. However, their attribute editing is limited within the dataset's annotated labels. Instead, our framework allows users to easily customize their own attribute space, in which we visualize and quantify the biased factors that affect the model prediction. On the bias detection track, [13] co-trains a model-specific StyleGAN with each target model, and requires human annotators to name attribute coordinates in the Stylespace. [3, 14, 15] synthesize counterfactual images by either optimally traversing the latent space or learning an attribute hyperplane, after which the user will inspect the represented bias. Unlike previous work, we propose to diagnose a deep learning model without any model-specific re-training, new test sets, or manual annotations/inspections.", + "bbox": [ + 496, + 398, + 890, + 729 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 500, + 742, + 589, + 757 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This section firstly describes our method to generate counterfactual images guided by CLIP in a zero-shot manner. We then introduce how we perform the sensitivity analysis across attributes of interest. Fig. 2 shows the overview of our framework.", + "bbox": [ + 496, + 763, + 890, + 838 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3.1. Notation and Problem Definition", + "text_level": 1, + "bbox": [ + 500, + 847, + 785, + 861 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Let $f_{\\theta}$ , parameterized by $\\theta$ , be the target model that we want to diagnose. In this paper, $f_{\\theta}$ denotes two types of", + "bbox": [ + 500, + 869, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "11632", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/247dfc97e41f80fd7d1d2a86fd3efb21ea4c42cb07451c98651a328c990284e9.jpg", + "image_caption": [ + "Figure 2. The ZOOM framework. Black solid lines stand for forward passes, red dashed lines stand for backpropagation, and purple dashed lines stand for inference after the optimization converges. The user inputs single or multiple attributes, and we map them into edit directions with the method in Sec. 3.2. Then we assign to each edit direction (attribute) a weight, which represents how much we are adding/removing this attribute. We iteratively perform adversarial learning on the attribute space to maximize the counterfactual effectiveness." + ], + "image_footnote": [], + "bbox": [ + 112, + 85, + 857, + 258 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "deep nets: binary attribute classifiers and face keypoint detectors. Note that our approach is extendable to any end-to-end differentiable target deep models. 
Let $\\mathcal{G}_{\\phi}$ , parameterized by $\\phi$ , be the style generator that synthesizes images by $\\mathbf{x} = \\mathcal{G}_{\\phi}(\\mathbf{s})$ where $\\mathbf{s}$ is the style vector in Style Space $S$ [34]. We denote a counterfactual image as $\\hat{\\mathbf{x}}$ , which is a synthesized image that misleads the target model $f_{\\theta}$ , and denote the original reference image as $\\mathbf{x}$ . $a$ is defined as a single user input text-based attribute, with its domain $\\mathcal{A} = \\{a_i\\}_{i=1}^N$ for $N$ input attributes. $\\hat{\\mathbf{x}}$ and $\\mathbf{x}$ differs only along attribute directions $\\mathcal{A}$ . Given a set of $\\{f_{\\theta}, \\mathcal{G}_{\\phi}, \\mathcal{A}\\}$ , our goal is to perform counterfactual-based diagnosis to interpret where the model fails without manually collecting nor labeling any test set. Unlike traditional approaches of image-space noises which lack explainability to users, our method adversarially searches the counterfactual in the user-designed semantic space. To this end, our diagnosis will have three outputs, namely counterfactual images (Sec. 3.3), sensitivity histograms (Sec. 3.4), and distributionally robust models (Sec. 3.5).", + "bbox": [ + 75, + 348, + 472, + 650 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Extracting Edit Directions", + "text_level": 1, + "bbox": [ + 76, + 657, + 316, + 672 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This section examines the terminologies, method, and modification we adopt in ZOOM to extract suitable global directions for attribute editing. Since CLIP has shown strong capability in disentangling visual representation [19], we incorporate style channel relevance from Style-CLIP [21] to find edit directions for each attribute.", + "bbox": [ + 75, + 676, + 468, + 767 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given the user's input strings of attributes, we want to find an image manipulation direction $\\Delta \\mathbf{s}$ for any $\\mathbf{s} \\sim \\mathcal{S}$ , such that the generated image $\\mathcal{G}_{\\phi}(\\mathbf{s} + \\Delta \\mathbf{s})$ only varies in the input attributes. Recall that CLIP maps strings into a text embedding $\\mathbf{t} \\in \\mathcal{T}$ , the text embedding space. For a string attribute description $a$ and a neutral prefix $p$ , we obtain the CLIP text embedding difference $\\Delta \\mathbf{t}$ by:", + "bbox": [ + 75, + 768, + 470, + 875 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\Delta \\mathbf {t} = \\operatorname {C L I P} _ {\\text {t e x t}} (p \\oplus a) - \\operatorname {C L I P} _ {\\text {t e x t}} (p) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 140, + 880, + 468, + 897 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\oplus$ is the string concatenation operator. To take 'Eyeglasses' as an example, we can get $\\Delta t = \\mathrm{CLIP}_{\\mathrm{text}}$ (a face with Eyeglasses) - $\\mathrm{CLIP}_{\\mathrm{text}}$ (a face).", + "bbox": [ + 498, + 348, + 890, + 393 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To get the edit direction, $\\Delta \\mathbf{s}$ , we need to utilize a style relevance mapper $\\mathbf{M} \\in \\mathbb{R}^{c_S \\times c_T}$ to map between the CLIP text embedding vectors of length $c_{\\mathcal{T}}$ and the Style space vector of length $c_{\\mathcal{S}}$ . 
StyleCLIP optimizes $\\mathbf{M}$ by iteratively searching meaningful style channels: mutating each channel in $\\mathcal{S}$ and encoding the mutated images by CLIP to assess whether there is a significant change in $\\mathcal{T}$ space. To prevent undesired edits that are irrelevant to the user prompt, the edit direction $\\Delta \\mathbf{s}$ will filter out channels that the style value change is insignificant:", + "bbox": [ + 496, + 393, + 892, + 545 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\Delta \\mathbf {s} = (\\mathbf {M} \\cdot \\Delta \\mathbf {t}) \\odot \\mathbb {1} ((\\mathbf {M} \\cdot \\Delta \\mathbf {t}) > \\lambda), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 568, + 554, + 890, + 570 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\lambda$ is the hyper-parameter for the threshold value. $\\mathbb{1}(\\cdot)$ is the indicator function, and $\\odot$ is the element-wise product operator. Since the success of attribute editing by the extracted edit directions will be the key to our approach, Appendix A will show the capability of CLIP by visualizing the global edit direction on multiple sampled images, conducting the user study, and analyzing the effect of $\\lambda$ .", + "bbox": [ + 496, + 580, + 890, + 686 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.3. Style Counterfactual Synthesis", + "text_level": 1, + "bbox": [ + 500, + 694, + 772, + 710 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Identifying semantic counterfactuals necessitates a manageable parametrization of the semantic space for effective exploration. For ease of notation, we denote $(\\Delta \\mathbf{s})_i$ as the global edit direction for $i^{th}$ attribute $a_i \\in \\mathcal{A}$ from the user input. After these $N$ attributes are provided and the edit directions are calculated, we initialize the control vectors $\\mathbf{w}$ of length $N$ where the $i^{th}$ element $w_i$ controls the strength of the $i^{th}$ edit direction. Our counterfactual edit will be a linear combination of normalized edit directions: $\\mathbf{s}_{edit} = \\sum_{i=1}^{N} w_i \\frac{(\\Delta \\mathbf{s})_i}{||(\\Delta \\mathbf{s})_i||}$ .", + "bbox": [ + 496, + 718, + 890, + 871 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The black arrows in Fig. 2 show the forward inference to synthesize counterfactual images. Given the parametriza", + "bbox": [ + 500, + 871, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "11633", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "tion of attribute editing strengths and the final loss value, our framework searches for counterfactual examples in the. \noptimizable edit weight space. 
The original sampled image is $\\mathbf{x} = G_{\\phi}(\\mathbf{s})$ , and the counterfactual image is", + "bbox": [ + 75, + 90, + 468, + 151 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\mathbf {x}} = G _ {\\phi} (\\mathbf {s} + \\mathbf {s} _ {e d i t}) = G _ {\\phi} \\left(\\mathbf {s} + \\sum_ {i = 1} ^ {N} w _ {i} \\frac {(\\Delta \\mathbf {s}) _ {i}}{| | (\\Delta \\mathbf {s}) _ {i} | |}\\right), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 91, + 160, + 468, + 200 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "which is obtained by minimizing the following loss, $\\mathcal{L}$ , that is the weighted sum of three terms:", + "bbox": [ + 76, + 208, + 468, + 239 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} (\\mathbf {s}, \\mathbf {w}) = \\alpha \\mathcal {L} _ {\\text {t a r g e t}} (\\hat {\\mathbf {x}}) + \\beta \\mathcal {L} _ {\\text {s t r u c t}} (\\hat {\\mathbf {x}}) + \\gamma \\mathcal {L} _ {\\text {a t t r}} (\\hat {\\mathbf {x}}). \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 86, + 247, + 468, + 266 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We back-propagate to optimize $\\mathcal{L}$ w.r.t the weights of the edit directions $\\mathbf{w}$ , shown as the red pipeline in Fig. 2.", + "bbox": [ + 76, + 272, + 468, + 301 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The targeted adversarial loss $\\mathcal{L}_{target}$ for binary attribute classifiers minimizes the distance between the current model prediction $f_{\\theta}(\\hat{\\mathbf{x}})$ with the flip of original prediction $\\hat{p}_{cls} = 1 - f_{\\theta}(\\mathbf{x})$ . In the case of an eyeglass classifier on a person wearing eyeglasses, $\\mathcal{L}_{target}$ will guide the optimization to search w such that the model predicts no eyeglasses. For a keypoint detector, the adversarial loss will minimize the distance between the model keypoint prediction with a set of random points $\\hat{p}_{kp} \\sim \\mathcal{N}$ :", + "bbox": [ + 75, + 303, + 468, + 439 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "(binary classifier) $\\mathcal{L}_{target}(\\hat{\\mathbf{x}}) = L_{CE}(f_{\\theta}(\\hat{\\mathbf{x}}),\\hat{p}_{cls})$ (5)", + "bbox": [ + 84, + 446, + 468, + 464 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "(keypoint detector) $\\mathcal{L}_{target}(\\hat{\\mathbf{x}}) = L_{MSE}(f_{\\theta}(\\hat{\\mathbf{x}}),\\hat{p}_{kp})$ (6)", + "bbox": [ + 84, + 467, + 468, + 484 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "If we only optimize $\\mathcal{L}_{\\text {target }}$ w.r.t the global edit directions, it is possible that the method will not preserve image statistics of the original image and can include the particular attribute that we are diagnosing. To constrain the optimization, we added a structural loss $\\mathcal{L}_{\\text {struct }}$ and an attribute consistency loss $\\mathcal{L}_{\\text {attr }}$ to avoid generation collapse. $\\mathcal{L}_{\\text {struct }}$ [32] aims to preserve global image statistics of the original image x including image contrasts, background, or shape identity during the adversarial editing. While $\\mathcal{L}_{\\text {attr }}$ enforces that the target attribute (perceived ground truth) be consistent on the style edits. 
For example, when diagnosing the eyeglasses classifier, ZOOM preserves the original status of eyeglasses and precludes direct eyeglasses addition/removal.", + "bbox": [ + 75, + 491, + 468, + 700 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {s t r u c t}} (\\hat {\\mathbf {x}}) = L _ {\\text {S S I M}} (\\hat {\\mathbf {x}}, \\mathbf {x}) \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 135, + 710, + 468, + 727 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {a t t r}} (\\hat {\\mathbf {x}}) = L _ {C E} \\left(\\operatorname {C L I P} (\\hat {\\mathbf {x}}), \\operatorname {C L I P} (\\mathbf {x})\\right) \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 148, + 729, + 468, + 746 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Given a pretrained target model $f_{\\theta}$ , a domain-specific style generator $G_{\\phi}$ , and a text-driven attribute space $\\mathcal{A}$ , our goal is to sample an original style vector $\\mathbf{s}$ for each image and search its counterfactual edit strength $\\hat{\\mathbf{w}}$ :", + "bbox": [ + 76, + 755, + 468, + 816 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\mathbf {w}} = \\underset {\\mathbf {w}} {\\operatorname {a r g m i n}} \\mathcal {L} (\\mathbf {s}, \\mathbf {w}). \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 197, + 824, + 468, + 848 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Unless otherwise stated, we iteratively update $\\mathbf{w}$ as:", + "bbox": [ + 76, + 854, + 419, + 869 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {w} = \\operatorname {c l a m p} _ {[ - \\epsilon , \\epsilon ]} (\\mathbf {w} - \\eta \\nabla_ {\\mathbf {w}} \\mathcal {L}), \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 166, + 878, + 468, + 897 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\eta$ is the step size and $\\epsilon$ is the clamp bound to avoid synthesis collapse caused by exaggerated edit. Note that the maximum counterfactual effectiveness does not indicate the maximum edit strength (i.e., $w_{i} = \\epsilon$ ), since the attribute edit direction does not necessarily overlap with the target classifier direction. The attribute change is bi-directional, as the $w_{i}$ can be negative in Eq. 3. Details of using other optimization approaches (e.g., linear approximation [18]) will be discussed in Appendix C.", + "bbox": [ + 496, + 90, + 890, + 227 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.4. Attribute Sensitivity Analysis", + "text_level": 1, + "bbox": [ + 500, + 238, + 761, + 255 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Single-attribute counterfactual reflects the sensitivity of target model on the individual attribute. By optimizing independently along the edit direction for a single attribute and averaging the model probability changes over images, our model generates independent sensitivity score $h_i$ for each attribute $a_i$ :", + "bbox": [ + 496, + 262, + 890, + 352 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nh _ {i} = \\mathbb {E} _ {\\mathbf {x} \\sim \\mathcal {P} (\\mathbf {x}), \\hat {\\mathbf {x}} = \\mathrm {Z O O M} (\\mathbf {x}, a _ {i})} | f _ {\\theta} (\\mathbf {x}) - f _ {\\theta} (\\hat {\\mathbf {x}}) |. 
\\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 531, + 367, + 890, + 385 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The sensitivity score $h_i$ is the probability difference between the original image $\\mathbf{x}$ and generated image $\\hat{\\mathbf{x}}$ , at the most counterfactual point when changing attribute $a_i$ . We synthesize a number of images from $\\mathcal{G}_{\\phi}$ , then iteratively compute the sensitivity for each given attribute, and finally normalize all sensitivities to draw the histogram as shown in Fig. 4. The histogram indicates the sensitivity of the evaluated model $f_{\\theta}$ on each of the user-defined attributes. Higher sensitivity of one attribute means that the model is more easily affected by that attribute.", + "bbox": [ + 496, + 396, + 890, + 547 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.5. Counterfactual Training", + "text_level": 1, + "bbox": [ + 500, + 559, + 723, + 575 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The multi-attribute counterfactual approach visualizes semantic combinations that cause the model to falter, providing valuable insights for enhancing the model's robustness. We naturally adopt the concept of iterative adversarial training [18] to robustify the target model. For each iteration, ZOOM receives the target model parameter and returns a batch of mutated counterfactual images with the model's original predictions as labels. Then the target model will be trained on the counterfactually-augmented images to achieve the robust goal:", + "bbox": [ + 496, + 583, + 890, + 734 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\theta^ {*} = \\underset {\\theta} {\\operatorname {a r g m i n}} \\mathbb {E} _ {\\mathbf {x} \\sim \\mathcal {P} (\\mathbf {x}), \\hat {\\mathbf {x}} = \\operatorname {Z O O M} (\\mathbf {x}, A)} L _ {C E} \\left(f _ {\\theta} (\\hat {\\mathbf {x}}), f _ {\\theta} (\\mathbf {x})\\right) \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 509, + 746, + 890, + 767 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where batches of $\\mathbf{x}$ are randomly sampled from the StyleGAN generator $\\mathcal{G}_{\\phi}$ . In the following, we abbreviate the process as Counterfactual Training (CT). Note that, although not explicitly expressed in Eq. 12, the CT process is a min-max game. 
ZOOM synthesizes counterfactuals to maximize the variation of model prediction (while persevering the perceived ground truth), and the target model is learned with the counterfactual images to minimize the variation.", + "bbox": [ + 496, + 779, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "11634", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/382f5252f5eef69ed8ac56fa86f515853e05d632939c48c6660966221ace8272.jpg", + "image_caption": [ + "Open Mouth" + ], + "image_footnote": [], + "bbox": [ + 83, + 88, + 163, + 150 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/6e6f7765c0c6d9f1d2dfdc190d779bfe664e680f1fa60e96badfb38c36bde1f4.jpg", + "image_caption": [ + "$\\frac{1}{2}x - 1 > 0$" + ], + "image_footnote": [], + "bbox": [ + 163, + 88, + 243, + 150 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/d58c5d6850d439ce2439ccb870594aa331b04646e7cdef3bd0558f01e41331d5.jpg", + "image_caption": [ + "$\\frac{1}{2}x - 1 > 0$" + ], + "image_footnote": [], + "bbox": [ + 243, + 88, + 323, + 150 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/71d46012b365d52f94b9082a48b36a98cd58e025a583732997d1de6c2f945647.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 323, + 88, + 401, + 150 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/e52e69fb3e1a7bb5d2200ba25acd415cca0e9d8625a7a2ccf1e06fd40de18944.jpg", + "image_caption": [ + "Closed Mouth" + ], + "image_footnote": [], + "bbox": [ + 401, + 88, + 480, + 150 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/5998e6f16297004dcded0e568b7cc7fa1aeb1d4478fa0d868cedde8c94e8abb9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 482, + 88, + 560, + 150 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/f8433beb32c5896e06a7aa642d6d2e15c4fb1518e7fd0631894bb952d4187d32.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 560, + 88, + 640, + 150 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/b6dd9a6086f785bfc6b1ac4c27d8833b5f9c5b1ab95041790dcf663b8f50f846.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 640, + 88, + 720, + 150 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/28703eb187d242dc891d55db71d5efddeceab3e8980a5c1277a0ce6afedee26e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 720, + 88, + 800, + 150 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/7614e0fc2876e9a275a20e70f7ed9a9f2b21e3d09bda32999fac3b9d7e2e8d29.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 802, + 88, + 882, + 150 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/6a3a3474601bb128b509ebe8645c0463326834db7d238ec593ff709a98712b77.jpg", + "image_caption": [ + "Felidae Pupil", + "$\\frac{3}{1} + u + {4q} = 1 + u + {uq}$ dH" + ], + "image_footnote": [], + "bbox": [ + 83, + 162, + 163, + 224 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/4d6861e565df844de868c6952bcbc37a40413ae51cdc59f8416074b34ef31c4e.jpg", + "image_caption": [ + "$\\frac{3}{1} + u + {4q} = 1 + u + {uq}$ dH" + ], + "image_footnote": [], + "bbox": [ + 163, + 162, + 243, + 224 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/4d8a7695d44eb9f470f709190f28a206de0c1aab536a4b0437ec3bc92cf5bd11.jpg", + "image_caption": [ + "$\\frac{3}{1} + u + {4q} = 1 + u + {uq}$ dH" + ], + "image_footnote": 
[], + "bbox": [ + 243, + 162, + 323, + 224 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/d057aae1bdbe74f1ef3ade2252fe5600868dacc63d10ffa80489d5108f2465cd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 323, + 162, + 401, + 224 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/7f0ee7ecc4f01729ebc6c197b05497ab9e0d2c9ad1e1974529aaba7ff496842a.jpg", + "image_caption": [ + "Canidae Pupil" + ], + "image_footnote": [], + "bbox": [ + 401, + 162, + 480, + 224 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/950ca79c0a21d73328cb6c2141ef3a183badae2ab7c780a9fdaf31d8eff33e9a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 482, + 162, + 560, + 224 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/4a3892a846507c7e850b5c42d892d2c92afc49f1ac4bfb0c9247b91e1e0e3782.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 560, + 162, + 640, + 224 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/fff7032c14f3dd2afb30d5725689aeb7224f00b1835d24d2086789fd848604f4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 640, + 162, + 720, + 224 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/f4ee52e5caa6487a1c008a41983ab01976ff119b38dd1cfdc50448e4f2284da8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 720, + 162, + 800, + 224 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/f01f0ae813036598b13cd750bae2ac75ae2ade78cfe29545fa92258a290b064b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 802, + 162, + 882, + 224 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/fa8994a05b33e168543b91bca0c47422be0966914496d537e87c679df5e3861f.jpg", + "image_caption": [ + "Figure 3. Effect of progressively generating counterfactual images on (left) cat/dog classifier (0-Cat / 1-Dog), and (right) perceived age classifier (0-Senior / 1-Young). Model probability prediction during the process is attached at the top right corner." + ], + "image_footnote": [], + "bbox": [ + 76, + 277, + 277, + 383 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/727877f509e7e5d67c5b05d7d903a72ee905c64a7b8dbdfad753de413e2325fe.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 279, + 277, + 478, + 383 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/b77fcf9e992f6c5a9b18b69d5691eb5072855ddebb3e5b44b5ca39ecb67ab12b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 488, + 277, + 681, + 383 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/5600c547b9caf898459903860dd888f4bcb0a9050f29754b6b7151f43b1597df.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 684, + 277, + 883, + 383 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/1847122c4f8c41b0eb8a40cf92eecf152e7305a956b9ffb3b71751c5c69b6fd7.jpg", + "image_caption": [ + "(a) Model diagnosis histograms generated by ZOOM on four facial attribute classifiers.", + "(b) Model diagnosis histograms generated by ZOOM on four classifiers trained on manually-crafted imbalance data.", + "Figure 4. Model diagnosis histograms generated by ZOOM. The vertical axis values reflect the attribute sensitivities calculated by averaging the model probability change over all sampled images. The horizontal axis is the attribute space input by user." 
+ ], + "image_footnote": [], + "bbox": [ + 76, + 398, + 277, + 500 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/7aefa2cf30afb6773a8d83624c5cc365680c7a839dfb50d5f30e1c4037506cf3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 279, + 398, + 478, + 500 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/3b29ecd3f3d5ddb487e1fec77c9ab7e51b89f73310a02eefdbed083bdd755f27.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 482, + 398, + 681, + 500 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/e39ef17850764b0a9307a0e37db6c248af616552d7b6550dc1ceacd5997a6f2f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 684, + 398, + 883, + 500 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experimental Results", + "text_level": 1, + "bbox": [ + 75, + 569, + 281, + 585 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This section describes the experimental validations on the effectiveness and reliability of ZOOM. First, we describe the model setup in Sec. 4.1. Sec. 4.2 and Sec. 4.3 visualize and validate the model diagnosis results for the single-attribute setting. In Sec. 4.4, we show results on synthesized multiple-attribute counterfactual images and apply them to counterfactual training.", + "bbox": [ + 75, + 594, + 468, + 700 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Model Setup", + "text_level": 1, + "bbox": [ + 75, + 708, + 210, + 724 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Pre-trained models: We used Stylegan2-ADA [11] pretrained on FFHQ [12] and AFHQ [1] as our base generative networks, and the pre-trained CLIP model [24] which is parameterized by ViT-B/32. We followed StyleCLIP [21] setups to compute the channel relevance matrices $\\mathcal{M}$ .", + "bbox": [ + 75, + 731, + 468, + 806 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Target models: Our classifier models are ResNet50 with single fully-connected head initialized by TorchVision1. In training the binary classifiers, we use the Adam optimizer with learning rate 0.001 and batch size 128. We train binary", + "bbox": [ + 75, + 806, + 468, + 867 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "classifiers for Eyeglasses, Perceived Gender, Mustache, Perceived Age attributes on CelebA and for cat/dog classification on AFHQ. For the 98-keypoint detectors, we used the HR-Net architecture [31] on WFLW [33].", + "bbox": [ + 498, + 571, + 890, + 631 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. Visual Model Diagnosis: Single-Attribute", + "text_level": 1, + "bbox": [ + 498, + 646, + 852, + 662 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Understanding where deep learning model fails is an essential step towards building trustworthy models. Our proposed work allows us to generate counterfactual images (Sec. 3.3) and provide insights on sensitivities of the target model (Sec. 3.4). This section visualizes the counterfactual images in which only one attribute is modified at a time.", + "bbox": [ + 496, + 672, + 890, + 762 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Fig. 3 shows the single-attribute counterfactual images. Interestingly (but not unexpectedly), we can see that reducing the hair length or joyfulness causes the age classifier more likely to label the face to an older person. 
Note that our approach is extendable to multiple domains: as we change the cat-like pupil to a dog-like one, the cat/dog classifier's prediction shifts towards the dog class. Using the counterfactual images, we can conduct model diagnosis with the method mentioned in Sec. 3.4, on which attributes the model is sen", + "bbox": [ + 496, + 763, + 890, + 898 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "1https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/", + "bbox": [ + 75, + 875, + 457, + 898 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "11635", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "sitive to. In the histogram generated in model diagnosis, a higher bar means the model is more sensitive toward the corresponding attribute.", + "bbox": [ + 75, + 90, + 468, + 136 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Fig. 4a shows the model diagnosis histograms on regularly-trained classifiers. For instance, the cat/dog classifier histogram shows outstanding sensitivity to green eyes and vertical pupil. The analysis is intuitive since these are cat-biased traits rarely observed in dog photos. Moreover, the histogram of the eyeglasses classifier shows that the mutation on bushy eyebrows is more influential for flipping the model prediction. It potentially reveals the positional correlation between eyeglasses and bushy eyebrows. The advantage of single-attribute model diagnosis is that the score of each attribute in the histogram is independent of the other attributes, enabling an unambiguous understanding of the exact semantics that make the model fail. Diagnosis results for additional target models can be found in Appendix B.", + "bbox": [ + 75, + 137, + 470, + 348 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Validation of Visual Model Diagnosis", + "text_level": 1, + "bbox": [ + 76, + 358, + 397, + 375 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluating whether our zero-shot sensitivity histograms (Fig. 4) explain the true vulnerability is a difficult task, since we do not have access to a sufficiently large and balanced test set fully annotated in an open-vocabulary setting. To verify the performance, we create synthetically imbalanced cases where the model bias is known. We then compare our results with a supervised diagnosis setting [17]. In addition, we will validate the decoupling of the attributes by CLIP.", + "bbox": [ + 75, + 382, + 468, + 503 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3.1 Creating imbalanced classifiers", + "text_level": 1, + "bbox": [ + 76, + 516, + 348, + 531 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In order to evaluate whether our sensitivity histogram is correct, we train classifiers that are highly imbalanced towards a known attribute and see whether ZOOM can detect the sensitivity w.r.t the attribute. For instance, when training the perceived-age classifier (binarized as Young in CelebA), we created a dataset on which the trained classifier is strongly sensitive to Bangs (hair over forehead). The custom dataset is a CelebA training subset that consists of 20,200 images. More specifically, there are 10,000 images of people who are both young and have bangs, represented as (1, 1), and 10,000 images of people who are not young and have no bangs, represented as (0, 0). The remaining combinations of (1, 0) and (0, 1) have only 100 images.
With this imbalanced dataset, bangs is the attribute that dominantly correlates with whether the person is young, and hence the perceived-age classifier would be highly sensitive towards bangs. See Fig. 5 (the right histograms) for an illustration of the sensitivity histogram computed by our method for the case of an age classifier with bangs (top) and lipstick (bottom) being imbalanced.", + "bbox": [ + 75, + 537, + 468, + 838 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We trained multiple imbalanced classifiers with this methodology, and visualize the model diagnosis histograms of these imbalanced classifiers in Fig. 4b. We can observe that the ZOOM histograms successfully detect the", + "bbox": [ + 75, + 839, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/03c482988bb95c3fb2b913e7d04e7753f129604886046e43297446927fd39540.jpg", + "image_caption": [ + "Figure 5. The sensitivity of the age classifier is evaluated with ZOOM (right) and AttGAN (left), achieving comparable results." + ], + "image_footnote": [], + "bbox": [ + 500, + 90, + 888, + 337 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/868104e74e862e5874b0fc8bdea2db78e9c3a4eddc9dca8eb7eac1b667dd688e.jpg", + "image_caption": [ + "(a) Mustache classifier", + "Figure 6. Confusion matrix of CLIP score variation (vertical axis) when perturbing attributes (horizontal axis). This shows that attributes in ZOOM are highly decoupled." + ], + "image_footnote": [], + "bbox": [ + 506, + 383, + 691, + 518 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/dcbaf559d890ab0d776db1dfd7c533b9d30df76634298628b22e1c7d2c277833.jpg", + "image_caption": [ + "(b) Perceived age classifier" + ], + "image_footnote": [], + "bbox": [ + 697, + 383, + 885, + 518 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "synthetically-made bias, which are shown as the highest bars in histograms. See the caption for more information.", + "bbox": [ + 498, + 595, + 890, + 627 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3.2 Comparison with supervised diagnosis", + "text_level": 1, + "bbox": [ + 500, + 638, + 821, + 652 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We also validated our histogram by comparing it with the case in which we have access to a generative model that has been explicitly trained to disentangle attributes. We follow the work on [17] and used AttGAN [6] trained on the CelebA training set over 15 attributes2. After the training converged, we performed adversarial learning in the attribute space of AttGAN and create a sensitivity histogram using the same approach as Sec. 3.4. Fig. 5 shows the result of this method on the perceived-age classifier which is made biased towards bangs. As anticipated, the AttGAN histogram (left) corroborates the histogram derived from our method (right). Interestingly, unlike ZOOM, AttGAN show less sensitivity to remaining attributes. 
This is likely", + "bbox": [ + 496, + 657, + 890, + 854 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "$^{2}$ Bald, Bangs, Black_Hair, Blond_Hair, Brown_Hair, Bushy_Eyesbrows, Eyeglasses, Male, Mouth_Slightly_Open, Mustache, No_Board, Pale_Skin, Young, Smiling, Wearing_Lipstick.", + "bbox": [ + 500, + 862, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "11636", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/971f72c88bb26074e4f23622c9671dc69ad2c5c8ef52b1de0e3abff7f432ed84.jpg", + "image_caption": [ + "Counterfactual Original" + ], + "image_footnote": [], + "bbox": [ + 98, + 102, + 176, + 164 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/59a77b820be44c6ef95e06c79ed435d4aea09f30e59a51a44334ce5918612f50.jpg", + "image_caption": [ + "Figure 7. Multi-attribute counterfactual in faces. The model probability is documented in the upper right corner of each image." + ], + "image_footnote": [], + "bbox": [ + 98, + 165, + 176, + 223 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f9405d19790d274a6d1931829f8e4e575bd3f7cc28b5941c5c136c6e6fe50c9f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 176, + 102, + 254, + 164 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0b74918e51861880fb6780c140d8792c3dd7328306aa4b189e381e22a0959b59.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 176, + 165, + 254, + 223 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/62d9231115f02f5bfbba46a4a4e0d5568e7376856e2196457f2c2cadb092c30d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 256, + 102, + 333, + 164 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/95c9e54324f64ecb0c2b49a24c3925de7336d3b45bf0e60163eb774073f3797e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 256, + 165, + 333, + 223 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/35b5f6e20e6167a44791b6cb91234ca74c030f5e4c36171689c395c8065cd4e6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 102, + 411, + 164 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/97e45ff9d147e87912d810e4d276a4c527a7d1ad64ea900907f4c91ed774a842.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 165, + 411, + 223 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/da9051d7fff8dc80e0c62ad7ee8bb1f22aee7c66ceee600246b30ecd46b68ec9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 413, + 102, + 490, + 164 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/fb66c5f6fd296b219f4e39383cf0d5e8611263f518e77b88dcc07bac8524b5fb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 413, + 165, + 490, + 223 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/64ca890b2eb603a0c45fbe34384d65cb3f74b4fb1c434434a634b531abed726f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 491, + 102, + 570, + 164 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/75a69bd63b6bb345d672a5f742d694288aff8578f206e84032f24c69b9a9b894.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 491, + 165, + 570, + 223 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ca61c210042de2fc0e449802d99fb5d4d50854952d4b0d1c3b744eeb1bc247eb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": 
[ + 571, + 102, + 650, + 164 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/946eed3c0bbd26dbe64a2cfba69b326b2847fd1f8cba97cbe6ac25e8917ff046.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 571, + 165, + 650, + 223 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d123ff6aac734384ccd47ae0c44a41de25a370497a5d42d8c5f027dda49c9db5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 651, + 102, + 730, + 164 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0ef9843e612c9e58dda93b42683eadb3af0fd3f8500c8a006c5c82c974bc2501.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 651, + 165, + 730, + 223 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/117f9d63d2885bbdb0be3707734cbab387bc97bca6cd5b6a8ae619f0b9996fa3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 732, + 102, + 807, + 164 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f48d716b2acde35b9f94c46749523a8fb01e8260331bb184c25132e442f4c22e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 732, + 165, + 807, + 223 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c909bca5f6f918435a78f23e7f3cbf31b341cf4d888a1eecd8568c29216efa66.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 810, + 102, + 887, + 164 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2e4a4d43d388288a2a74b5386a43481c22c06d07edd49bac51178d01e8a5a1bf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 810, + 165, + 887, + 223 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "because AttGAN has a latent space learned in a supervised manner and hence attributes are better disentangled than with StyleGAN. Note that AttGAN is trained with a fixed set of attributes; if a new attribute of interest is introduced, the dataset needs to be re-labeled and AttGAN retrained. ZOOM, however, merely calls for the addition of a new text prompt. More results in Appendix B.", + "bbox": [ + 75, + 263, + 467, + 368 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3.3 Measuring disentanglement of attributes", + "text_level": 1, + "bbox": [ + 76, + 380, + 413, + 393 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Previous works demonstrated that the StyleGAN's latent space can be entangled [2, 27], adding undesired dependencies when searching single-attribute counterfactuals. This section verifies that our framework can disentangle the attributes and mostly edit the desirable attributes.", + "bbox": [ + 75, + 400, + 467, + 474 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We use CLIP as a super annotator to measure attribute changes during single-attribute modifications. For 1,000 images, we record the attribute change after performing adversarial learning in each attribute, and average the attribute score change. Fig. 6 shows the confusion matrix during single-attribute counterfactual synthesis. The horizontal axis is the attribute being edited during the optimization, and the vertical axis represents the CLIP prediction changed by the process. For instance, the first column of Fig. 6a is generated when we optimize over bangs for the mustache classifier. We record the CLIP prediction variation. It clearly shows that bangs is the dominant attribute changing during the optimization. From the main diagonal of matrices, it is evident that the ZOOM mostly perturbs the attribute of interest. 
The results indicate reasonable disentanglement among attributes.", + "bbox": [ + 75, + 476, + 467, + 715 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Visual Model Diagnosis: Multi-Attributes", + "text_level": 1, + "bbox": [ + 76, + 726, + 431, + 741 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In the previous sections, we have visualized and validated single-attribute model diagnosis histograms and counterfactual images. In this section, we will assess ZOOM's ability to produce counterfactual images by concurrently exploring multiple attributes within $\\mathcal{A}$ , the domain of user-defined attributes. The approach conducts multi-attribute counterfactual searches across various edit directions, identifying distinct semantic combinations that result in the target model's failure. By doing so, we can effectively create more powerful counterfactuals images (see Fig. 9).", + "bbox": [ + 75, + 750, + 467, + 900 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d63f194e849545ee4d327760565ffb442da574671e4f852701c57f64f75ccd06.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 271, + 576, + 316 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2a8b19bda7e3c098f3b7515ecb4c437e82aff219568f13b46ca980369ed954b0.jpg", + "image_caption": [ + "Figure 8. Multi-attribute counterfactual on Cat/Dog classifier. The number in each image is the predicted probability of being a dog." + ], + "image_footnote": [], + "bbox": [ + 503, + 316, + 576, + 364 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/97575becca7c2b0a58f05e7bd3efea1ce8f805407f9a9a8bc5b0b8fa133de821.jpg", + "image_caption": [ + "Cat / Dog Classifier (0-Cat / 1-Dog)" + ], + "image_footnote": [], + "bbox": [ + 578, + 271, + 640, + 316 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/9f015960384356c923f938a96ad155d7920e2f40848154f6927b5ca61d80c9da.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 578, + 318, + 640, + 364 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/9dc79a7da6f29d71c72a5adf20673275f04537bf9eea485fde0a25d54eae61fd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 640, + 271, + 702, + 316 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b0ef6e22515f9fbe3fe524912b72ea323ef911ac09d0c03ab0e227513c25dffc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 640, + 318, + 702, + 364 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/eacf43272b02d9d8200813f9d64795ddfee3402d5fde9196f88ee01631e050d5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 702, + 271, + 764, + 316 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/00c3b77821922449ec954cf2a408c8a712da33871a82ab4f911ece894aeb4d58.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 702, + 318, + 764, + 364 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/73ef0f2cf59bc1d4b27d02b70df889a8d0b602bd88ce7e79c4b3b4e6e43cb0e9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 764, + 271, + 826, + 316 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/44bba462e0e42241c95319ec2b01d11c8a422c41b0ce523d8f3136a9bc721633.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 764, + 318, + 826, + 364 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": 
"images/93e5220bf68cbf225c272fed33def41a45c1fad35b0f715590fc34da455c23ed.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 828, + 271, + 887, + 316 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b8335552498ed460f4d22b2fd05b6d4609a5b26d9c180d8d4bc002a607f780d4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 828, + 318, + 887, + 364 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/5d137f7d3aa9d683b7868c27d6b50d55214821f413f2a649b34ec317e8ccc450.jpg", + "image_caption": [ + "Original Reference" + ], + "image_footnote": [], + "bbox": [ + 503, + 419, + 580, + 477 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/cd94d5ea6748597fb9c03d4f85d7fe91a324a817224d3587d29b461780e9954f.jpg", + "image_caption": [ + "SAC by Beard" + ], + "image_footnote": [], + "bbox": [ + 581, + 419, + 656, + 477 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ef8f24a423832c377b502751ea1a6ec5c02870f6c9bf6f57ca957e36e35ccf1e.jpg", + "image_caption": [ + "SAC by Pale Skin" + ], + "image_footnote": [], + "bbox": [ + 658, + 419, + 733, + 477 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/14f558865c5e0601cdaa01a946868a1455678b57180ec80975d4ed1ececa0a91.jpg", + "image_caption": [ + "SAC by Black Hair" + ], + "image_footnote": [], + "bbox": [ + 735, + 419, + 812, + 477 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d503aca254f8fbd0ba1ddfc316957c35f64f0e256656c8f21634b487f29c57bd.jpg", + "image_caption": [ + "Multiple-Attribute" + ], + "image_footnote": [], + "bbox": [ + 813, + 419, + 890, + 477 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/4175e75073b501fe5f39001245d583575431f4cbce9915be62cc70647315c29c.jpg", + "image_caption": [ + "Original Reference" + ], + "image_footnote": [], + "bbox": [ + 503, + 494, + 580, + 553 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/5d65bb23276b3a3c12c60e02c786251d3ba8fc8c89d311fea013e13ecf6352fa.jpg", + "image_caption": [ + "SAC by Lips Color" + ], + "image_footnote": [], + "bbox": [ + 581, + 494, + 656, + 553 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3fa0538d80ef39f3390920970221e72096153f9028e5e434697ae973a6488b8f.jpg", + "image_caption": [ + "SAC by Smiling" + ], + "image_footnote": [], + "bbox": [ + 658, + 494, + 735, + 553 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1fa5c86fcdf03b6475a88a8351d7ebf59208fccfc5f637b753b80fc3793cd0d7.jpg", + "image_caption": [ + "SAC by Bangs" + ], + "image_footnote": [], + "bbox": [ + 735, + 494, + 812, + 553 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/06ab8a56e1dd662eb7212a310fc18e69a77dde02e44ad7df8ead56fe5f2145de.jpg", + "image_caption": [ + "Multiple-Attribute", + "Figure 9. Multiple-Attribute Counterfactual (MAC, Sec. 4.4) compared with Single-Attribute Counterfactual (SAC, Sec. 4.2). We can see that optimization along multiple directions enable the generation of more powerful counterfactuals." + ], + "image_footnote": [], + "bbox": [ + 813, + 494, + 890, + 553 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Fig. 7 and Fig. 8 show examples of multi-attribute counterfactual images generated by ZOOM, against human and animal face classifiers. It can be observed that multiple face attributes such as lipsticks or hair color are edited in Fig. 7, and various cat/dog attributes like nose pinkness, eye shape, and fur patterns are edited in Fig. 8. 
These attribute edits are blended to affect the target model prediction. Appendix B further illustrates ZOOM counterfactual images for semantic segmentation, multi-class classification, and a church classifier. By mutating semantic representations, ZOOM reveals semantic combinations as outliers where the target model underfits.", + "bbox": [ + 496, + 648, + 890, + 829 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In the following sections, we will use the Flip Rate (the percentage of counterfactuals that flipped the model prediction) and Flip Resistance (the percentage of counterfactuals for which the model successfully withheld its prediction) to evaluate the multi-attribute setting.", + "bbox": [ + 496, + 833, + 890, + 907 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Eyeglasses Classifier (0-No Eyeglasses / 1-Eyeglasses)", + "bbox": [ + 119, + 89, + 395, + 99 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Perceived Age Classifier (0-Senior / 1-Young)", + "bbox": [ + 415, + 89, + 648, + 99 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Facial Keypoint Detector (WFLW)", + "bbox": [ + 678, + 89, + 859, + 99 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "11637", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3a31fe81e15c74b575c6093e46f0834299f52fbd05a5297a4af77ec9e568073e.jpg", + "image_caption": [ + "(a) Sensitivity histograms generated by ZOOM on attribute combinations." + ], + "image_footnote": [], + "bbox": [ + 76, + 87, + 271, + 188 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b0c505f7748d04f811c5d12aed9bbf43c879e1b7ff0a3273c81dba77b6e38c5b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 272, + 87, + 467, + 188 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0801f88cca854414287012d32b85e15604c661f7e8ca63dfc868dc3e24766f24.jpg", + "image_caption": [ + "(b) Model diagnosis by ZOOM over 19 attributes. Our framework is generalizable to analyze facial attributes of various domains.", + "Figure 10. Customizing attribute space for ZOOM." + ], + "image_footnote": [], + "bbox": [ + 76, + 204, + 464, + 287 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4.1 Customizing attribute space", + "text_level": 1, + "bbox": [ + 76, + 347, + 326, + 363 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In some circumstances, users may finish one round of model diagnosis and proceed to another round by adding new attributes, or trying a new attribute space. The linear nature of attribute editing (Eq. 3) in ZOOM makes it possible to easily add or remove attributes. Table 1 shows the flip rates results when adding new attributes into $\\mathcal{A}$ for perceived age classifier and big lips classifier. We can observe that a different attribute space will result in different effectiveness of counterfactual images. Also, increasing the search iteration will make counterfactual more effective (see last row). Note that neither re-training the StyleGAN nor user-collection/labeling of data is required at any point in this procedure. Moreover, Fig. 10a shows the model diagnosis histograms generated with combinations of two attributes. Fig. 
10b demonstrates the capability of ZOOM in a rich vocabulary setting where we can analyze attributes that are not labeled in existing datasets [16, 29].", + "bbox": [ + 75, + 364, + 468, + 621 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4.2 Counterfactual training results", + "text_level": 1, + "bbox": [ + 76, + 627, + 346, + 642 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This section evaluates regular classifiers trained on CelebA [16] and counterfactually-trained (CT) classifiers on a mix of CelebA data and counterfactual images as described in Sec. 3.5. Table 2 presents accuracy and flip resistance (FR) results. CT outperforms the regular classifier. FR is assessed over 10,000 counterfactual images, with FR-25 and FR-100 denoting Flip Resistance after 25 and 100 optimization iterations, respectively. Both use $\\eta = 0.2$ and $\\epsilon = 30$ . We can observe that the classifiers after CT are way less likely to be flipped by counterfactual images while maintaining a decent accuracy on the CalebA testset. Our approach robustifies the model by increasing the tolerance toward counterfactuals. Note that CT slightly improves the CelebA classifier when trained on a mixture of CelebA images (original images) and the counterfactual images generated with a generative model trained in the FFHQ [12] images (different domain).", + "bbox": [ + 75, + 648, + 468, + 904 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/4427e736fac19a282ea9a3abd30cb3b7bf9f5849ac3eecf1d3b8a74b1709f21c.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><tr><td>Method</td><td>AC Flip Rate (%)</td><td>BC Flip Rate (%)</td></tr>
<tr><td>Initialize ZOOM by A</td><td>61.95</td><td>83.47</td></tr>
<tr><td>+ Attribute: Beard</td><td>72.08</td><td>90.07</td></tr>
<tr><td>+ Attribute: Smiling</td><td>87.47</td><td>96.27</td></tr>
<tr><td>+ Attribute: Lipstick</td><td>90.96</td><td>94.07</td></tr>
<tr><td>+ Iterations increased to 200</td><td>92.91</td><td>94.87</td></tr></table>
", + "bbox": [ + 501, + 88, + 888, + 176 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/019909ecbdeabb30964a6186068d59ea0fd09e455a4976227c0994ceed8f2d1e.jpg", + "table_caption": [ + "Table 1. Model flip rate study. The initial attribute space $\\mathcal{A} =$ {Bangs, Blond Hair, Bushy Eyebrows, Pale Skin, Pointy Nose}. AC is the perceived age classifier and BC is the big lips classifier." + ], + "table_footnote": [], + "table_body": "
<table><tr><td>Attribute</td><td>Metric</td><td>Regular (%)</td><td>CT (Ours) (%)</td></tr>
<tr><td rowspan=3>Perceived Age</td><td>CelebA Accuracy</td><td>86.10</td><td>86.29</td></tr>
<tr><td>ZOOM FR-25</td><td>19.54</td><td>97.36</td></tr>
<tr><td>ZOOM FR-100</td><td>9.04</td><td>95.65</td></tr>
<tr><td rowspan=3>Big Lips</td><td>CelebA Accuracy</td><td>74.36</td><td>75.39</td></tr>
<tr><td>ZOOM FR-25</td><td>14.12</td><td>99.19</td></tr>
<tr><td>ZOOM FR-100</td><td>5.93</td><td>88.91</td></tr></table>
", + "bbox": [ + 501, + 234, + 893, + 340 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 2. Results of network inference on CelebA original images and ZOOM-generated counterfactual. The CT classifier is significantly less prone to be flipped by counterfactual images, while test accuracy on CelebA remains performant.", + "bbox": [ + 498, + 351, + 890, + 407 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion and Discussion", + "text_level": 1, + "bbox": [ + 500, + 412, + 746, + 428 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we present ZOOM, a zero-shot model diagnosis framework that generates sensitivity histograms based on user's input of natural language attributes. ZOOM assesses failures and generates corresponding sensitivity histograms for each attribute. A significant advantage of our technique is its ability to analyze the failures of a target deep model without the need for laborious collection and annotation of test sets. ZOOM effectively visualizes the correlation between attributes and model outputs, elucidating model behaviors and intrinsic biases.", + "bbox": [ + 496, + 431, + 890, + 580 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Our work has three primary limitations. First, users should possess domain knowledge as their input (text of attributes of interest) should be relevant to the target domain. Recall that it is a small price to pay for model evaluation without an annotated test set. Second, StyleGAN2-ADA struggles with generating out-of-domain samples. Nevertheless, our adversarial learning framework can be adapted to other generative models (e.g., stable diffusion), and the generator can be improved by training on more images. We have rigorously tested our generator with various user inputs, confirming its effectiveness for regular diagnosis requests. Currently, we are exploring stable diffusion models to generate a broader range of classes while maintaining the core concept. Finally, we rely on a pre-trained model like CLIP which we presume to be free of bias and capable of encompassing all relevant attributes.", + "bbox": [ + 496, + 582, + 892, + 823 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements: We would like to thank George Cazenavette, Tianyuan Zhang, Yinong Wang, Hanzhe Hu, Bharath Raj for suggestions in the presentation and experiments. We sincerely thank Ken Ziyu Liu, Jiashun Wang, Bowen Li, and Ce Zheng for revisions to improve this work.", + "bbox": [ + 496, + 824, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "11638", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. StarGAN v2: Diverse Image Synthesis for Multiple Domains. In CVPR, 2020.", + "[2] Edo Collins, Raja Bala, Bob Price, and Sabine Susstrunk. Editing in Style: Uncovering the Local Semantics of GANs. In CVPR, 2020.", + "[3] Emily Denton and Ben Hutchinson and Margaret Mitchell and Timnit Gebru and Andrew Zaldivar. Image counterfactual sensitivity analysis for detecting unintended bias. arXiv preprint arXiv:1906.06439, 2019.", + "[4] Ian J. Goodfellow, Jonathon Shlens, and Christian Szegedy. Explaining and Harnessing Adversarial Examples. 
2014.", + "[5] Yash Goyal, Ziyan Wu, Jan Ernst, Dhruv Batra, Devi Parikh, and Stefan Lee. Counterfactual Visual Explanations. In ICML, 2019.", + "[6] Z. He, W. Zuo, M. Kan, S. Shan, and X. Chen. AttGAN: Facial Attribute Editing by Only Changing What You Want. In IEEE TIP, 2019.", + "[7] Fangzhou Hong, Mingyuan Zhang, Liang Pan, Zhongang Cai, Lei Yang, and Ziwei Liu. AvatarCLIP: Zero-Shot Text-Driven Generation and Animation of 3D Avatars. In ACM TOG, 2022.", + "[8] Erik Härkönen, Aaron Hertzmann, Jaakko Lehtinen, and Sylvain Paris. GANSpace: Discovering Interpretable GAN Controls. In NeurIPS, 2020.", + "[9] Ameya Joshi, Amitangshu Mukherjee, Soumik Sarkar, and Chinmay Hegde. Semantic Adversarial Attacks: Parametric Transformations That Fool Deep Classifiers. In ICCV, 2019.", + "[10] Kimmo Karkkainen and Jungseock Joo. FairFace: Face Attribute Dataset for Balanced Race, Gender, and Age for Bias Measurement and Mitigation. In WACV, 2021.", + "[11] Tero Karras, Miika Aittala, Janne Hellsten, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Training Generative Adversarial Networks with Limited Data. In NeurIPS, 2020.", + "[12] Tero Karras, Samuli Laine, and Timo Aila. A Style-Based Generator Architecture for Generative Adversarial Networks. In CVPR, 2019.", + "[13] Oran Lang, Yossi Gandelsman, Michal Yarom, Yoav Wald, Gal Elidan, Avinatan Hassidim, William T. Freeman, Phillip Isola, Amir Globerson, Michal Irani, and Inbar Mosseri. Explaining in Style: Training a GAN To Explain a Classifier in StyleSpace. In ICCV, 2021.", + "[14] Bo Li, Qiulin Wang, Jiquan Pei, Yu Yang, and Xiangyang Ji. Which Style Makes Me Attractive? Interpretable Control Discovery and Counterfactual Explanation on StyleGAN. arXiv preprint arXiv:2201.09689, 2022.", + "[15] Zhiheng Li and Chenliang Xu. Discover the Unknown Biased Attribute of an Image Classifier. In ICCV, 2021.", + "[16] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep Learning Face Attributes in the Wild. In ICCV, 2015.", + "[17] Jinqi Luo, Zhaoning Wang, Chen Henry Wu, Dong Huang, and Fernando De la Torre. Semantic image attack for visual model diagnosis. arXiv preprint arXiv:2303.13010, 2023.", + "[18] Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. Towards Deep Learning Models Resistant to Adversarial Attacks. In ICLR, 2018." + ], + "bbox": [ + 78, + 114, + 467, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[19] Joanna Materzynska, Antonio Torralba, and David Bau. Disentangling Visual and Written Concepts in CLIP. In CVPR, 2022.", + "[20] Ramaravind K. Mothilal, Amit Sharma, and Chenhao Tan. Explaining Machine Learning Classifiers through Diverse Counterfactual Explanations. In ACM FAccT, 2020.", + "[21] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. StyleCLIP: Text-Driven Manipulation of StyleGAN Imagery. In ICCV, 2021.", + "[22] Ben Poole, Ajay Jain, Jonathan T. Barron, and Ben Mildenhall. DreamFusion: Text-to-3D using 2D Diffusion. arXiv preprint arXiv:2209.14988, 2022.", + "[23] Haonan Qiu, Chaowei Xiao, Lei Yang, Xinchen Yan, Honglak Lee, and Bo Li. SemanticAdv: Generating Adversarial Examples via Attribute-conditioned Image Editing. In ECCV, 2020.", + "[24] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning Transferable Visual Models From Natural Language Supervision. 
In ICML, 2021.", + "[25] Vikram V. Ramaswamy, Sunnie S. Y. Kim, and Olga Russakovsky. Fair Attribute Classification Through Latent Space De-Biasing. In CVPR, 2021.", + "[26] Axel Sauer and Andreas Geiger. Counterfactual Generative Networks. In ICLR, 2021.", + "[27] Yujun Shen, Ceyuan Yang, Xiaou Tang, and Bolei Zhou. InterFaceGAN: Interpreting the Disentangled Face Representation Learned by GANs. In IEEE TPAMI, 2020.", + "[28] Yujun Shen and Bolei Zhou. Closed-Form Factorization of Latent Semantics in GANs. In CVPR, 2021.", + "[29] Philipp Terhörst, Daniel Fährmann, Jan Niklas Kolf, Naser Damer, Florian Kirchbuchner, and Arjan Kuijper. MAAD-Face: A Massively Annotated Attribute Dataset for Face Images. In IEEE TIFS, 2021.", + "[30] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. CLIP-NeRF: Text-and-Image Driven Manipulation of Neural Radiance Fields. In CVPR, 2022.", + "[31] Jingdong Wang, Ke Sun, Tianheng Cheng, Borui Jiang, Chaorui Deng, Yang Zhao, Dong Liu, Yadong Mu, Mingkui Tan, Xinggang Wang, Wenyu Liu, and Bin Xiao. Deep High-Resolution Representation Learning for Visual Recognition. In IEEE TPAMI, 2019.", + "[32] Zhou Wang, A.C. Bovik, H.R. Sheikh, and E.P. Simoncelli. Image Quality Assessment: from Error Visibility to Structural Similarity. In IEEE TIP, 2004.", + "[33] Wayne Wu, Chen Qian, Shuo Yang, Quan Wang, Yici Cai, and Qiang Zhou. Look at Boundary: A Boundary-Aware Face Alignment Algorithm. In CVPR, 2018.", + "[34] Zongze Wu, Dani Lischinski, and Eli Shechtman. StyleSpace Analysis: Disentangled Controls for StyleGAN Image Generation. In CVPR, 2021.", + "[35] Weihao Xia, Yulun Zhang, Yujiu Yang, Jing-Hao Xue, Bolei Zhou, and Ming-Hsuan Yang. GAN Inversion: A Survey. In IEEE TPAMI, 2022." + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "11639", + "bbox": [ + 480, + 945, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[36] Chaowei Xiao, Bo Li, Jun-yan Zhu, Warren He, Mingyan Liu, and Dawn Song. Generating Adversarial Examples with Adversarial Networks. In *IJCAI*, 2018.", + "[37] Mingyuan Zhang, Zhongang Cai, Liang Pan, Fangzhou Hong, Xinying Guo, Lei Yang, and Ziwei Liu. MotionDiffuse: Text-Driven Human Motion Generation with Diffusion Model. arXiv preprint arXiv:2208.15001, 2022." + ], + "bbox": [ + 78, + 90, + 468, + 189 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "11640", + "bbox": [ + 480, + 945, + 519, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2023/Zero-Shot Model Diagnosis/549384d4-f244-4966-8076-15abf189c0a7_model.json b/2023/Zero-Shot Model Diagnosis/549384d4-f244-4966-8076-15abf189c0a7_model.json new file mode 100644 index 0000000000000000000000000000000000000000..3e56feb44a3afae964a2c10596122035dbabced1 --- /dev/null +++ b/2023/Zero-Shot Model Diagnosis/549384d4-f244-4966-8076-15abf189c0a7_model.json @@ -0,0 +1,3124 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.348, + 0.131, + 0.623, + 0.154 + ], + "angle": 0, + "content": "Zero-shot Model Diagnosis" + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.181, + 0.184, + 0.2 + ], + "angle": 0, + "content": "Jinqi Luo*" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.181, + 0.352, + 0.2 + ], + "angle": 0, + "content": "Zhaoning Wang*" + }, + { + "type": "text", + "bbox": [ + 0.385, + 0.182, + 0.516, + 0.199 + ], + "angle": 0, + "content": "Chen Henry Wu" + }, + { + "type": "text", + "bbox": [ + 0.557, + 0.183, + 0.661, + 0.199 + ], + "angle": 0, + "content": "Dong Huang" + }, + { + "type": "text", + "bbox": [ + 0.701, + 0.182, + 0.872, + 0.198 + ], + "angle": 0, + "content": "Fernando De la Torre" + }, + { + "type": "text", + "bbox": [ + 0.374, + 0.201, + 0.596, + 0.217 + ], + "angle": 0, + "content": "Carnegie Mellon University" + }, + { + "type": "text", + "bbox": [ + 0.24, + 0.22, + 0.725, + 0.234 + ], + "angle": 0, + "content": "{jinqil, zhaoning, chenwu2, dghuang, ftorre}@cs.cmu.edu" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.269, + 0.314, + 0.285 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.29, + 0.47, + 0.456 + ], + "angle": 0, + "content": "When it comes to deploying deep vision models, the behavior of these systems must be explicable to ensure confidence in their reliability and fairness. A common approach to evaluate deep learning models is to build a labeled test set with attributes of interest and assess how well it performs. However, creating a balanced test set (i.e., one that is uniformly sampled over all the important traits) is often time-consuming, expensive, and prone to mistakes. The question we try to address is: can we evaluate the sensitivity of deep learning models to arbitrary visual attributes without an annotated test set?" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.457, + 0.471, + 0.683 + ], + "angle": 0, + "content": "This paper argues the case that Zero-shot Model Diagnosis (ZOOM) is possible without the need for a test set nor labeling. To avoid the need for test sets, our system relies on a generative model and CLIP. The key idea is enabling the user to select a set of prompts (relevant to the problem) and our system will automatically search for semantic counterfactual images (i.e., synthesized images that flip the prediction in the case of a binary classifier) using the generative model. We evaluate several visual tasks (classification, key-point detection, and segmentation) in multiple visual domains to demonstrate the viability of our methodology. Extensive experiments demonstrate that our method is capable of producing counterfactual images and offering sensitivity analysis for model diagnosis without the need for a test set." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.7, + 0.208, + 0.715 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.726, + 0.47, + 0.877 + ], + "angle": 0, + "content": "Deep learning models inherit data biases, which can be accentuated or downplayed depending on the model's architecture and optimization strategy. Deploying a computer vision deep learning model requires extensive testing and evaluation, with a particular focus on features with potentially dire social consequences (e.g., non-uniform behavior across gender or ethnicity). 
Given the importance of the problem, it is common to collect and label large-scale datasets to evaluate the behavior of these models across attributes of interest. Unfortunately, collecting these test" + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.269, + 0.882, + 0.457 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.468, + 0.892, + 0.553 + ], + "angle": 0, + "content": "Figure 1. Given a differentiable deep learning model (e.g., a cat/dog classifier) and user-defined text attributes, how can we determine the model's sensitivity to specific attributes without using labeled test data? Our system generates counterfactual images (bottom right) based on the textual directions provided by the user, while also computing the sensitivity histogram (top right)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.567, + 0.892, + 0.717 + ], + "angle": 0, + "content": "datasets is extremely time-consuming, error-prone, and expensive. Moreover, a balanced dataset, that is uniformly distributed across all attributes of interest, is also typically impractical to acquire due to its combinatorial nature. Even with careful metric analysis in this test set, no robustness nor fairness can be guaranteed since there can be a mismatch between the real and test distributions [25]. This research will explore model diagnosis without relying on a test set in an effort to democratize model diagnosis and lower the associated cost." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Counterfactual explainability as a means of model diagnosis is drawing the community's attention [5,20]. Counterfactual images visualize the sensitive factors of an input image that can influence the model's outputs. In other words, counterfactuals answer the question: \"How can we modify the input image \\( \\mathbf{x} \\) (while fixing the ground truth) so that the model prediction would diverge from \\( \\mathbf{y} \\) to \\( \\hat{\\mathbf{y}} \\)?\". The parameterization of such counterfactuals will provide insights into identifying key factors of where the model fails. Unlike existing image-space adversary techniques [4,18], counterfactuals provide semantic perturbations that are interpretable by humans. However, existing counterfactual studies re" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.206, + 0.9 + ], + "angle": 0, + "content": "*Equal contribution." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "11631" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.468, + 0.137 + ], + "angle": 0, + "content": "require the user to either collect uniform test sets [10], annotate discovered bias [15], or train a model-specific explanation every time the user wants to diagnose a new model [13]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.137, + 0.468, + 0.332 + ], + "angle": 0, + "content": "On the other hand, recent advances in Contrastive Language-Image Pretraining (CLIP) [24] can help to overcome the above challenges. CLIP enables text-driven applications that map user text representations to visual manifolds for downstream tasks such as avatar generation [7], motion generation [37] or neural rendering [22, 30]. 
In the domain of image synthesis, StyleCLIP [21] reveals that text-conditioned optimization in the StyleGAN [12] latent space can decompose latent directions for image editing, allowing for the mutation of a specific attribute without disturbing others. With such capability, users can freely edit semantic attributes conditioned on text inputs. This paper further explores its use in the scope of model diagnosis." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.333, + 0.468, + 0.589 + ], + "angle": 0, + "content": "The central concept of the paper is depicted in Fig. 1. Consider a user interested in evaluating which factors contribute to the lack of robustness in a cat/dog classifier (target model). By selecting a list of keyword attributes, the user is able to (1) see counterfactual images where semantic variations flip the target model predictions (see the classifier score in the top-right corner of the counterfactual images) and (2) quantify the sensitivity of each attribute for the target model (see the sensitivity histogram on the top). Instead of using a test set, we propose using a StyleGAN generator as the picture engine for sampling counterfactual images. CLIP transforms the user's text input, and enables model diagnosis in an open-vocabulary setting. This is a major advantage since there is no need to collect and annotate images, and only minimal expert knowledge is required from the user. In addition, we are not tied to a particular annotation from datasets (e.g., specific attributes in CelebA [16])." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.59, + 0.468, + 0.62 + ], + "angle": 0, + "content": "To summarize, our proposed work offers three major improvements over earlier efforts:" + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.626, + 0.468, + 0.701 + ], + "angle": 0, + "content": "- The user requires no labeled, balanced test dataset and only minimal expert knowledge in order to evaluate where a model fails (i.e., model diagnosis). In addition, the method provides a sensitivity histogram across the attributes of interest." + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.711, + 0.468, + 0.757 + ], + "angle": 0, + "content": "- When a different target model or a new user-defined attribute space is introduced, it is not necessary to retrain our system, allowing for practical use." + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.766, + 0.468, + 0.826 + ], + "angle": 0, + "content": "- The target model fine-tuned with counterfactual images not only slightly improves the classification performance, but also greatly increases the distributional robustness against counterfactual images." + }, + { + "type": "list", + "bbox": [ + 0.096, + 0.626, + 0.468, + 0.826 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.845, + 0.218, + 0.86 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.468, + 0.901 + ], + "angle": 0, + "content": "This section reviews prior work on attribute editing with generative models and recent efforts on model diagnosis." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.091, + 0.857, + 0.107 + ], + "angle": 0, + "content": "2.1. Attribute Editing with Generative Models" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.111, + 0.892, + 0.368 + ], + "angle": 0, + "content": "With recent progress in generative models, GANs support high-quality image synthesis, as well as semantic attribute editing [35].
[1, 6] edit the images by perturbing the intermediate latent space encoded from the original images. These methods rely on images to be encoded to latent vectors to perform attribute editing. On the contrary, StyleGAN [12] can produce images by sampling the latent space. Many works have explored ways to edit attributes in the latent space of StyleGAN, either by relying on image annotations [27] or in an unsupervised manner [8, 28]. StyleSpace [34] further disentangles the latent space of StyleGAN and can perform specific attribute edits by disentangled style vectors. Based upon StyleSpace, StyleCLIP [21] builds the connection between the CLIP language space and StyleGAN latent space to enable arbitrary edits specified by the text. Our work adopts this concept for fine-grained attribute editing." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.375, + 0.665, + 0.391 + ], + "angle": 0, + "content": "2.2. Model Diagnosis" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.399, + 0.892, + 0.73 + ], + "angle": 0, + "content": "To the best of our knowledge, model diagnosis without a test set is a relatively unexplored problem. In the adversarial learning literature, it is common to find methods that show how image-space perturbations [4, 18] flip the model prediction; however, such perturbations lack visual interpretability. [36] pioneers in synthesizing adversaries by GANs. More recently, [9, 23, 26] propose generative methods to synthesize semantically perturbed images to visualize where the target model fails. However, their attribute editing is limited within the dataset's annotated labels. Instead, our framework allows users to easily customize their own attribute space, in which we visualize and quantify the biased factors that affect the model prediction. On the bias detection track, [13] co-trains a model-specific StyleGAN with each target model, and requires human annotators to name attribute coordinates in the Stylespace. [3, 14, 15] synthesize counterfactual images by either optimally traversing the latent space or learning an attribute hyperplane, after which the user will inspect the represented bias. Unlike previous work, we propose to diagnose a deep learning model without any model-specific re-training, new test sets, or manual annotations/inspections." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.743, + 0.591, + 0.758 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.764, + 0.892, + 0.839 + ], + "angle": 0, + "content": "This section firstly describes our method to generate counterfactual images guided by CLIP in a zero-shot manner. We then introduce how we perform the sensitivity analysis across attributes of interest. Fig. 2 shows the overview of our framework." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.848, + 0.787, + 0.862 + ], + "angle": 0, + "content": "3.1. Notation and Problem Definition" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Let \\( f_{\\theta} \\), parameterized by \\( \\theta \\), be the target model that we want to diagnose. 
In this paper, \\( f_{\\theta} \\) denotes two types of" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "11632" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.113, + 0.086, + 0.859, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.273, + 0.895, + 0.331 + ], + "angle": 0, + "content": "Figure 2. The ZOOM framework. Black solid lines stand for forward passes, red dashed lines stand for backpropagation, and purple dashed lines stand for inference after the optimization converges. The user inputs single or multiple attributes, and we map them into edit directions with the method in Sec. 3.2. Then we assign to each edit direction (attribute) a weight, which represents how much we are adding/removing this attribute. We iteratively perform adversarial learning on the attribute space to maximize the counterfactual effectiveness." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.349, + 0.473, + 0.651 + ], + "angle": 0, + "content": "deep nets: binary attribute classifiers and face keypoint detectors. Note that our approach is extendable to any end-to-end differentiable target deep models. Let \\(\\mathcal{G}_{\\phi}\\), parameterized by \\(\\phi\\), be the style generator that synthesizes images by \\(\\mathbf{x} = \\mathcal{G}_{\\phi}(\\mathbf{s})\\) where \\(\\mathbf{s}\\) is the style vector in Style Space \\(S\\) [34]. We denote a counterfactual image as \\(\\hat{\\mathbf{x}}\\), which is a synthesized image that misleads the target model \\(f_{\\theta}\\), and denote the original reference image as \\(\\mathbf{x}\\). \\(a\\) is defined as a single user input text-based attribute, with its domain \\(\\mathcal{A} = \\{a_i\\}_{i=1}^N\\) for \\(N\\) input attributes. \\(\\hat{\\mathbf{x}}\\) and \\(\\mathbf{x}\\) differs only along attribute directions \\(\\mathcal{A}\\). Given a set of \\(\\{f_{\\theta}, \\mathcal{G}_{\\phi}, \\mathcal{A}\\}\\), our goal is to perform counterfactual-based diagnosis to interpret where the model fails without manually collecting nor labeling any test set. Unlike traditional approaches of image-space noises which lack explainability to users, our method adversarially searches the counterfactual in the user-designed semantic space. To this end, our diagnosis will have three outputs, namely counterfactual images (Sec. 3.3), sensitivity histograms (Sec. 3.4), and distributionally robust models (Sec. 3.5)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.658, + 0.317, + 0.674 + ], + "angle": 0, + "content": "3.2. Extracting Edit Directions" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.678, + 0.47, + 0.768 + ], + "angle": 0, + "content": "This section examines the terminologies, method, and modification we adopt in ZOOM to extract suitable global directions for attribute editing. Since CLIP has shown strong capability in disentangling visual representation [19], we incorporate style channel relevance from Style-CLIP [21] to find edit directions for each attribute." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.769, + 0.471, + 0.875 + ], + "angle": 0, + "content": "Given the user's input strings of attributes, we want to find an image manipulation direction \\(\\Delta \\mathbf{s}\\) for any \\(\\mathbf{s} \\sim \\mathcal{S}\\), such that the generated image \\(\\mathcal{G}_{\\phi}(\\mathbf{s} + \\Delta \\mathbf{s})\\) only varies in the input attributes. 
Recall that CLIP maps strings into a text embedding \\(\\mathbf{t} \\in \\mathcal{T}\\), the text embedding space. For a string attribute description \\(a\\) and a neutral prefix \\(p\\), we obtain the CLIP text embedding difference \\(\\Delta \\mathbf{t}\\) by:" + }, + { + "type": "equation", + "bbox": [ + 0.141, + 0.881, + 0.47, + 0.898 + ], + "angle": 0, + "content": "\\[\n\\Delta \\mathbf {t} = \\operatorname {C L I P} _ {\\text {t e x t}} (p \\oplus a) - \\operatorname {C L I P} _ {\\text {t e x t}} (p) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.349, + 0.892, + 0.394 + ], + "angle": 0, + "content": "where \\(\\oplus\\) is the string concatenation operator. To take 'Eyeglasses' as an example, we can get \\(\\Delta t = \\mathrm{CLIP}_{\\mathrm{text}}\\) (a face with Eyeglasses) - \\(\\mathrm{CLIP}_{\\mathrm{text}}\\) (a face)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.395, + 0.893, + 0.546 + ], + "angle": 0, + "content": "To get the edit direction, \\(\\Delta \\mathbf{s}\\), we need to utilize a style relevance mapper \\(\\mathbf{M} \\in \\mathbb{R}^{c_S \\times c_T}\\) to map between the CLIP text embedding vectors of length \\(c_{\\mathcal{T}}\\) and the Style space vector of length \\(c_{\\mathcal{S}}\\). StyleCLIP optimizes \\(\\mathbf{M}\\) by iteratively searching meaningful style channels: mutating each channel in \\(\\mathcal{S}\\) and encoding the mutated images by CLIP to assess whether there is a significant change in \\(\\mathcal{T}\\) space. To prevent undesired edits that are irrelevant to the user prompt, the edit direction \\(\\Delta \\mathbf{s}\\) will filter out channels that the style value change is insignificant:" + }, + { + "type": "equation", + "bbox": [ + 0.57, + 0.555, + 0.892, + 0.571 + ], + "angle": 0, + "content": "\\[\n\\Delta \\mathbf {s} = (\\mathbf {M} \\cdot \\Delta \\mathbf {t}) \\odot \\mathbb {1} ((\\mathbf {M} \\cdot \\Delta \\mathbf {t}) > \\lambda), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.581, + 0.892, + 0.687 + ], + "angle": 0, + "content": "where \\(\\lambda\\) is the hyper-parameter for the threshold value. \\(\\mathbb{1}(\\cdot)\\) is the indicator function, and \\(\\odot\\) is the element-wise product operator. Since the success of attribute editing by the extracted edit directions will be the key to our approach, Appendix A will show the capability of CLIP by visualizing the global edit direction on multiple sampled images, conducting the user study, and analyzing the effect of \\(\\lambda\\)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.695, + 0.774, + 0.712 + ], + "angle": 0, + "content": "3.3. Style Counterfactual Synthesis" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.719, + 0.892, + 0.872 + ], + "angle": 0, + "content": "Identifying semantic counterfactuals necessitates a manageable parametrization of the semantic space for effective exploration. For ease of notation, we denote \\((\\Delta \\mathbf{s})_i\\) as the global edit direction for \\(i^{th}\\) attribute \\(a_i \\in \\mathcal{A}\\) from the user input. After these \\(N\\) attributes are provided and the edit directions are calculated, we initialize the control vectors \\(\\mathbf{w}\\) of length \\(N\\) where the \\(i^{th}\\) element \\(w_i\\) controls the strength of the \\(i^{th}\\) edit direction. Our counterfactual edit will be a linear combination of normalized edit directions: \\(\\mathbf{s}_{edit} = \\sum_{i=1}^{N} w_i \\frac{(\\Delta \\mathbf{s})_i}{||(\\Delta \\mathbf{s})_i||}\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.5, + 0.872, + 0.892, + 0.901 + ], + "angle": 0, + "content": "The black arrows in Fig. 2 show the forward inference to synthesize counterfactual images. Given the parametriza" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "11633" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.152 + ], + "angle": 0, + "content": "tion of attribute editing strengths and the final loss value, our framework searches for counterfactual examples in the. \noptimizable edit weight space. The original sampled image is \\(\\mathbf{x} = G_{\\phi}(\\mathbf{s})\\) , and the counterfactual image is" + }, + { + "type": "equation", + "bbox": [ + 0.093, + 0.161, + 0.469, + 0.202 + ], + "angle": 0, + "content": "\\[\n\\hat {\\mathbf {x}} = G _ {\\phi} (\\mathbf {s} + \\mathbf {s} _ {e d i t}) = G _ {\\phi} \\left(\\mathbf {s} + \\sum_ {i = 1} ^ {N} w _ {i} \\frac {(\\Delta \\mathbf {s}) _ {i}}{| | (\\Delta \\mathbf {s}) _ {i} | |}\\right), \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.209, + 0.47, + 0.24 + ], + "angle": 0, + "content": "which is obtained by minimizing the following loss, \\(\\mathcal{L}\\), that is the weighted sum of three terms:" + }, + { + "type": "equation", + "bbox": [ + 0.087, + 0.248, + 0.469, + 0.267 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} (\\mathbf {s}, \\mathbf {w}) = \\alpha \\mathcal {L} _ {\\text {t a r g e t}} (\\hat {\\mathbf {x}}) + \\beta \\mathcal {L} _ {\\text {s t r u c t}} (\\hat {\\mathbf {x}}) + \\gamma \\mathcal {L} _ {\\text {a t t r}} (\\hat {\\mathbf {x}}). \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.273, + 0.469, + 0.303 + ], + "angle": 0, + "content": "We back-propagate to optimize \\(\\mathcal{L}\\) w.r.t the weights of the edit directions \\(\\mathbf{w}\\), shown as the red pipeline in Fig. 2." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.304, + 0.469, + 0.44 + ], + "angle": 0, + "content": "The targeted adversarial loss \\(\\mathcal{L}_{target}\\) for binary attribute classifiers minimizes the distance between the current model prediction \\(f_{\\theta}(\\hat{\\mathbf{x}})\\) with the flip of original prediction \\(\\hat{p}_{cls} = 1 - f_{\\theta}(\\mathbf{x})\\). In the case of an eyeglass classifier on a person wearing eyeglasses, \\(\\mathcal{L}_{target}\\) will guide the optimization to search w such that the model predicts no eyeglasses. For a keypoint detector, the adversarial loss will minimize the distance between the model keypoint prediction with a set of random points \\(\\hat{p}_{kp} \\sim \\mathcal{N}\\):" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.448, + 0.469, + 0.465 + ], + "angle": 0, + "content": "(binary classifier) \\(\\mathcal{L}_{target}(\\hat{\\mathbf{x}}) = L_{CE}(f_{\\theta}(\\hat{\\mathbf{x}}),\\hat{p}_{cls})\\) (5)" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.468, + 0.469, + 0.485 + ], + "angle": 0, + "content": "(keypoint detector) \\(\\mathcal{L}_{target}(\\hat{\\mathbf{x}}) = L_{MSE}(f_{\\theta}(\\hat{\\mathbf{x}}),\\hat{p}_{kp})\\) (6)" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.492, + 0.469, + 0.702 + ], + "angle": 0, + "content": "If we only optimize \\(\\mathcal{L}_{\\text {target }}\\) w.r.t the global edit directions, it is possible that the method will not preserve image statistics of the original image and can include the particular attribute that we are diagnosing. 
To constrain the optimization, we added a structural loss \\(\\mathcal{L}_{\\text {struct }}\\) and an attribute consistency loss \\(\\mathcal{L}_{\\text {attr }}\\) to avoid generation collapse. \\(\\mathcal{L}_{\\text {struct }}\\) [32] aims to preserve global image statistics of the original image x including image contrasts, background, or shape identity during the adversarial editing. While \\(\\mathcal{L}_{\\text {attr }}\\) enforces that the target attribute (perceived ground truth) be consistent on the style edits. For example, when diagnosing the eyeglasses classifier, ZOOM preserves the original status of eyeglasses and precludes direct eyeglasses addition/removal." + }, + { + "type": "equation", + "bbox": [ + 0.137, + 0.712, + 0.469, + 0.728 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {s t r u c t}} (\\hat {\\mathbf {x}}) = L _ {\\text {S S I M}} (\\hat {\\mathbf {x}}, \\mathbf {x}) \\tag {7}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.15, + 0.731, + 0.469, + 0.747 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {a t t r}} (\\hat {\\mathbf {x}}) = L _ {C E} \\left(\\operatorname {C L I P} (\\hat {\\mathbf {x}}), \\operatorname {C L I P} (\\mathbf {x})\\right) \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.756, + 0.469, + 0.817 + ], + "angle": 0, + "content": "Given a pretrained target model \\( f_{\\theta} \\), a domain-specific style generator \\( G_{\\phi} \\), and a text-driven attribute space \\( \\mathcal{A} \\), our goal is to sample an original style vector \\( \\mathbf{s} \\) for each image and search its counterfactual edit strength \\( \\hat{\\mathbf{w}} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.198, + 0.825, + 0.469, + 0.849 + ], + "angle": 0, + "content": "\\[\n\\hat {\\mathbf {w}} = \\underset {\\mathbf {w}} {\\operatorname {a r g m i n}} \\mathcal {L} (\\mathbf {s}, \\mathbf {w}). \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.856, + 0.42, + 0.871 + ], + "angle": 0, + "content": "Unless otherwise stated, we iteratively update \\(\\mathbf{w}\\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.168, + 0.88, + 0.469, + 0.898 + ], + "angle": 0, + "content": "\\[\n\\mathbf {w} = \\operatorname {c l a m p} _ {[ - \\epsilon , \\epsilon ]} (\\mathbf {w} - \\eta \\nabla_ {\\mathbf {w}} \\mathcal {L}), \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.228 + ], + "angle": 0, + "content": "where \\(\\eta\\) is the step size and \\(\\epsilon\\) is the clamp bound to avoid synthesis collapse caused by exaggerated edit. Note that the maximum counterfactual effectiveness does not indicate the maximum edit strength (i.e., \\(w_{i} = \\epsilon\\)), since the attribute edit direction does not necessarily overlap with the target classifier direction. The attribute change is bi-directional, as the \\(w_{i}\\) can be negative in Eq. 3. Details of using other optimization approaches (e.g., linear approximation [18]) will be discussed in Appendix C." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.239, + 0.762, + 0.256 + ], + "angle": 0, + "content": "3.4. Attribute Sensitivity Analysis" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.263, + 0.892, + 0.353 + ], + "angle": 0, + "content": "Single-attribute counterfactual reflects the sensitivity of target model on the individual attribute. 
By optimizing independently along the edit direction for a single attribute and averaging the model probability changes over images, our model generates independent sensitivity score \\( h_i \\) for each attribute \\( a_i \\):" + }, + { + "type": "equation", + "bbox": [ + 0.532, + 0.368, + 0.891, + 0.386 + ], + "angle": 0, + "content": "\\[\nh _ {i} = \\mathbb {E} _ {\\mathbf {x} \\sim \\mathcal {P} (\\mathbf {x}), \\hat {\\mathbf {x}} = \\mathrm {Z O O M} (\\mathbf {x}, a _ {i})} | f _ {\\theta} (\\mathbf {x}) - f _ {\\theta} (\\hat {\\mathbf {x}}) |. \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.397, + 0.892, + 0.548 + ], + "angle": 0, + "content": "The sensitivity score \\( h_i \\) is the probability difference between the original image \\( \\mathbf{x} \\) and generated image \\( \\hat{\\mathbf{x}} \\), at the most counterfactual point when changing attribute \\( a_i \\). We synthesize a number of images from \\( \\mathcal{G}_{\\phi} \\), then iteratively compute the sensitivity for each given attribute, and finally normalize all sensitivities to draw the histogram as shown in Fig. 4. The histogram indicates the sensitivity of the evaluated model \\( f_{\\theta} \\) on each of the user-defined attributes. Higher sensitivity of one attribute means that the model is more easily affected by that attribute." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.56, + 0.724, + 0.576 + ], + "angle": 0, + "content": "3.5. Counterfactual Training" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.584, + 0.892, + 0.735 + ], + "angle": 0, + "content": "The multi-attribute counterfactual approach visualizes semantic combinations that cause the model to falter, providing valuable insights for enhancing the model's robustness. We naturally adopt the concept of iterative adversarial training [18] to robustify the target model. For each iteration, ZOOM receives the target model parameter and returns a batch of mutated counterfactual images with the model's original predictions as labels. Then the target model will be trained on the counterfactually-augmented images to achieve the robust goal:" + }, + { + "type": "equation", + "bbox": [ + 0.511, + 0.747, + 0.891, + 0.768 + ], + "angle": 0, + "content": "\\[\n\\theta^ {*} = \\underset {\\theta} {\\operatorname {a r g m i n}} \\mathbb {E} _ {\\mathbf {x} \\sim \\mathcal {P} (\\mathbf {x}), \\hat {\\mathbf {x}} = \\operatorname {Z O O M} (\\mathbf {x}, A)} L _ {C E} \\left(f _ {\\theta} (\\hat {\\mathbf {x}}), f _ {\\theta} (\\mathbf {x})\\right) \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.892, + 0.901 + ], + "angle": 0, + "content": "where batches of \\(\\mathbf{x}\\) are randomly sampled from the StyleGAN generator \\(\\mathcal{G}_{\\phi}\\). In the following, we abbreviate the process as Counterfactual Training (CT). Note that, although not explicitly expressed in Eq. 12, the CT process is a min-max game. ZOOM synthesizes counterfactuals to maximize the variation of model prediction (while persevering the perceived ground truth), and the target model is learned with the counterfactual images to minimize the variation." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "11634" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.084, + 0.089, + 0.164, + 0.151 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.094, + 0.152, + 0.153, + 0.16 + ], + "angle": 0, + "content": "Open Mouth" + }, + { + "type": "image", + "bbox": [ + 0.165, + 0.089, + 0.244, + 0.151 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.165, + 0.152, + 0.244, + 0.159 + ], + "angle": 0, + "content": "\\( \\frac{1}{2}x - 1 > 0 \\)" + }, + { + "type": "image", + "bbox": [ + 0.245, + 0.089, + 0.324, + 0.151 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.245, + 0.152, + 0.324, + 0.159 + ], + "angle": 0, + "content": "\\( \\frac{1}{2}x - 1 > 0 \\)" + }, + { + "type": "image", + "bbox": [ + 0.324, + 0.089, + 0.403, + 0.151 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.403, + 0.089, + 0.481, + 0.151 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.409, + 0.152, + 0.475, + 0.159 + ], + "angle": 0, + "content": "Closed Mouth" + }, + { + "type": "image", + "bbox": [ + 0.483, + 0.089, + 0.562, + 0.151 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.562, + 0.089, + 0.642, + 0.151 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.642, + 0.089, + 0.722, + 0.151 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.722, + 0.089, + 0.802, + 0.151 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.803, + 0.089, + 0.883, + 0.151 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.163, + 0.164, + 0.225 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.094, + 0.226, + 0.153, + 0.235 + ], + "angle": 0, + "content": "Felidae Pupil" + }, + { + "type": "image", + "bbox": [ + 0.165, + 0.163, + 0.244, + 0.225 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.165, + 0.226, + 0.244, + 0.235 + ], + "angle": 0, + "content": "\\( \\frac{3}{1} + u + {4q} = 1 + u + {uq} \\) dH" + }, + { + "type": "image", + "bbox": [ + 0.245, + 0.163, + 0.324, + 0.225 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.245, + 0.226, + 0.324, + 0.235 + ], + "angle": 0, + "content": "\\( \\frac{3}{1} + u + {4q} = 1 + u + {uq} \\) dH" + }, + { + "type": "image", + "bbox": [ + 0.324, + 0.163, + 0.403, + 0.225 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.324, + 0.226, + 0.403, + 0.235 + ], + "angle": 0, + "content": "\\( \\frac{3}{1} + u + {4q} = 1 + u + {uq} \\) dH" + }, + { + "type": "image", + "bbox": [ + 0.403, + 0.163, + 0.481, + 0.225 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.409, + 0.226, + 0.475, + 0.235 + ], + "angle": 0, + "content": "Canidae Pupil" + }, + { + "type": "image", + "bbox": [ + 0.483, + 0.163, + 0.562, + 0.225 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.562, + 0.163, + 0.642, + 0.225 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.642, + 0.163, + 0.722, + 0.225 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.722, + 0.163, + 0.802, + 0.225 + ], + "angle": 0, + 
"content": null + }, + { + "type": "image", + "bbox": [ + 0.803, + 0.163, + 0.883, + 0.225 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.245, + 0.892, + 0.274 + ], + "angle": 0, + "content": "Figure 3. Effect of progressively generating counterfactual images on (left) cat/dog classifier (0-Cat / 1-Dog), and (right) perceived age classifier (0-Senior / 1-Young). Model probability prediction during the process is attached at the top right corner." + }, + { + "type": "image", + "bbox": [ + 0.078, + 0.279, + 0.278, + 0.385 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.281, + 0.279, + 0.48, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.489, + 0.279, + 0.683, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.685, + 0.279, + 0.885, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.257, + 0.387, + 0.713, + 0.399 + ], + "angle": 0, + "content": "(a) Model diagnosis histograms generated by ZOOM on four facial attribute classifiers." + }, + { + "type": "image", + "bbox": [ + 0.078, + 0.399, + 0.278, + 0.501 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.281, + 0.399, + 0.48, + 0.501 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.483, + 0.399, + 0.682, + 0.501 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.685, + 0.399, + 0.885, + 0.501 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.18, + 0.505, + 0.79, + 0.518 + ], + "angle": 0, + "content": "(b) Model diagnosis histograms generated by ZOOM on four classifiers trained on manually-crafted imbalance data." + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.527, + 0.892, + 0.556 + ], + "angle": 0, + "content": "Figure 4. Model diagnosis histograms generated by ZOOM. The vertical axis values reflect the attribute sensitivities calculated by averaging the model probability change over all sampled images. The horizontal axis is the attribute space input by user." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.57, + 0.282, + 0.587 + ], + "angle": 0, + "content": "4. Experimental Results" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.595, + 0.469, + 0.701 + ], + "angle": 0, + "content": "This section describes the experimental validations on the effectiveness and reliability of ZOOM. First, we describe the model setup in Sec. 4.1. Sec. 4.2 and Sec. 4.3 visualize and validate the model diagnosis results for the single-attribute setting. In Sec. 4.4, we show results on synthesized multiple-attribute counterfactual images and apply them to counterfactual training." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.709, + 0.212, + 0.725 + ], + "angle": 0, + "content": "4.1. Model Setup" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.732, + 0.469, + 0.807 + ], + "angle": 0, + "content": "Pre-trained models: We used Stylegan2-ADA [11] pretrained on FFHQ [12] and AFHQ [1] as our base generative networks, and the pre-trained CLIP model [24] which is parameterized by ViT-B/32. We followed StyleCLIP [21] setups to compute the channel relevance matrices \\(\\mathcal{M}\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.808, + 0.469, + 0.868 + ], + "angle": 0, + "content": "Target models: Our classifier models are ResNet50 with single fully-connected head initialized by TorchVision1. In training the binary classifiers, we use the Adam optimizer with learning rate 0.001 and batch size 128. We train binary" + }, + { + "type": "page_footnote", + "bbox": [ + 0.076, + 0.875, + 0.458, + 0.9 + ], + "angle": 0, + "content": "1https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.572, + 0.892, + 0.632 + ], + "angle": 0, + "content": "classifiers for Eyeglasses, Perceived Gender, Mustache, Perceived Age attributes on CelebA and for cat/dog classification on AFHQ. For the 98-keypoint detectors, we used the HR-Net architecture [31] on WFLW [33]." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.647, + 0.853, + 0.664 + ], + "angle": 0, + "content": "4.2. Visual Model Diagnosis: Single-Attribute" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.673, + 0.892, + 0.763 + ], + "angle": 0, + "content": "Understanding where deep learning model fails is an essential step towards building trustworthy models. Our proposed work allows us to generate counterfactual images (Sec. 3.3) and provide insights on sensitivities of the target model (Sec. 3.4). This section visualizes the counterfactual images in which only one attribute is modified at a time." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.892, + 0.9 + ], + "angle": 0, + "content": "Fig. 3 shows the single-attribute counterfactual images. Interestingly (but not unexpectedly), we can see that reducing the hair length or joyfulness causes the age classifier more likely to label the face to an older person. Note that our approach is extendable to multiple domains, as we change the cat-like pupil to dog-like, the dog-cat classification tends towards the dog. Using the counterfactual images, we can conduct model diagnosis with the method mentioned in Sec. 3.4, on which attributes the model is sen" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "11635" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.137 + ], + "angle": 0, + "content": "sitive to. In the histogram generated in model diagnosis, a higher bar means the model is more sensitive toward the corresponding attribute." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.138, + 0.471, + 0.349 + ], + "angle": 0, + "content": "Fig. 4a shows the model diagnosis histograms on regularly-trained classifiers. For instance, the cat/dog classifier histogram shows outstanding sensitivity to green eyes and vertical pupil. The analysis is intuitive since these are cat-biased traits rarely observed in dog photos. Moreover, the histogram of eyeglasses classifier shows that the mutation on bushy eyebrows is more influential for flipping the model prediction. It potentially reveals the positional correlation between eyeglasses and bushy eyebrows. The advantage of single-attribute model diagnosis is that the score of each attribute in the histogram are independent from other attributes, enabling unambiguous understanding of exact semantics that make the model fail. Diagnosis results for additional target models can be found in Appendix B." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.359, + 0.398, + 0.376 + ], + "angle": 0, + "content": "4.3. 
Validation of Visual Model Diagnosis" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.383, + 0.47, + 0.504 + ], + "angle": 0, + "content": "Evaluating whether our zero-shot sensitivity histograms (Fig. 4) explain the true vulnerability is a difficult task, since we do not have access to a sufficiently large and balanced test set fully annotated in an open-vocabulary setting. To verify the performance, we create synthetically imbalanced cases where the model bias is known. We then compare our results with a supervised diagnosis setting [17]. In addition, we will validate the decoupling of the attributes by CLIP." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.517, + 0.349, + 0.532 + ], + "angle": 0, + "content": "4.3.1 Creating imbalanced classifiers" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.538, + 0.47, + 0.839 + ], + "angle": 0, + "content": "In order to evaluate whether our sensitivity histogram is correct, we train classifiers that are highly imbalanced towards a known attribute and see whether ZOOM can detect the sensitivity w.r.t the attribute. For instance, when training the perceived-age classifier (binarized as Young in CelebA), we created a dataset on which the trained classifier is strongly sensitive to Bangs (hair over forehead). The custom dataset is a CelebA training subset that consists of 20, 200 images. More specifically, there are 10,000 images that have both young people that have bangs, represented as (1, 1), and 10,000 images of people that are not young and have no bangs, represented as (0, 0). The remaining combinations of (1, 0) and (0, 1) have only 100 images. With this imbalanced dataset, bangs is the attribute that dominantly correlates with whether the person is young, and hence the perceived-age classifier would be highly sensitive towards bangs. See Fig. 5 (the right histograms) for an illustration of the sensitivity histogram computed by our method for the case of an age classifier with bangs (top) and lipstick (bottom) being imbalanced." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.47, + 0.901 + ], + "angle": 0, + "content": "We trained multiple imbalanced classifiers with this methodology, and visualize the model diagnosis histograms of these imbalanced classifiers in Fig. 4b. We can observe that the ZOOM histograms successfully detect the" + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.091, + 0.89, + 0.338 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.345, + 0.892, + 0.374 + ], + "angle": 0, + "content": "Figure 5. The sensitivity of the age classifier is evaluated with ZOOM (right) and AttGAN (left), achieving comparable results." + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.385, + 0.692, + 0.519 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.538, + 0.523, + 0.659, + 0.535 + ], + "angle": 0, + "content": "(a) Mustache classifier" + }, + { + "type": "image", + "bbox": [ + 0.699, + 0.385, + 0.886, + 0.519 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.72, + 0.523, + 0.862, + 0.535 + ], + "angle": 0, + "content": "(b) Perceived age classifier" + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.54, + 0.892, + 0.582 + ], + "angle": 0, + "content": "Figure 6. Confusion matrix of CLIP score variation (vertical axis) when perturbing attributes (horizontal axis). This shows that attributes in ZOOM are highly decoupled." 
+ }, + { + "type": "text", + "bbox": [ + 0.499, + 0.597, + 0.892, + 0.628 + ], + "angle": 0, + "content": "synthetically-made bias, which are shown as the highest bars in histograms. See the caption for more information." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.639, + 0.822, + 0.654 + ], + "angle": 0, + "content": "4.3.2 Comparison with supervised diagnosis" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.659, + 0.892, + 0.855 + ], + "angle": 0, + "content": "We also validated our histogram by comparing it with the case in which we have access to a generative model that has been explicitly trained to disentangle attributes. We follow the work on [17] and used AttGAN [6] trained on the CelebA training set over 15 attributes2. After the training converged, we performed adversarial learning in the attribute space of AttGAN and create a sensitivity histogram using the same approach as Sec. 3.4. Fig. 5 shows the result of this method on the perceived-age classifier which is made biased towards bangs. As anticipated, the AttGAN histogram (left) corroborates the histogram derived from our method (right). Interestingly, unlike ZOOM, AttGAN show less sensitivity to remaining attributes. This is likely" + }, + { + "type": "page_footnote", + "bbox": [ + 0.5, + 0.863, + 0.892, + 0.901 + ], + "angle": 0, + "content": "\\(^{2}\\)Bald, Bangs, Black_Hair, Blond_Hair, Brown_Hair, Bushy_Eyesbrows, Eyeglasses, Male, Mouth_Slightly_Open, Mustache, No_Board, Pale_Skin, Young, Smiling, Wearing_Lipstick." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "11636" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.12, + 0.09, + 0.396, + 0.101 + ], + "angle": 0, + "content": "Eyeglasses Classifier (0-No Eyeglasses / 1-Eyeglasses)" + }, + { + "type": "header", + "bbox": [ + 0.416, + 0.09, + 0.649, + 0.101 + ], + "angle": 0, + "content": "Perceived Age Classifier (0-Senior / 1-Young)" + }, + { + "type": "header", + "bbox": [ + 0.679, + 0.09, + 0.86, + 0.1 + ], + "angle": 0, + "content": "Facial Keypoint Detector (WFLW)" + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.119, + 0.097, + 0.224 + ], + "angle": 270, + "content": "Counterfactual Original" + }, + { + "type": "image", + "bbox": [ + 0.099, + 0.103, + 0.177, + 0.165 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.099, + 0.166, + 0.177, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.103, + 0.256, + 0.165 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.166, + 0.255, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.257, + 0.103, + 0.334, + 0.165 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.257, + 0.166, + 0.334, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.335, + 0.103, + 0.412, + 0.165 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.335, + 0.166, + 0.412, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.414, + 0.103, + 0.491, + 0.165 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.414, + 0.166, + 0.491, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.493, + 0.103, + 0.571, + 0.165 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.493, + 0.166, + 0.571, + 
0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.573, + 0.103, + 0.651, + 0.165 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.573, + 0.166, + 0.651, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.653, + 0.103, + 0.731, + 0.165 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.653, + 0.166, + 0.731, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.733, + 0.103, + 0.808, + 0.165 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.733, + 0.166, + 0.808, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.812, + 0.103, + 0.888, + 0.165 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.812, + 0.166, + 0.888, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.237, + 0.858, + 0.251 + ], + "angle": 0, + "content": "Figure 7. Multi-attribute counterfactual in faces. The model probability is documented in the upper right corner of each image." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.264, + 0.468, + 0.369 + ], + "angle": 0, + "content": "because AttGAN has a latent space learned in a supervised manner and hence attributes are better disentangled than with StyleGAN. Note that AttGAN is trained with a fixed set of attributes; if a new attribute of interest is introduced, the dataset needs to be re-labeled and AttGAN retrained. ZOOM, however, merely calls for the addition of a new text prompt. More results in Appendix B." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.381, + 0.414, + 0.395 + ], + "angle": 0, + "content": "4.3.3 Measuring disentanglement of attributes" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.401, + 0.468, + 0.475 + ], + "angle": 0, + "content": "Previous works demonstrated that the StyleGAN's latent space can be entangled [2, 27], adding undesired dependencies when searching single-attribute counterfactuals. This section verifies that our framework can disentangle the attributes and mostly edit the desirable attributes." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.477, + 0.468, + 0.717 + ], + "angle": 0, + "content": "We use CLIP as a super annotator to measure attribute changes during single-attribute modifications. For 1,000 images, we record the attribute change after performing adversarial learning in each attribute, and average the attribute score change. Fig. 6 shows the confusion matrix during single-attribute counterfactual synthesis. The horizontal axis is the attribute being edited during the optimization, and the vertical axis represents the CLIP prediction changed by the process. For instance, the first column of Fig. 6a is generated when we optimize over bangs for the mustache classifier. We record the CLIP prediction variation. It clearly shows that bangs is the dominant attribute changing during the optimization. From the main diagonal of matrices, it is evident that the ZOOM mostly perturbs the attribute of interest. The results indicate reasonable disentanglement among attributes." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.727, + 0.433, + 0.742 + ], + "angle": 0, + "content": "4.4. 
Visual Model Diagnosis: Multi-Attributes" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.468, + 0.901 + ], + "angle": 0, + "content": "In the previous sections, we have visualized and validated single-attribute model diagnosis histograms and counterfactual images. In this section, we will assess ZOOM's ability to produce counterfactual images by concurrently exploring multiple attributes within \\(\\mathcal{A}\\), the domain of user-defined attributes. The approach conducts multi-attribute counterfactual searches across various edit directions, identifying distinct semantic combinations that result in the target model's failure. By doing so, we can effectively create more powerful counterfactuals images (see Fig. 9)." + }, + { + "type": "image_caption", + "bbox": [ + 0.631, + 0.261, + 0.776, + 0.271 + ], + "angle": 0, + "content": "Cat / Dog Classifier (0-Cat / 1-Dog)" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.272, + 0.578, + 0.317 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.318, + 0.578, + 0.365 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.58, + 0.272, + 0.642, + 0.318 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.58, + 0.319, + 0.642, + 0.365 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.642, + 0.272, + 0.703, + 0.318 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.642, + 0.319, + 0.703, + 0.365 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.703, + 0.272, + 0.765, + 0.318 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.703, + 0.319, + 0.765, + 0.365 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.765, + 0.272, + 0.828, + 0.318 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.766, + 0.319, + 0.827, + 0.365 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.829, + 0.272, + 0.888, + 0.318 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.829, + 0.319, + 0.888, + 0.365 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.377, + 0.892, + 0.405 + ], + "angle": 0, + "content": "Figure 8. Multi-attribute counterfactual on Cat/Dog classifier. The number in each image is the predicted probability of being a dog." 
+ }, + { + "type": "image", + "bbox": [ + 0.504, + 0.42, + 0.581, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.48, + 0.577, + 0.488 + ], + "angle": 0, + "content": "Original Reference" + }, + { + "type": "image", + "bbox": [ + 0.582, + 0.42, + 0.658, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.592, + 0.48, + 0.646, + 0.488 + ], + "angle": 0, + "content": "SAC by Beard" + }, + { + "type": "image", + "bbox": [ + 0.66, + 0.42, + 0.735, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.665, + 0.48, + 0.731, + 0.488 + ], + "angle": 0, + "content": "SAC by Pale Skin" + }, + { + "type": "image", + "bbox": [ + 0.736, + 0.42, + 0.813, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.739, + 0.48, + 0.809, + 0.488 + ], + "angle": 0, + "content": "SAC by Black Hair" + }, + { + "type": "image", + "bbox": [ + 0.815, + 0.42, + 0.891, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.819, + 0.48, + 0.886, + 0.488 + ], + "angle": 0, + "content": "Multiple-Attribute" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.495, + 0.581, + 0.554 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.556, + 0.577, + 0.565 + ], + "angle": 0, + "content": "Original Reference" + }, + { + "type": "image", + "bbox": [ + 0.582, + 0.495, + 0.658, + 0.554 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.585, + 0.556, + 0.655, + 0.565 + ], + "angle": 0, + "content": "SAC by Lips Color" + }, + { + "type": "image", + "bbox": [ + 0.66, + 0.495, + 0.736, + 0.554 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.667, + 0.556, + 0.727, + 0.565 + ], + "angle": 0, + "content": "SAC by Smiling" + }, + { + "type": "image", + "bbox": [ + 0.736, + 0.495, + 0.813, + 0.554 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.747, + 0.556, + 0.802, + 0.565 + ], + "angle": 0, + "content": "SAC by Bangs" + }, + { + "type": "image", + "bbox": [ + 0.815, + 0.495, + 0.891, + 0.554 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.819, + 0.556, + 0.882, + 0.565 + ], + "angle": 0, + "content": "Multiple-Attribute" + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.573, + 0.892, + 0.628 + ], + "angle": 0, + "content": "Figure 9. Multiple-Attribute Counterfactual (MAC, Sec. 4.4) compared with Single-Attribute Counterfactual (SAC, Sec. 4.2). We can see that optimization along multiple directions enable the generation of more powerful counterfactuals." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.649, + 0.892, + 0.83 + ], + "angle": 0, + "content": "Fig. 7 and Fig. 8 show examples of multi-attribute counterfactual images generated by ZOOM, against human and animal face classifiers. It can be observed that multiple face attributes such as lipsticks or hair color are edited in Fig. 7, and various cat/dog attributes like nose pinkness, eye shape, and fur patterns are edited in Fig. 8. These attribute edits are blended to affect the target model prediction. Appendix B further illustrates ZOOM counterfactual images for semantic segmentation, multi-class classification, and a church classifier. 
By mutating semantic representations, ZOOM reveals semantic combinations as outliers where the target model underfits." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.834, + 0.892, + 0.909 + ], + "angle": 0, + "content": "In the following sections, we will use the Flip Rate (the percentage of counterfactuals that flipped the model prediction) and Flip Resistance (the percentage of counterfactuals for which the model successfully withheld its prediction) to evaluate the multi-attribute setting." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "11637" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.088, + 0.272, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.274, + 0.088, + 0.468, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.079, + 0.191, + 0.468, + 0.204 + ], + "angle": 0, + "content": "(a) Sensitivity histograms generated by ZOOM on attribute combinations." + }, + { + "type": "image", + "bbox": [ + 0.078, + 0.205, + 0.465, + 0.288 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.078, + 0.29, + 0.468, + 0.315 + ], + "angle": 0, + "content": "(b) Model diagnosis by ZOOM over 19 attributes. Our framework is generalizable to analyze facial attributes of various domains." + }, + { + "type": "image_caption", + "bbox": [ + 0.12, + 0.321, + 0.425, + 0.335 + ], + "angle": 0, + "content": "Figure 10. Customizing attribute space for ZOOM." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.348, + 0.327, + 0.364 + ], + "angle": 0, + "content": "4.4.1 Customizing attribute space" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.365, + 0.47, + 0.622 + ], + "angle": 0, + "content": "In some circumstances, users may finish one round of model diagnosis and proceed to another round by adding new attributes, or trying a new attribute space. The linear nature of attribute editing (Eq. 3) in ZOOM makes it possible to easily add or remove attributes. Table 1 shows the flip rates results when adding new attributes into \\(\\mathcal{A}\\) for perceived age classifier and big lips classifier. We can observe that a different attribute space will result in different effectiveness of counterfactual images. Also, increasing the search iteration will make counterfactual more effective (see last row). Note that neither re-training the StyleGAN nor user-collection/labeling of data is required at any point in this procedure. Moreover, Fig. 10a shows the model diagnosis histograms generated with combinations of two attributes. Fig. 10b demonstrates the capability of ZOOM in a rich vocabulary setting where we can analyze attributes that are not labeled in existing datasets [16, 29]." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.628, + 0.348, + 0.643 + ], + "angle": 0, + "content": "4.4.2 Counterfactual training results" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.649, + 0.47, + 0.905 + ], + "angle": 0, + "content": "This section evaluates regular classifiers trained on CelebA [16] and counterfactually-trained (CT) classifiers on a mix of CelebA data and counterfactual images as described in Sec. 3.5. Table 2 presents accuracy and flip resistance (FR) results. CT outperforms the regular classifier. FR is assessed over 10,000 counterfactual images, with FR-25 and FR-100 denoting Flip Resistance after 25 and 100 optimization iterations, respectively. 
Both use \\(\\eta = 0.2\\) and \\(\\epsilon = 30\\). We can observe that the classifiers after CT are way less likely to be flipped by counterfactual images while maintaining a decent accuracy on the CalebA testset. Our approach robustifies the model by increasing the tolerance toward counterfactuals. Note that CT slightly improves the CelebA classifier when trained on a mixture of CelebA images (original images) and the counterfactual images generated with a generative model trained in the FFHQ [12] images (different domain)." + }, + { + "type": "table", + "bbox": [ + 0.503, + 0.089, + 0.89, + 0.178 + ], + "angle": 0, + "content": "
| Method | AC Flip Rate (%) | BC Flip Rate (%) |
| --- | --- | --- |
| Initialize ZOOM by A | 61.95 | 83.47 |
| + Attribute: Beard | 72.08 | 90.07 |
| + Attribute: Smiling | 87.47 | 96.27 |
| + Attribute: Lipstick | 90.96 | 94.07 |
| + Iterations increased to 200 | 92.91 | 94.87 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.188, + 0.892, + 0.231 + ], + "angle": 0, + "content": "Table 1. Model flip rate study. The initial attribute space \\(\\mathcal{A} =\\) {Bangs, Blond Hair, Bushy Eyebrows, Pale Skin, Pointy Nose}. AC is the perceived age classifier and BC is the big lips classifier." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.235, + 0.894, + 0.342 + ], + "angle": 0, + "content": "
| Attribute | Metric | Regular (%) | CT (Ours) (%) |
| --- | --- | --- | --- |
| Perceived Age | CelebA Accuracy | 86.10 | 86.29 |
|  | ZOOM FR-25 | 19.54 | 97.36 |
|  | ZOOM FR-100 | 9.04 | 95.65 |
| Big Lips | CelebA Accuracy | 74.36 | 75.39 |
|  | ZOOM FR-25 | 14.12 | 99.19 |
|  | ZOOM FR-100 | 5.93 | 88.91 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.352, + 0.892, + 0.408 + ], + "angle": 0, + "content": "Table 2. Results of network inference on CelebA original images and ZOOM-generated counterfactual. The CT classifier is significantly less prone to be flipped by counterfactual images, while test accuracy on CelebA remains performant." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.413, + 0.748, + 0.429 + ], + "angle": 0, + "content": "5. Conclusion and Discussion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.432, + 0.892, + 0.581 + ], + "angle": 0, + "content": "In this paper, we present ZOOM, a zero-shot model diagnosis framework that generates sensitivity histograms based on user's input of natural language attributes. ZOOM assesses failures and generates corresponding sensitivity histograms for each attribute. A significant advantage of our technique is its ability to analyze the failures of a target deep model without the need for laborious collection and annotation of test sets. ZOOM effectively visualizes the correlation between attributes and model outputs, elucidating model behaviors and intrinsic biases." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.583, + 0.893, + 0.824 + ], + "angle": 0, + "content": "Our work has three primary limitations. First, users should possess domain knowledge as their input (text of attributes of interest) should be relevant to the target domain. Recall that it is a small price to pay for model evaluation without an annotated test set. Second, StyleGAN2-ADA struggles with generating out-of-domain samples. Nevertheless, our adversarial learning framework can be adapted to other generative models (e.g., stable diffusion), and the generator can be improved by training on more images. We have rigorously tested our generator with various user inputs, confirming its effectiveness for regular diagnosis requests. Currently, we are exploring stable diffusion models to generate a broader range of classes while maintaining the core concept. Finally, we rely on a pre-trained model like CLIP which we presume to be free of bias and capable of encompassing all relevant attributes." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.825, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Acknowledgements: We would like to thank George Cazenavette, Tianyuan Zhang, Yinong Wang, Hanzhe Hu, Bharath Raj for suggestions in the presentation and experiments. We sincerely thank Ken Ziyu Liu, Jiashun Wang, Bowen Li, and Ce Zheng for revisions to improve this work." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "11638" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.468, + 0.156 + ], + "angle": 0, + "content": "[1] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. StarGAN v2: Diverse Image Synthesis for Multiple Domains. In CVPR, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.157, + 0.468, + 0.198 + ], + "angle": 0, + "content": "[2] Edo Collins, Raja Bala, Bob Price, and Sabine Susstrunk. Editing in Style: Uncovering the Local Semantics of GANs. In CVPR, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.2, + 0.468, + 0.255 + ], + "angle": 0, + "content": "[3] Emily Denton and Ben Hutchinson and Margaret Mitchell and Timnit Gebru and Andrew Zaldivar. 
Image counterfactual sensitivity analysis for detecting unintended bias. arXiv preprint arXiv:1906.06439, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.256, + 0.468, + 0.283 + ], + "angle": 0, + "content": "[4] Ian J. Goodfellow, Jonathon Shlens, and Christian Szegedy. Explaining and Harnessing Adversarial Examples. 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.285, + 0.468, + 0.324 + ], + "angle": 0, + "content": "[5] Yash Goyal, Ziyan Wu, Jan Ernst, Dhruv Batra, Devi Parikh, and Stefan Lee. Counterfactual Visual Explanations. In ICML, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.326, + 0.468, + 0.367 + ], + "angle": 0, + "content": "[6] Z. He, W. Zuo, M. Kan, S. Shan, and X. Chen. AttGAN: Facial Attribute Editing by Only Changing What You Want. In IEEE TIP, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.368, + 0.468, + 0.422 + ], + "angle": 0, + "content": "[7] Fangzhou Hong, Mingyuan Zhang, Liang Pan, Zhongang Cai, Lei Yang, and Ziwei Liu. AvatarCLIP: Zero-Shot Text-Driven Generation and Animation of 3D Avatars. In ACM TOG, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.424, + 0.468, + 0.465 + ], + "angle": 0, + "content": "[8] Erik Härkönen, Aaron Hertzmann, Jaakko Lehtinen, and Sylvain Paris. GANSpace: Discovering Interpretable GAN Controls. In NeurIPS, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.467, + 0.468, + 0.508 + ], + "angle": 0, + "content": "[9] Ameya Joshi, Amitangshu Mukherjee, Soumik Sarkar, and Chinmay Hegde. Semantic Adversarial Attacks: Parametric Transformations That Fool Deep Classifiers. In ICCV, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.509, + 0.468, + 0.55 + ], + "angle": 0, + "content": "[10] Kimmo Karkkainen and Jungseock Joo. FairFace: Face Attribute Dataset for Balanced Race, Gender, and Age for Bias Measurement and Mitigation. In WACV, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.551, + 0.468, + 0.592 + ], + "angle": 0, + "content": "[11] Tero Karras, Miika Aittala, Janne Hellsten, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Training Generative Adversarial Networks with Limited Data. In NeurIPS, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.593, + 0.468, + 0.633 + ], + "angle": 0, + "content": "[12] Tero Karras, Samuli Laine, and Timo Aila. A Style-Based Generator Architecture for Generative Adversarial Networks. In CVPR, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.635, + 0.468, + 0.703 + ], + "angle": 0, + "content": "[13] Oran Lang, Yossi Gandelsman, Michal Yarom, Yoav Wald, Gal Elidan, Avinatan Hassidim, William T. Freeman, Phillip Isola, Amir Globerson, Michal Irani, and Inbar Mosseri. Explaining in Style: Training a GAN To Explain a Classifier in StyleSpace. In ICCV, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.705, + 0.468, + 0.76 + ], + "angle": 0, + "content": "[14] Bo Li, Qiulin Wang, Jiquan Pei, Yu Yang, and Xiangyang Ji. Which Style Makes Me Attractive? Interpretable Control Discovery and Counterfactual Explanation on StyleGAN. arXiv preprint arXiv:2201.09689, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.761, + 0.468, + 0.788 + ], + "angle": 0, + "content": "[15] Zhiheng Li and Chenliang Xu. Discover the Unknown Biased Attribute of an Image Classifier. In ICCV, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.789, + 0.468, + 0.816 + ], + "angle": 0, + "content": "[16] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. 
Deep Learning Face Attributes in the Wild. In ICCV, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.817, + 0.468, + 0.858 + ], + "angle": 0, + "content": "[17] Jinqi Luo, Zhaoning Wang, Chen Henry Wu, Dong Huang, and Fernando De la Torre. Semantic image attack for visual model diagnosis. arXiv preprint arXiv:2303.13010, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.86, + 0.468, + 0.9 + ], + "angle": 0, + "content": "[18] Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. Towards Deep Learning Models Resistant to Adversarial Attacks. In ICLR, 2018." + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.468, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.133 + ], + "angle": 0, + "content": "[19] Joanna Materzynska, Antonio Torralba, and David Bau. Disentangling Visual and Written Concepts in CLIP. In CVPR, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.177 + ], + "angle": 0, + "content": "[20] Ramaravind K. Mothilal, Amit Sharma, and Chenhao Tan. Explaining Machine Learning Classifiers through Diverse Counterfactual Explanations. In ACM FAccT, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.179, + 0.892, + 0.222 + ], + "angle": 0, + "content": "[21] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. StyleCLIP: Text-Driven Manipulation of StyleGAN Imagery. In ICCV, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.224, + 0.892, + 0.264 + ], + "angle": 0, + "content": "[22] Ben Poole, Ajay Jain, Jonathan T. Barron, and Ben Mildenhall. DreamFusion: Text-to-3D using 2D Diffusion. arXiv preprint arXiv:2209.14988, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.267, + 0.892, + 0.322 + ], + "angle": 0, + "content": "[23] Haonan Qiu, Chaowei Xiao, Lei Yang, Xinchen Yan, Honglak Lee, and Bo Li. SemanticAdv: Generating Adversarial Examples via Attribute-conditioned Image Editing. In ECCV, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.324, + 0.892, + 0.407 + ], + "angle": 0, + "content": "[24] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning Transferable Visual Models From Natural Language Supervision. In ICML, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.409, + 0.892, + 0.451 + ], + "angle": 0, + "content": "[25] Vikram V. Ramaswamy, Sunnie S. Y. Kim, and Olga Russakovsky. Fair Attribute Classification Through Latent Space De-Biasing. In CVPR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.453, + 0.892, + 0.479 + ], + "angle": 0, + "content": "[26] Axel Sauer and Andreas Geiger. Counterfactual Generative Networks. In ICLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.482, + 0.892, + 0.524 + ], + "angle": 0, + "content": "[27] Yujun Shen, Ceyuan Yang, Xiaou Tang, and Bolei Zhou. InterFaceGAN: Interpreting the Disentangled Face Representation Learned by GANs. In IEEE TPAMI, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.526, + 0.892, + 0.553 + ], + "angle": 0, + "content": "[28] Yujun Shen and Bolei Zhou. Closed-Form Factorization of Latent Semantics in GANs. In CVPR, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.556, + 0.892, + 0.611 + ], + "angle": 0, + "content": "[29] Philipp Terhörst, Daniel Fährmann, Jan Niklas Kolf, Naser Damer, Florian Kirchbuchner, and Arjan Kuijper. MAAD-Face: A Massively Annotated Attribute Dataset for Face Images. In IEEE TIFS, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.613, + 0.892, + 0.655 + ], + "angle": 0, + "content": "[30] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. CLIP-NeRF: Text-and-Image Driven Manipulation of Neural Radiance Fields. In CVPR, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.657, + 0.892, + 0.725 + ], + "angle": 0, + "content": "[31] Jingdong Wang, Ke Sun, Tianheng Cheng, Borui Jiang, Chaorui Deng, Yang Zhao, Dong Liu, Yadong Mu, Mingkui Tan, Xinggang Wang, Wenyu Liu, and Bin Xiao. Deep High-Resolution Representation Learning for Visual Recognition. In IEEE TPAMI, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.728, + 0.892, + 0.77 + ], + "angle": 0, + "content": "[32] Zhou Wang, A.C. Bovik, H.R. Sheikh, and E.P. Simoncelli. Image Quality Assessment: from Error Visibility to Structural Similarity. In IEEE TIP, 2004." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.772, + 0.892, + 0.813 + ], + "angle": 0, + "content": "[33] Wayne Wu, Chen Qian, Shuo Yang, Quan Wang, Yici Cai, and Qiang Zhou. Look at Boundary: A Boundary-Aware Face Alignment Algorithm. In CVPR, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.816, + 0.892, + 0.857 + ], + "angle": 0, + "content": "[34] Zongze Wu, Dani Lischinski, and Eli Shechtman. StyleSpace Analysis: Disentangled Controls for StyleGAN Image Generation. In CVPR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.859, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[35] Weihao Xia, Yulun Zhang, Yujiu Yang, Jing-Hao Xue, Bolei Zhou, and Ming-Hsuan Yang. GAN Inversion: A Survey. In IEEE TPAMI, 2022." + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "11639" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.092, + 0.47, + 0.133 + ], + "angle": 0, + "content": "[36] Chaowei Xiao, Bo Li, Jun-yan Zhu, Warren He, Mingyan Liu, and Dawn Song. Generating Adversarial Examples with Adversarial Networks. In *IJCAI*, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.136, + 0.47, + 0.19 + ], + "angle": 0, + "content": "[37] Mingyuan Zhang, Zhongang Cai, Liang Pan, Fangzhou Hong, Xinying Guo, Lei Yang, and Ziwei Liu. MotionDiffuse: Text-Driven Human Motion Generation with Diffusion Model. arXiv preprint arXiv:2208.15001, 2022." 
+ }, + { + "type": "list", + "bbox": [ + 0.079, + 0.092, + 0.47, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "11640" + } + ] +] \ No newline at end of file diff --git a/2023/Zero-Shot Model Diagnosis/549384d4-f244-4966-8076-15abf189c0a7_origin.pdf b/2023/Zero-Shot Model Diagnosis/549384d4-f244-4966-8076-15abf189c0a7_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..19e75d557a3212f77340c6cd9caf29fb3170b561 --- /dev/null +++ b/2023/Zero-Shot Model Diagnosis/549384d4-f244-4966-8076-15abf189c0a7_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:561ec038d43be43f634578de6b66c2d89b9f1edc3dd8033fbc098fc6e0071daf +size 6163757 diff --git a/2023/Zero-Shot Model Diagnosis/full.md b/2023/Zero-Shot Model Diagnosis/full.md new file mode 100644 index 0000000000000000000000000000000000000000..da03808d10626570b054ec320239eacc793ff8e4 --- /dev/null +++ b/2023/Zero-Shot Model Diagnosis/full.md @@ -0,0 +1,467 @@ +# Zero-shot Model Diagnosis + +Jinqi Luo* + +Zhaoning Wang* + +Chen Henry Wu + +Dong Huang + +Fernando De la Torre + +Carnegie Mellon University + +{jinqil, zhaoning, chenwu2, dghuang, ftorre}@cs.cmu.edu + +# Abstract + +When it comes to deploying deep vision models, the behavior of these systems must be explicable to ensure confidence in their reliability and fairness. A common approach to evaluate deep learning models is to build a labeled test set with attributes of interest and assess how well it performs. However, creating a balanced test set (i.e., one that is uniformly sampled over all the important traits) is often time-consuming, expensive, and prone to mistakes. The question we try to address is: can we evaluate the sensitivity of deep learning models to arbitrary visual attributes without an annotated test set? + +This paper argues the case that Zero-shot Model Diagnosis (ZOOM) is possible without the need for a test set nor labeling. To avoid the need for test sets, our system relies on a generative model and CLIP. The key idea is enabling the user to select a set of prompts (relevant to the problem) and our system will automatically search for semantic counterfactual images (i.e., synthesized images that flip the prediction in the case of a binary classifier) using the generative model. We evaluate several visual tasks (classification, key-point detection, and segmentation) in multiple visual domains to demonstrate the viability of our methodology. Extensive experiments demonstrate that our method is capable of producing counterfactual images and offering sensitivity analysis for model diagnosis without the need for a test set. + +# 1. Introduction + +Deep learning models inherit data biases, which can be accentuated or downplayed depending on the model's architecture and optimization strategy. Deploying a computer vision deep learning model requires extensive testing and evaluation, with a particular focus on features with potentially dire social consequences (e.g., non-uniform behavior across gender or ethnicity). Given the importance of the problem, it is common to collect and label large-scale datasets to evaluate the behavior of these models across attributes of interest. Unfortunately, collecting these test + +![](images/3c343307fb18c4ea29ef25ad4087d7b7663b31d9277a7b39fe95b2f82af49943.jpg) +Figure 1. 
Given a differentiable deep learning model (e.g., a cat/dog classifier) and user-defined text attributes, how can we determine the model's sensitivity to specific attributes without using labeled test data? Our system generates counterfactual images (bottom right) based on the textual directions provided by the user, while also computing the sensitivity histogram (top right).

datasets is extremely time-consuming, error-prone, and expensive. Moreover, a balanced dataset, i.e., one that is uniformly distributed across all attributes of interest, is typically impractical to acquire due to its combinatorial nature. Even with careful metric analysis on such a test set, neither robustness nor fairness can be guaranteed, since there can be a mismatch between the real and test distributions [25]. This research explores model diagnosis without relying on a test set, in an effort to democratize model diagnosis and lower the associated cost.

Counterfactual explainability as a means of model diagnosis is drawing the community's attention [5,20]. Counterfactual images visualize the sensitive factors of an input image that can influence the model's outputs. In other words, counterfactuals answer the question: "How can we modify the input image $\mathbf{x}$ (while fixing the ground truth) so that the model prediction would diverge from $\mathbf{y}$ to $\hat{\mathbf{y}}$ ?". The parameterization of such counterfactuals provides insights into identifying the key factors of where the model fails. Unlike existing image-space adversary techniques [4,18], counterfactuals provide semantic perturbations that are interpretable by humans. However, existing counterfactual studies require the user to either collect uniform test sets [10], annotate discovered bias [15], or train a model-specific explanation every time the user wants to diagnose a new model [13].

On the other hand, recent advances in Contrastive Language-Image Pretraining (CLIP) [24] can help to overcome the above challenges. CLIP enables text-driven applications that map user text representations to visual manifolds for downstream tasks such as avatar generation [7], motion generation [37], or neural rendering [22, 30]. In the domain of image synthesis, StyleCLIP [21] reveals that text-conditioned optimization in the StyleGAN [12] latent space can decompose latent directions for image editing, allowing for the mutation of a specific attribute without disturbing others. With such capability, users can freely edit semantic attributes conditioned on text inputs. This paper further explores its use in the scope of model diagnosis.

The central concept of the paper is depicted in Fig. 1. Consider a user interested in evaluating which factors contribute to the lack of robustness in a cat/dog classifier (target model). By selecting a list of keyword attributes, the user is able to (1) see counterfactual images where semantic variations flip the target model predictions (see the classifier score in the top-right corner of the counterfactual images) and (2) quantify the sensitivity of each attribute for the target model (see the sensitivity histogram on the top). Instead of using a test set, we propose using a StyleGAN generator as the picture engine for sampling counterfactual images. CLIP transforms the user's text input and enables model diagnosis in an open-vocabulary setting. This is a major advantage, since no image collection or annotation is needed and only minimal user expert knowledge is required.
In addition, we are not tied to a particular annotation scheme from existing datasets (e.g., the specific attributes labeled in CelebA [16]).

To summarize, our proposed work offers three major improvements over earlier efforts:

- The user needs no labeled, balanced test dataset and only minimal expert knowledge in order to evaluate where a model fails (i.e., model diagnosis). In addition, the method provides a sensitivity histogram across the attributes of interest.
- When a different target model or a new user-defined attribute space is introduced, it is not necessary to retrain our system, allowing for practical use.
- The target model fine-tuned with counterfactual images not only slightly improves the classification performance, but also greatly increases the distributional robustness against counterfactual images.

# 2. Related Work

This section reviews prior work on attribute editing with generative models and recent efforts on model diagnosis.

# 2.1. Attribute Editing with Generative Models

With recent progress in generative models, GANs support high-quality image synthesis as well as semantic attribute editing [35]. [1, 6] edit images by perturbing the intermediate latent space encoded from the original images. These methods rely on images being encoded into latent vectors to perform attribute editing. In contrast, StyleGAN [12] can produce images by sampling the latent space. Many works have explored ways to edit attributes in the latent space of StyleGAN, either by relying on image annotations [27] or in an unsupervised manner [8, 28]. StyleSpace [34] further disentangles the latent space of StyleGAN and can perform specific attribute edits via disentangled style vectors. Building upon StyleSpace, StyleCLIP [21] connects the CLIP language space with the StyleGAN latent space to enable arbitrary edits specified by text. Our work adopts this concept for fine-grained attribute editing.

# 2.2. Model Diagnosis

To the best of our knowledge, model diagnosis without a test set is a relatively unexplored problem. In the adversarial learning literature, it is common to find methods that show how image-space perturbations [4, 18] flip the model prediction; however, such perturbations lack visual interpretability. [36] pioneered the synthesis of adversarial examples with GANs. More recently, [9, 23, 26] propose generative methods that synthesize semantically perturbed images to visualize where the target model fails. However, their attribute editing is limited to the dataset's annotated labels. Instead, our framework allows users to easily customize their own attribute space, in which we visualize and quantify the biased factors that affect the model prediction. On the bias detection track, [13] co-trains a model-specific StyleGAN with each target model and requires human annotators to name attribute coordinates in the StyleSpace. [3, 14, 15] synthesize counterfactual images by either optimally traversing the latent space or learning an attribute hyperplane, after which the user inspects the represented bias. Unlike previous work, we propose to diagnose a deep learning model without any model-specific re-training, new test sets, or manual annotations/inspections.

# 3. Method

This section first describes our method to generate counterfactual images guided by CLIP in a zero-shot manner. We then introduce how we perform the sensitivity analysis across attributes of interest. Fig. 2 shows the overview of our framework.

# 3.1. 
Notation and Problem Definition + +Let $f_{\theta}$ , parameterized by $\theta$ , be the target model that we want to diagnose. In this paper, $f_{\theta}$ denotes two types of + +![](images/247dfc97e41f80fd7d1d2a86fd3efb21ea4c42cb07451c98651a328c990284e9.jpg) +Figure 2. The ZOOM framework. Black solid lines stand for forward passes, red dashed lines stand for backpropagation, and purple dashed lines stand for inference after the optimization converges. The user inputs single or multiple attributes, and we map them into edit directions with the method in Sec. 3.2. Then we assign to each edit direction (attribute) a weight, which represents how much we are adding/removing this attribute. We iteratively perform adversarial learning on the attribute space to maximize the counterfactual effectiveness. + +deep nets: binary attribute classifiers and face keypoint detectors. Note that our approach is extendable to any end-to-end differentiable target deep models. Let $\mathcal{G}_{\phi}$ , parameterized by $\phi$ , be the style generator that synthesizes images by $\mathbf{x} = \mathcal{G}_{\phi}(\mathbf{s})$ where $\mathbf{s}$ is the style vector in Style Space $S$ [34]. We denote a counterfactual image as $\hat{\mathbf{x}}$ , which is a synthesized image that misleads the target model $f_{\theta}$ , and denote the original reference image as $\mathbf{x}$ . $a$ is defined as a single user input text-based attribute, with its domain $\mathcal{A} = \{a_i\}_{i=1}^N$ for $N$ input attributes. $\hat{\mathbf{x}}$ and $\mathbf{x}$ differs only along attribute directions $\mathcal{A}$ . Given a set of $\{f_{\theta}, \mathcal{G}_{\phi}, \mathcal{A}\}$ , our goal is to perform counterfactual-based diagnosis to interpret where the model fails without manually collecting nor labeling any test set. Unlike traditional approaches of image-space noises which lack explainability to users, our method adversarially searches the counterfactual in the user-designed semantic space. To this end, our diagnosis will have three outputs, namely counterfactual images (Sec. 3.3), sensitivity histograms (Sec. 3.4), and distributionally robust models (Sec. 3.5). + +# 3.2. Extracting Edit Directions + +This section examines the terminologies, method, and modification we adopt in ZOOM to extract suitable global directions for attribute editing. Since CLIP has shown strong capability in disentangling visual representation [19], we incorporate style channel relevance from Style-CLIP [21] to find edit directions for each attribute. + +Given the user's input strings of attributes, we want to find an image manipulation direction $\Delta \mathbf{s}$ for any $\mathbf{s} \sim \mathcal{S}$ , such that the generated image $\mathcal{G}_{\phi}(\mathbf{s} + \Delta \mathbf{s})$ only varies in the input attributes. Recall that CLIP maps strings into a text embedding $\mathbf{t} \in \mathcal{T}$ , the text embedding space. For a string attribute description $a$ and a neutral prefix $p$ , we obtain the CLIP text embedding difference $\Delta \mathbf{t}$ by: + +$$ +\Delta \mathbf {t} = \operatorname {C L I P} _ {\text {t e x t}} (p \oplus a) - \operatorname {C L I P} _ {\text {t e x t}} (p) \tag {1} +$$ + +where $\oplus$ is the string concatenation operator. To take 'Eyeglasses' as an example, we can get $\Delta t = \mathrm{CLIP}_{\mathrm{text}}$ (a face with Eyeglasses) - $\mathrm{CLIP}_{\mathrm{text}}$ (a face). 
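As a concrete illustration of Eq. 1, the text-embedding difference can be computed with the publicly released CLIP package in a few lines. This is only a sketch, not the authors' code; the prompt strings below follow the 'Eyeglasses' example above, and the ViT-B/32 backbone matches the CLIP model mentioned later in Sec. 4.1.

```python
import torch
import clip  # OpenAI CLIP: https://github.com/openai/CLIP

device = "cuda" if torch.cuda.is_available() else "cpu"
model, _ = clip.load("ViT-B/32", device=device)

@torch.no_grad()
def text_direction(attr_prompt: str, neutral_prompt: str) -> torch.Tensor:
    """Delta_t of Eq. 1: CLIP_text(p (+) a) - CLIP_text(p)."""
    tokens = clip.tokenize([attr_prompt, neutral_prompt]).to(device)
    emb = model.encode_text(tokens).float()
    return emb[0] - emb[1]

# The 'Eyeglasses' example from the text:
delta_t = text_direction("a face with Eyeglasses", "a face")
```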
To get the edit direction, $\Delta \mathbf{s}$ , we need to utilize a style relevance mapper $\mathbf{M} \in \mathbb{R}^{c_{\mathcal{S}} \times c_{\mathcal{T}}}$ to map between the CLIP text embedding vectors of length $c_{\mathcal{T}}$ and the Style space vectors of length $c_{\mathcal{S}}$ . StyleCLIP optimizes $\mathbf{M}$ by iteratively searching for meaningful style channels: it mutates each channel in $\mathcal{S}$ and encodes the mutated images with CLIP to assess whether there is a significant change in the $\mathcal{T}$ space. To prevent undesired edits that are irrelevant to the user prompt, the edit direction $\Delta \mathbf{s}$ filters out channels whose style value change is insignificant:

$$
\Delta \mathbf{s} = (\mathbf{M} \cdot \Delta \mathbf{t}) \odot \mathbb{1} ((\mathbf{M} \cdot \Delta \mathbf{t}) > \lambda), \tag{2}
$$

where $\lambda$ is the hyper-parameter for the threshold value, $\mathbb{1}(\cdot)$ is the indicator function, and $\odot$ is the element-wise product operator. Since the success of attribute editing with the extracted edit directions is key to our approach, Appendix A shows the capability of CLIP by visualizing the global edit directions on multiple sampled images, conducting a user study, and analyzing the effect of $\lambda$ .

# 3.3. Style Counterfactual Synthesis

Identifying semantic counterfactuals necessitates a manageable parametrization of the semantic space for effective exploration. For ease of notation, we denote $(\Delta \mathbf{s})_i$ as the global edit direction for the $i^{th}$ attribute $a_i \in \mathcal{A}$ from the user input. After these $N$ attributes are provided and the edit directions are calculated, we initialize a control vector $\mathbf{w}$ of length $N$ , where the $i^{th}$ element $w_i$ controls the strength of the $i^{th}$ edit direction. Our counterfactual edit is a linear combination of normalized edit directions: $\mathbf{s}_{edit} = \sum_{i=1}^{N} w_i \frac{(\Delta \mathbf{s})_i}{||(\Delta \mathbf{s})_i||}$ .

The black arrows in Fig. 2 show the forward inference to synthesize counterfactual images. Given the parametrization of attribute editing strengths and the final loss value, our framework searches for counterfactual examples in the optimizable edit weight space. The original sampled image is $\mathbf{x} = G_{\phi}(\mathbf{s})$ , and the counterfactual image is

$$
\hat{\mathbf{x}} = G_{\phi}(\mathbf{s} + \mathbf{s}_{edit}) = G_{\phi}\left(\mathbf{s} + \sum_{i=1}^{N} w_i \frac{(\Delta \mathbf{s})_i}{||(\Delta \mathbf{s})_i||}\right), \tag{3}
$$

which is obtained by minimizing the following loss, $\mathcal{L}$ , a weighted sum of three terms:

$$
\mathcal{L}(\mathbf{s}, \mathbf{w}) = \alpha \mathcal{L}_{\text{target}}(\hat{\mathbf{x}}) + \beta \mathcal{L}_{\text{struct}}(\hat{\mathbf{x}}) + \gamma \mathcal{L}_{\text{attr}}(\hat{\mathbf{x}}). \tag{4}
$$

We back-propagate to optimize $\mathcal{L}$ w.r.t. the weights of the edit directions $\mathbf{w}$ , shown as the red pipeline in Fig. 2.

The targeted adversarial loss $\mathcal{L}_{target}$ for binary attribute classifiers minimizes the distance between the current model prediction $f_{\theta}(\hat{\mathbf{x}})$ and the flip of the original prediction $\hat{p}_{cls} = 1 - f_{\theta}(\mathbf{x})$ .
In the case of an eyeglass classifier on a person wearing eyeglasses, $\mathcal{L}_{target}$ will guide the optimization to search w such that the model predicts no eyeglasses. For a keypoint detector, the adversarial loss will minimize the distance between the model keypoint prediction with a set of random points $\hat{p}_{kp} \sim \mathcal{N}$ : + +(binary classifier) $\mathcal{L}_{target}(\hat{\mathbf{x}}) = L_{CE}(f_{\theta}(\hat{\mathbf{x}}),\hat{p}_{cls})$ (5) + +(keypoint detector) $\mathcal{L}_{target}(\hat{\mathbf{x}}) = L_{MSE}(f_{\theta}(\hat{\mathbf{x}}),\hat{p}_{kp})$ (6) + +If we only optimize $\mathcal{L}_{\text {target }}$ w.r.t the global edit directions, it is possible that the method will not preserve image statistics of the original image and can include the particular attribute that we are diagnosing. To constrain the optimization, we added a structural loss $\mathcal{L}_{\text {struct }}$ and an attribute consistency loss $\mathcal{L}_{\text {attr }}$ to avoid generation collapse. $\mathcal{L}_{\text {struct }}$ [32] aims to preserve global image statistics of the original image x including image contrasts, background, or shape identity during the adversarial editing. While $\mathcal{L}_{\text {attr }}$ enforces that the target attribute (perceived ground truth) be consistent on the style edits. For example, when diagnosing the eyeglasses classifier, ZOOM preserves the original status of eyeglasses and precludes direct eyeglasses addition/removal. + +$$ +\mathcal {L} _ {\text {s t r u c t}} (\hat {\mathbf {x}}) = L _ {\text {S S I M}} (\hat {\mathbf {x}}, \mathbf {x}) \tag {7} +$$ + +$$ +\mathcal {L} _ {\text {a t t r}} (\hat {\mathbf {x}}) = L _ {C E} \left(\operatorname {C L I P} (\hat {\mathbf {x}}), \operatorname {C L I P} (\mathbf {x})\right) \tag {8} +$$ + +Given a pretrained target model $f_{\theta}$ , a domain-specific style generator $G_{\phi}$ , and a text-driven attribute space $\mathcal{A}$ , our goal is to sample an original style vector $\mathbf{s}$ for each image and search its counterfactual edit strength $\hat{\mathbf{w}}$ : + +$$ +\hat {\mathbf {w}} = \underset {\mathbf {w}} {\operatorname {a r g m i n}} \mathcal {L} (\mathbf {s}, \mathbf {w}). \tag {9} +$$ + +Unless otherwise stated, we iteratively update $\mathbf{w}$ as: + +$$ +\mathbf {w} = \operatorname {c l a m p} _ {[ - \epsilon , \epsilon ]} (\mathbf {w} - \eta \nabla_ {\mathbf {w}} \mathcal {L}), \tag {10} +$$ + +where $\eta$ is the step size and $\epsilon$ is the clamp bound to avoid synthesis collapse caused by exaggerated edit. Note that the maximum counterfactual effectiveness does not indicate the maximum edit strength (i.e., $w_{i} = \epsilon$ ), since the attribute edit direction does not necessarily overlap with the target classifier direction. The attribute change is bi-directional, as the $w_{i}$ can be negative in Eq. 3. Details of using other optimization approaches (e.g., linear approximation [18]) will be discussed in Appendix C. + +# 3.4. Attribute Sensitivity Analysis + +Single-attribute counterfactual reflects the sensitivity of target model on the individual attribute. 
By optimizing independently along the edit direction for a single attribute and averaging the model probability changes over images, our framework generates an independent sensitivity score $h_i$ for each attribute $a_i$ :

$$
h_i = \mathbb{E}_{\mathbf{x} \sim \mathcal{P}(\mathbf{x}),\, \hat{\mathbf{x}} = \mathrm{ZOOM}(\mathbf{x}, a_i)} \left| f_{\theta}(\mathbf{x}) - f_{\theta}(\hat{\mathbf{x}}) \right|. \tag{11}
$$

The sensitivity score $h_i$ is the probability difference between the original image $\mathbf{x}$ and the generated image $\hat{\mathbf{x}}$ , at the most counterfactual point when changing attribute $a_i$ . We synthesize a number of images from $\mathcal{G}_{\phi}$ , then iteratively compute the sensitivity for each given attribute, and finally normalize all sensitivities to draw the histogram as shown in Fig. 4. The histogram indicates the sensitivity of the evaluated model $f_{\theta}$ to each of the user-defined attributes. Higher sensitivity for one attribute means that the model is more easily affected by that attribute.

# 3.5. Counterfactual Training

The multi-attribute counterfactual approach visualizes semantic combinations that cause the model to falter, providing valuable insights for enhancing the model's robustness. We naturally adopt the concept of iterative adversarial training [18] to robustify the target model. In each iteration, ZOOM receives the target model parameters and returns a batch of mutated counterfactual images with the model's original predictions as labels. The target model is then trained on the counterfactually-augmented images to achieve the robustness goal:

$$
\theta^{*} = \underset{\theta}{\operatorname{argmin}}\ \mathbb{E}_{\mathbf{x} \sim \mathcal{P}(\mathbf{x}),\, \hat{\mathbf{x}} = \operatorname{ZOOM}(\mathbf{x}, \mathcal{A})} L_{CE}\left(f_{\theta}(\hat{\mathbf{x}}), f_{\theta}(\mathbf{x})\right), \tag{12}
$$

where batches of $\mathbf{x}$ are randomly sampled from the StyleGAN generator $\mathcal{G}_{\phi}$ . In the following, we abbreviate this process as Counterfactual Training (CT). Note that, although not explicitly expressed in Eq. 12, the CT process is a min-max game: ZOOM synthesizes counterfactuals to maximize the variation of the model prediction (while preserving the perceived ground truth), and the target model is trained on the counterfactual images to minimize that variation.
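To make the optimization of Eqs. 3-10 concrete, a minimal sketch of the per-image counterfactual search for a binary classifier is given below. `G`, `f_theta`, `struct_loss`, and `attr_loss` are placeholders for the StyleGAN generator, the target model (assumed to output a probability in (0, 1)), and the SSIM- and CLIP-based consistency terms of Eqs. 7-8; the default hyper-parameter values are illustrative only, not the authors' settings.

```python
import torch
import torch.nn.functional as F

def counterfactual_search(G, f_theta, s, edit_dirs, struct_loss, attr_loss,
                          alpha=1.0, beta=1.0, gamma=1.0,
                          eta=0.2, eps=30.0, n_iters=100):
    """Search the edit weights w (Eqs. 9-10) that flip a binary classifier."""
    # Normalized global edit directions (Delta s)_i / ||(Delta s)_i|| (Eq. 3).
    dirs = torch.stack([d / d.norm() for d in edit_dirs])        # (N, dim(S))
    w = torch.zeros(len(edit_dirs), device=s.device, requires_grad=True)

    x = G(s)                                       # original sampled image
    p_flip = 1.0 - f_theta(x).detach().round()     # flipped prediction target (Eq. 5)

    for _ in range(n_iters):
        s_edit = (w[:, None] * dirs).sum(dim=0)
        x_hat = G(s + s_edit)                      # counterfactual candidate (Eq. 3)
        loss = (alpha * F.binary_cross_entropy(f_theta(x_hat), p_flip)  # L_target
                + beta * struct_loss(x_hat, x)                          # L_struct
                + gamma * attr_loss(x_hat, x))                          # L_attr
        grad, = torch.autograd.grad(loss, w)
        with torch.no_grad():
            w -= eta * grad                        # gradient step (Eq. 10)
            w.clamp_(-eps, eps)                    # clamp to [-eps, eps]

    with torch.no_grad():
        x_hat = G(s + (w[:, None] * dirs).sum(dim=0))
    return x_hat, w.detach()
```

The single-attribute sensitivity of Eq. 11 then follows by running this search with one edit direction at a time and averaging $|f_{\theta}(\mathbf{x}) - f_{\theta}(\hat{\mathbf{x}})|$ over images sampled from $\mathcal{G}_{\phi}$.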
+ +![](images/382f5252f5eef69ed8ac56fa86f515853e05d632939c48c6660966221ace8272.jpg) +Open Mouth + +![](images/6e6f7765c0c6d9f1d2dfdc190d779bfe664e680f1fa60e96badfb38c36bde1f4.jpg) +$\frac{1}{2}x - 1 > 0$ + +![](images/d58c5d6850d439ce2439ccb870594aa331b04646e7cdef3bd0558f01e41331d5.jpg) +$\frac{1}{2}x - 1 > 0$ + +![](images/71d46012b365d52f94b9082a48b36a98cd58e025a583732997d1de6c2f945647.jpg) + +![](images/e52e69fb3e1a7bb5d2200ba25acd415cca0e9d8625a7a2ccf1e06fd40de18944.jpg) +Closed Mouth + +![](images/5998e6f16297004dcded0e568b7cc7fa1aeb1d4478fa0d868cedde8c94e8abb9.jpg) + +![](images/f8433beb32c5896e06a7aa642d6d2e15c4fb1518e7fd0631894bb952d4187d32.jpg) + +![](images/b6dd9a6086f785bfc6b1ac4c27d8833b5f9c5b1ab95041790dcf663b8f50f846.jpg) + +![](images/28703eb187d242dc891d55db71d5efddeceab3e8980a5c1277a0ce6afedee26e.jpg) + +![](images/7614e0fc2876e9a275a20e70f7ed9a9f2b21e3d09bda32999fac3b9d7e2e8d29.jpg) + +![](images/6a3a3474601bb128b509ebe8645c0463326834db7d238ec593ff709a98712b77.jpg) +Felidae Pupil +$\frac{3}{1} + u + {4q} = 1 + u + {uq}$ dH + +![](images/4d6861e565df844de868c6952bcbc37a40413ae51cdc59f8416074b34ef31c4e.jpg) +$\frac{3}{1} + u + {4q} = 1 + u + {uq}$ dH + +![](images/4d8a7695d44eb9f470f709190f28a206de0c1aab536a4b0437ec3bc92cf5bd11.jpg) +$\frac{3}{1} + u + {4q} = 1 + u + {uq}$ dH + +![](images/d057aae1bdbe74f1ef3ade2252fe5600868dacc63d10ffa80489d5108f2465cd.jpg) + +![](images/7f0ee7ecc4f01729ebc6c197b05497ab9e0d2c9ad1e1974529aaba7ff496842a.jpg) +Canidae Pupil + +![](images/950ca79c0a21d73328cb6c2141ef3a183badae2ab7c780a9fdaf31d8eff33e9a.jpg) + +![](images/4a3892a846507c7e850b5c42d892d2c92afc49f1ac4bfb0c9247b91e1e0e3782.jpg) + +![](images/fff7032c14f3dd2afb30d5725689aeb7224f00b1835d24d2086789fd848604f4.jpg) + +![](images/f4ee52e5caa6487a1c008a41983ab01976ff119b38dd1cfdc50448e4f2284da8.jpg) + +![](images/f01f0ae813036598b13cd750bae2ac75ae2ade78cfe29545fa92258a290b064b.jpg) + +![](images/fa8994a05b33e168543b91bca0c47422be0966914496d537e87c679df5e3861f.jpg) +Figure 3. Effect of progressively generating counterfactual images on (left) cat/dog classifier (0-Cat / 1-Dog), and (right) perceived age classifier (0-Senior / 1-Young). Model probability prediction during the process is attached at the top right corner. + +![](images/727877f509e7e5d67c5b05d7d903a72ee905c64a7b8dbdfad753de413e2325fe.jpg) + +![](images/b77fcf9e992f6c5a9b18b69d5691eb5072855ddebb3e5b44b5ca39ecb67ab12b.jpg) + +![](images/5600c547b9caf898459903860dd888f4bcb0a9050f29754b6b7151f43b1597df.jpg) + +![](images/1847122c4f8c41b0eb8a40cf92eecf152e7305a956b9ffb3b71751c5c69b6fd7.jpg) +(a) Model diagnosis histograms generated by ZOOM on four facial attribute classifiers. +(b) Model diagnosis histograms generated by ZOOM on four classifiers trained on manually-crafted imbalance data. +Figure 4. Model diagnosis histograms generated by ZOOM. The vertical axis values reflect the attribute sensitivities calculated by averaging the model probability change over all sampled images. The horizontal axis is the attribute space input by user. + +![](images/7aefa2cf30afb6773a8d83624c5cc365680c7a839dfb50d5f30e1c4037506cf3.jpg) + +![](images/3b29ecd3f3d5ddb487e1fec77c9ab7e51b89f73310a02eefdbed083bdd755f27.jpg) + +![](images/e39ef17850764b0a9307a0e37db6c248af616552d7b6550dc1ceacd5997a6f2f.jpg) + +# 4. Experimental Results + +This section describes the experimental validations on the effectiveness and reliability of ZOOM. First, we describe the model setup in Sec. 4.1. Sec. 4.2 and Sec. 
4.3 visualize and validate the model diagnosis results for the single-attribute setting. In Sec. 4.4, we show results on synthesized multiple-attribute counterfactual images and apply them to counterfactual training.

# 4.1. Model Setup

Pre-trained models: We used StyleGAN2-ADA [11] pretrained on FFHQ [12] and AFHQ [1] as our base generative networks, and the pre-trained CLIP model [24] parameterized by ViT-B/32. We followed the StyleCLIP [21] setup to compute the channel relevance matrices $\mathcal{M}$ .

Target models: Our classifier models are ResNet50 networks with a single fully-connected head, initialized by TorchVision. In training the binary classifiers, we use the Adam optimizer with learning rate 0.001 and batch size 128. We train binary classifiers for the Eyeglasses, Perceived Gender, Mustache, and Perceived Age attributes on CelebA, and for cat/dog classification on AFHQ. For the 98-keypoint detectors, we used the HR-Net architecture [31] on WFLW [33].

# 4.2. Visual Model Diagnosis: Single-Attribute

Understanding where a deep learning model fails is an essential step towards building trustworthy models. Our proposed work allows us to generate counterfactual images (Sec. 3.3) and provide insights into the sensitivities of the target model (Sec. 3.4). This section visualizes counterfactual images in which only one attribute is modified at a time.

Fig. 3 shows the single-attribute counterfactual images. Interestingly (but not unexpectedly), we can see that reducing the hair length or joyfulness makes the age classifier more likely to label the face as an older person. Note that our approach extends to multiple domains: as we change the cat-like pupil to a dog-like one, the cat/dog classification tends towards the dog. Using the counterfactual images, we can conduct model diagnosis with the method described in Sec. 3.4, revealing which attributes the model is sensitive to. In the histogram generated by model diagnosis, a higher bar means the model is more sensitive toward the corresponding attribute.

Fig. 4a shows the model diagnosis histograms for regularly-trained classifiers. For instance, the cat/dog classifier histogram shows outstanding sensitivity to green eyes and vertical pupils. This analysis is intuitive, since these are cat-biased traits rarely observed in dog photos. Moreover, the histogram of the eyeglasses classifier shows that mutating bushy eyebrows is especially influential for flipping the model prediction, potentially revealing the positional correlation between eyeglasses and bushy eyebrows. The advantage of single-attribute model diagnosis is that the score of each attribute in the histogram is independent of the other attributes, enabling an unambiguous understanding of the exact semantics that make the model fail. Diagnosis results for additional target models can be found in Appendix B.

# 4.3. Validation of Visual Model Diagnosis

Evaluating whether our zero-shot sensitivity histograms (Fig. 4) explain the true vulnerability is a difficult task, since we do not have access to a sufficiently large and balanced test set fully annotated in an open-vocabulary setting. To verify the performance, we create synthetically imbalanced cases where the model bias is known. We then compare our results with a supervised diagnosis setting [17]. In addition, we validate the decoupling of the attributes by CLIP.
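For reference, the binary attribute classifiers of Sec. 4.1 can be instantiated roughly as follows. This is a sketch of the stated configuration (ResNet-50 backbone from TorchVision, Adam with learning rate 0.001, batch size 128); the ImageNet initialization and the sigmoid output head are assumptions, not details given in the text.

```python
import torch
import torch.nn as nn
from torchvision import models

def make_target_classifier() -> nn.Module:
    """ResNet-50 with a single fully-connected head, outputting one probability."""
    backbone = models.resnet50(weights=models.ResNet50_Weights.IMAGENET1K_V1)  # assumption
    backbone.fc = nn.Linear(backbone.fc.in_features, 1)
    return nn.Sequential(backbone, nn.Sigmoid())   # f_theta(x) in (0, 1)

f_theta = make_target_classifier()
optimizer = torch.optim.Adam(f_theta.parameters(), lr=1e-3)  # Adam, lr = 0.001 (Sec. 4.1)
criterion = nn.BCELoss()
# ... train with batch size 128 on the CelebA attribute of interest ...
```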
+ +# 4.3.1 Creating imbalanced classifiers + +In order to evaluate whether our sensitivity histogram is correct, we train classifiers that are highly imbalanced towards a known attribute and see whether ZOOM can detect the sensitivity w.r.t the attribute. For instance, when training the perceived-age classifier (binarized as Young in CelebA), we created a dataset on which the trained classifier is strongly sensitive to Bangs (hair over forehead). The custom dataset is a CelebA training subset that consists of 20, 200 images. More specifically, there are 10,000 images that have both young people that have bangs, represented as (1, 1), and 10,000 images of people that are not young and have no bangs, represented as (0, 0). The remaining combinations of (1, 0) and (0, 1) have only 100 images. With this imbalanced dataset, bangs is the attribute that dominantly correlates with whether the person is young, and hence the perceived-age classifier would be highly sensitive towards bangs. See Fig. 5 (the right histograms) for an illustration of the sensitivity histogram computed by our method for the case of an age classifier with bangs (top) and lipstick (bottom) being imbalanced. + +We trained multiple imbalanced classifiers with this methodology, and visualize the model diagnosis histograms of these imbalanced classifiers in Fig. 4b. We can observe that the ZOOM histograms successfully detect the + +![](images/03c482988bb95c3fb2b913e7d04e7753f129604886046e43297446927fd39540.jpg) +Figure 5. The sensitivity of the age classifier is evaluated with ZOOM (right) and AttGAN (left), achieving comparable results. + +![](images/868104e74e862e5874b0fc8bdea2db78e9c3a4eddc9dca8eb7eac1b667dd688e.jpg) +(a) Mustache classifier +Figure 6. Confusion matrix of CLIP score variation (vertical axis) when perturbing attributes (horizontal axis). This shows that attributes in ZOOM are highly decoupled. + +![](images/dcbaf559d890ab0d776db1dfd7c533b9d30df76634298628b22e1c7d2c277833.jpg) +(b) Perceived age classifier + +synthetically-made bias, which are shown as the highest bars in histograms. See the caption for more information. + +# 4.3.2 Comparison with supervised diagnosis + +We also validated our histogram by comparing it with the case in which we have access to a generative model that has been explicitly trained to disentangle attributes. We follow the work on [17] and used AttGAN [6] trained on the CelebA training set over 15 attributes2. After the training converged, we performed adversarial learning in the attribute space of AttGAN and create a sensitivity histogram using the same approach as Sec. 3.4. Fig. 5 shows the result of this method on the perceived-age classifier which is made biased towards bangs. As anticipated, the AttGAN histogram (left) corroborates the histogram derived from our method (right). Interestingly, unlike ZOOM, AttGAN show less sensitivity to remaining attributes. This is likely + +![](images/971f72c88bb26074e4f23622c9671dc69ad2c5c8ef52b1de0e3abff7f432ed84.jpg) +Counterfactual Original + +![](images/59a77b820be44c6ef95e06c79ed435d4aea09f30e59a51a44334ce5918612f50.jpg) +Figure 7. Multi-attribute counterfactual in faces. The model probability is documented in the upper right corner of each image. 
+ +![](images/f9405d19790d274a6d1931829f8e4e575bd3f7cc28b5941c5c136c6e6fe50c9f.jpg) + +![](images/0b74918e51861880fb6780c140d8792c3dd7328306aa4b189e381e22a0959b59.jpg) + +![](images/62d9231115f02f5bfbba46a4a4e0d5568e7376856e2196457f2c2cadb092c30d.jpg) + +![](images/95c9e54324f64ecb0c2b49a24c3925de7336d3b45bf0e60163eb774073f3797e.jpg) + +![](images/35b5f6e20e6167a44791b6cb91234ca74c030f5e4c36171689c395c8065cd4e6.jpg) + +![](images/97e45ff9d147e87912d810e4d276a4c527a7d1ad64ea900907f4c91ed774a842.jpg) + +![](images/da9051d7fff8dc80e0c62ad7ee8bb1f22aee7c66ceee600246b30ecd46b68ec9.jpg) + +![](images/fb66c5f6fd296b219f4e39383cf0d5e8611263f518e77b88dcc07bac8524b5fb.jpg) + +![](images/64ca890b2eb603a0c45fbe34384d65cb3f74b4fb1c434434a634b531abed726f.jpg) + +![](images/75a69bd63b6bb345d672a5f742d694288aff8578f206e84032f24c69b9a9b894.jpg) + +![](images/ca61c210042de2fc0e449802d99fb5d4d50854952d4b0d1c3b744eeb1bc247eb.jpg) + +![](images/946eed3c0bbd26dbe64a2cfba69b326b2847fd1f8cba97cbe6ac25e8917ff046.jpg) + +![](images/d123ff6aac734384ccd47ae0c44a41de25a370497a5d42d8c5f027dda49c9db5.jpg) + +![](images/0ef9843e612c9e58dda93b42683eadb3af0fd3f8500c8a006c5c82c974bc2501.jpg) + +![](images/117f9d63d2885bbdb0be3707734cbab387bc97bca6cd5b6a8ae619f0b9996fa3.jpg) + +![](images/f48d716b2acde35b9f94c46749523a8fb01e8260331bb184c25132e442f4c22e.jpg) + +![](images/c909bca5f6f918435a78f23e7f3cbf31b341cf4d888a1eecd8568c29216efa66.jpg) + +![](images/2e4a4d43d388288a2a74b5386a43481c22c06d07edd49bac51178d01e8a5a1bf.jpg) + +because AttGAN has a latent space learned in a supervised manner and hence attributes are better disentangled than with StyleGAN. Note that AttGAN is trained with a fixed set of attributes; if a new attribute of interest is introduced, the dataset needs to be re-labeled and AttGAN retrained. ZOOM, however, merely calls for the addition of a new text prompt. More results in Appendix B. + +# 4.3.3 Measuring disentanglement of attributes + +Previous works demonstrated that the StyleGAN's latent space can be entangled [2, 27], adding undesired dependencies when searching single-attribute counterfactuals. This section verifies that our framework can disentangle the attributes and mostly edit the desirable attributes. + +We use CLIP as a super annotator to measure attribute changes during single-attribute modifications. For 1,000 images, we record the attribute change after performing adversarial learning in each attribute, and average the attribute score change. Fig. 6 shows the confusion matrix during single-attribute counterfactual synthesis. The horizontal axis is the attribute being edited during the optimization, and the vertical axis represents the CLIP prediction changed by the process. For instance, the first column of Fig. 6a is generated when we optimize over bangs for the mustache classifier. We record the CLIP prediction variation. It clearly shows that bangs is the dominant attribute changing during the optimization. From the main diagonal of matrices, it is evident that the ZOOM mostly perturbs the attribute of interest. The results indicate reasonable disentanglement among attributes. + +# 4.4. Visual Model Diagnosis: Multi-Attributes + +In the previous sections, we have visualized and validated single-attribute model diagnosis histograms and counterfactual images. In this section, we will assess ZOOM's ability to produce counterfactual images by concurrently exploring multiple attributes within $\mathcal{A}$ , the domain of user-defined attributes. 
The approach conducts multi-attribute counterfactual searches across various edit directions, identifying distinct semantic combinations that result in the target model's failure. By doing so, we can effectively create more powerful counterfactuals images (see Fig. 9). + +![](images/d63f194e849545ee4d327760565ffb442da574671e4f852701c57f64f75ccd06.jpg) + +![](images/2a8b19bda7e3c098f3b7515ecb4c437e82aff219568f13b46ca980369ed954b0.jpg) +Figure 8. Multi-attribute counterfactual on Cat/Dog classifier. The number in each image is the predicted probability of being a dog. + +![](images/97575becca7c2b0a58f05e7bd3efea1ce8f805407f9a9a8bc5b0b8fa133de821.jpg) +Cat / Dog Classifier (0-Cat / 1-Dog) + +![](images/9f015960384356c923f938a96ad155d7920e2f40848154f6927b5ca61d80c9da.jpg) + +![](images/9dc79a7da6f29d71c72a5adf20673275f04537bf9eea485fde0a25d54eae61fd.jpg) + +![](images/b0ef6e22515f9fbe3fe524912b72ea323ef911ac09d0c03ab0e227513c25dffc.jpg) + +![](images/eacf43272b02d9d8200813f9d64795ddfee3402d5fde9196f88ee01631e050d5.jpg) + +![](images/00c3b77821922449ec954cf2a408c8a712da33871a82ab4f911ece894aeb4d58.jpg) + +![](images/73ef0f2cf59bc1d4b27d02b70df889a8d0b602bd88ce7e79c4b3b4e6e43cb0e9.jpg) + +![](images/44bba462e0e42241c95319ec2b01d11c8a422c41b0ce523d8f3136a9bc721633.jpg) + +![](images/93e5220bf68cbf225c272fed33def41a45c1fad35b0f715590fc34da455c23ed.jpg) + +![](images/b8335552498ed460f4d22b2fd05b6d4609a5b26d9c180d8d4bc002a607f780d4.jpg) + +![](images/5d137f7d3aa9d683b7868c27d6b50d55214821f413f2a649b34ec317e8ccc450.jpg) +Original Reference + +![](images/cd94d5ea6748597fb9c03d4f85d7fe91a324a817224d3587d29b461780e9954f.jpg) +SAC by Beard + +![](images/ef8f24a423832c377b502751ea1a6ec5c02870f6c9bf6f57ca957e36e35ccf1e.jpg) +SAC by Pale Skin + +![](images/14f558865c5e0601cdaa01a946868a1455678b57180ec80975d4ed1ececa0a91.jpg) +SAC by Black Hair + +![](images/d503aca254f8fbd0ba1ddfc316957c35f64f0e256656c8f21634b487f29c57bd.jpg) +Multiple-Attribute + +![](images/4175e75073b501fe5f39001245d583575431f4cbce9915be62cc70647315c29c.jpg) +Original Reference + +![](images/5d65bb23276b3a3c12c60e02c786251d3ba8fc8c89d311fea013e13ecf6352fa.jpg) +SAC by Lips Color + +![](images/3fa0538d80ef39f3390920970221e72096153f9028e5e434697ae973a6488b8f.jpg) +SAC by Smiling + +![](images/1fa5c86fcdf03b6475a88a8351d7ebf59208fccfc5f637b753b80fc3793cd0d7.jpg) +SAC by Bangs + +![](images/06ab8a56e1dd662eb7212a310fc18e69a77dde02e44ad7df8ead56fe5f2145de.jpg) +Multiple-Attribute +Figure 9. Multiple-Attribute Counterfactual (MAC, Sec. 4.4) compared with Single-Attribute Counterfactual (SAC, Sec. 4.2). We can see that optimization along multiple directions enable the generation of more powerful counterfactuals. + +Fig. 7 and Fig. 8 show examples of multi-attribute counterfactual images generated by ZOOM, against human and animal face classifiers. It can be observed that multiple face attributes such as lipsticks or hair color are edited in Fig. 7, and various cat/dog attributes like nose pinkness, eye shape, and fur patterns are edited in Fig. 8. These attribute edits are blended to affect the target model prediction. Appendix B further illustrates ZOOM counterfactual images for semantic segmentation, multi-class classification, and a church classifier. By mutating semantic representations, ZOOM reveals semantic combinations as outliers where the target model underfits. 
In the following sections, we will use the Flip Rate (the percentage of counterfactuals that flipped the model prediction) and Flip Resistance (the percentage of counterfactuals for which the model successfully withheld its prediction) to evaluate the multi-attribute setting.

![](images/3a31fe81e15c74b575c6093e46f0834299f52fbd05a5297a4af77ec9e568073e.jpg)
(a) Sensitivity histograms generated by ZOOM on attribute combinations.

![](images/b0c505f7748d04f811c5d12aed9bbf43c879e1b7ff0a3273c81dba77b6e38c5b.jpg)

![](images/0801f88cca854414287012d32b85e15604c661f7e8ca63dfc868dc3e24766f24.jpg)
(b) Model diagnosis by ZOOM over 19 attributes. Our framework is generalizable to analyze facial attributes of various domains.
Figure 10. Customizing attribute space for ZOOM.

# 4.4.1 Customizing attribute space

In some circumstances, users may finish one round of model diagnosis and proceed to another round by adding new attributes or trying a new attribute space. The linear nature of attribute editing (Eq. 3) in ZOOM makes it easy to add or remove attributes. Table 1 shows the flip rate results when adding new attributes into $\mathcal{A}$ for the perceived age classifier and the big lips classifier. We can observe that a different attribute space results in a different effectiveness of the counterfactual images. Also, increasing the number of search iterations makes the counterfactuals more effective (see the last row). Note that neither re-training the StyleGAN nor user collection/labeling of data is required at any point in this procedure. Moreover, Fig. 10a shows the model diagnosis histograms generated with combinations of two attributes. Fig. 10b demonstrates the capability of ZOOM in a rich-vocabulary setting where we can analyze attributes that are not labeled in existing datasets [16, 29].

# 4.4.2 Counterfactual training results

This section evaluates regular classifiers trained on CelebA [16] and counterfactually-trained (CT) classifiers trained on a mix of CelebA data and counterfactual images as described in Sec. 3.5. Table 2 presents accuracy and flip resistance (FR) results. CT outperforms the regular classifier. FR is assessed over 10,000 counterfactual images, with FR-25 and FR-100 denoting Flip Resistance after 25 and 100 optimization iterations, respectively. Both use $\eta = 0.2$ and $\epsilon = 30$ . We can observe that the classifiers after CT are far less likely to be flipped by counterfactual images while maintaining a decent accuracy on the CelebA test set. Our approach robustifies the model by increasing its tolerance toward counterfactuals. Note that CT slightly improves the CelebA classifier even though it is trained on a mixture of CelebA images (original images) and counterfactual images generated with a generative model trained on the FFHQ [12] images (a different domain).
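As a rough sketch (not the authors' evaluation code), the two metrics can be computed from the target model's probabilities on original and counterfactual images; counting a prediction as flipped when it crosses the 0.5 decision threshold is our assumption.

```python
import torch

def flip_rate(p_orig: torch.Tensor, p_cf: torch.Tensor) -> float:
    """Percentage of counterfactuals whose prediction crosses the decision boundary."""
    flipped = p_orig.round() != p_cf.round()
    return 100.0 * flipped.float().mean().item()

def flip_resistance(p_orig: torch.Tensor, p_cf: torch.Tensor) -> float:
    """Percentage of counterfactuals for which the model keeps its original prediction."""
    return 100.0 - flip_rate(p_orig, p_cf)

# p_orig, p_cf: f_theta(x) and f_theta(x_hat) over a batch of sampled images.
```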
| Method | AC Flip Rate (%) | BC Flip Rate (%) |
| --- | --- | --- |
| Initialize ZOOM by $\mathcal{A}$ | 61.95 | 83.47 |
| + Attribute: Beard | 72.08 | 90.07 |
| + Attribute: Smiling | 87.47 | 96.27 |
| + Attribute: Lipstick | 90.96 | 94.07 |
| + Iterations increased to 200 | 92.91 | 94.87 |
+ +Table 1. Model flip rate study. The initial attribute space $\mathcal{A} =$ {Bangs, Blond Hair, Bushy Eyebrows, Pale Skin, Pointy Nose}. AC is the perceived age classifier and BC is the big lips classifier. + +
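For completeness, one iteration of the counterfactual training (CT) described in Sec. 3.5, whose effect is reported in Table 2 below, can be sketched as follows. `zoom_counterfactuals` stands in for the multi-attribute search of Sec. 3.3, `G` for the StyleGAN generator, and `f_theta` is assumed to output probabilities; this is not the authors' training code.

```python
import torch
import torch.nn.functional as F

def ct_step(f_theta, G, s, zoom_counterfactuals, optimizer):
    """One counterfactual-training iteration (Eq. 12) on a batch of style vectors s."""
    x = G(s)                                   # original samples from the generator
    with torch.no_grad():
        y = f_theta(x)                         # the model's own predictions act as labels
    x_hat = zoom_counterfactuals(f_theta, G, s).detach()   # counterfactual batch
    loss = F.binary_cross_entropy(f_theta(x_hat), y)       # L_CE(f_theta(x_hat), f_theta(x))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()
```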
| Attribute | Metric | Regular (%) | CT (Ours) (%) |
| --- | --- | --- | --- |
| Perceived Age | CelebA Accuracy | 86.10 | 86.29 |
| Perceived Age | ZOOM FR-25 | 19.54 | 97.36 |
| Perceived Age | ZOOM FR-100 | 9.04 | 95.65 |
| Big Lips | CelebA Accuracy | 74.36 | 75.39 |
| Big Lips | ZOOM FR-25 | 14.12 | 99.19 |
| Big Lips | ZOOM FR-100 | 5.93 | 88.91 |
+ +Table 2. Results of network inference on CelebA original images and ZOOM-generated counterfactual. The CT classifier is significantly less prone to be flipped by counterfactual images, while test accuracy on CelebA remains performant. + +# 5. Conclusion and Discussion + +In this paper, we present ZOOM, a zero-shot model diagnosis framework that generates sensitivity histograms based on user's input of natural language attributes. ZOOM assesses failures and generates corresponding sensitivity histograms for each attribute. A significant advantage of our technique is its ability to analyze the failures of a target deep model without the need for laborious collection and annotation of test sets. ZOOM effectively visualizes the correlation between attributes and model outputs, elucidating model behaviors and intrinsic biases. + +Our work has three primary limitations. First, users should possess domain knowledge as their input (text of attributes of interest) should be relevant to the target domain. Recall that it is a small price to pay for model evaluation without an annotated test set. Second, StyleGAN2-ADA struggles with generating out-of-domain samples. Nevertheless, our adversarial learning framework can be adapted to other generative models (e.g., stable diffusion), and the generator can be improved by training on more images. We have rigorously tested our generator with various user inputs, confirming its effectiveness for regular diagnosis requests. Currently, we are exploring stable diffusion models to generate a broader range of classes while maintaining the core concept. Finally, we rely on a pre-trained model like CLIP which we presume to be free of bias and capable of encompassing all relevant attributes. + +Acknowledgements: We would like to thank George Cazenavette, Tianyuan Zhang, Yinong Wang, Hanzhe Hu, Bharath Raj for suggestions in the presentation and experiments. We sincerely thank Ken Ziyu Liu, Jiashun Wang, Bowen Li, and Ce Zheng for revisions to improve this work. + +# References + +[1] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. StarGAN v2: Diverse Image Synthesis for Multiple Domains. In CVPR, 2020. +[2] Edo Collins, Raja Bala, Bob Price, and Sabine Susstrunk. Editing in Style: Uncovering the Local Semantics of GANs. In CVPR, 2020. +[3] Emily Denton and Ben Hutchinson and Margaret Mitchell and Timnit Gebru and Andrew Zaldivar. Image counterfactual sensitivity analysis for detecting unintended bias. arXiv preprint arXiv:1906.06439, 2019. +[4] Ian J. Goodfellow, Jonathon Shlens, and Christian Szegedy. Explaining and Harnessing Adversarial Examples. 2014. +[5] Yash Goyal, Ziyan Wu, Jan Ernst, Dhruv Batra, Devi Parikh, and Stefan Lee. Counterfactual Visual Explanations. In ICML, 2019. +[6] Z. He, W. Zuo, M. Kan, S. Shan, and X. Chen. AttGAN: Facial Attribute Editing by Only Changing What You Want. In IEEE TIP, 2019. +[7] Fangzhou Hong, Mingyuan Zhang, Liang Pan, Zhongang Cai, Lei Yang, and Ziwei Liu. AvatarCLIP: Zero-Shot Text-Driven Generation and Animation of 3D Avatars. In ACM TOG, 2022. +[8] Erik Härkönen, Aaron Hertzmann, Jaakko Lehtinen, and Sylvain Paris. GANSpace: Discovering Interpretable GAN Controls. In NeurIPS, 2020. +[9] Ameya Joshi, Amitangshu Mukherjee, Soumik Sarkar, and Chinmay Hegde. Semantic Adversarial Attacks: Parametric Transformations That Fool Deep Classifiers. In ICCV, 2019. +[10] Kimmo Karkkainen and Jungseock Joo. FairFace: Face Attribute Dataset for Balanced Race, Gender, and Age for Bias Measurement and Mitigation. 
In WACV, 2021. +[11] Tero Karras, Miika Aittala, Janne Hellsten, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Training Generative Adversarial Networks with Limited Data. In NeurIPS, 2020. +[12] Tero Karras, Samuli Laine, and Timo Aila. A Style-Based Generator Architecture for Generative Adversarial Networks. In CVPR, 2019. +[13] Oran Lang, Yossi Gandelsman, Michal Yarom, Yoav Wald, Gal Elidan, Avinatan Hassidim, William T. Freeman, Phillip Isola, Amir Globerson, Michal Irani, and Inbar Mosseri. Explaining in Style: Training a GAN To Explain a Classifier in StyleSpace. In ICCV, 2021. +[14] Bo Li, Qiulin Wang, Jiquan Pei, Yu Yang, and Xiangyang Ji. Which Style Makes Me Attractive? Interpretable Control Discovery and Counterfactual Explanation on StyleGAN. arXiv preprint arXiv:2201.09689, 2022. +[15] Zhiheng Li and Chenliang Xu. Discover the Unknown Biased Attribute of an Image Classifier. In ICCV, 2021. +[16] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep Learning Face Attributes in the Wild. In ICCV, 2015. +[17] Jinqi Luo, Zhaoning Wang, Chen Henry Wu, Dong Huang, and Fernando De la Torre. Semantic image attack for visual model diagnosis. arXiv preprint arXiv:2303.13010, 2023. +[18] Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. Towards Deep Learning Models Resistant to Adversarial Attacks. In ICLR, 2018. + +[19] Joanna Materzynska, Antonio Torralba, and David Bau. Disentangling Visual and Written Concepts in CLIP. In CVPR, 2022. +[20] Ramaravind K. Mothilal, Amit Sharma, and Chenhao Tan. Explaining Machine Learning Classifiers through Diverse Counterfactual Explanations. In ACM FAccT, 2020. +[21] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. StyleCLIP: Text-Driven Manipulation of StyleGAN Imagery. In ICCV, 2021. +[22] Ben Poole, Ajay Jain, Jonathan T. Barron, and Ben Mildenhall. DreamFusion: Text-to-3D using 2D Diffusion. arXiv preprint arXiv:2209.14988, 2022. +[23] Haonan Qiu, Chaowei Xiao, Lei Yang, Xinchen Yan, Honglak Lee, and Bo Li. SemanticAdv: Generating Adversarial Examples via Attribute-conditioned Image Editing. In ECCV, 2020. +[24] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning Transferable Visual Models From Natural Language Supervision. In ICML, 2021. +[25] Vikram V. Ramaswamy, Sunnie S. Y. Kim, and Olga Russakovsky. Fair Attribute Classification Through Latent Space De-Biasing. In CVPR, 2021. +[26] Axel Sauer and Andreas Geiger. Counterfactual Generative Networks. In ICLR, 2021. +[27] Yujun Shen, Ceyuan Yang, Xiaou Tang, and Bolei Zhou. InterFaceGAN: Interpreting the Disentangled Face Representation Learned by GANs. In IEEE TPAMI, 2020. +[28] Yujun Shen and Bolei Zhou. Closed-Form Factorization of Latent Semantics in GANs. In CVPR, 2021. +[29] Philipp Terhörst, Daniel Fährmann, Jan Niklas Kolf, Naser Damer, Florian Kirchbuchner, and Arjan Kuijper. MAAD-Face: A Massively Annotated Attribute Dataset for Face Images. In IEEE TIFS, 2021. +[30] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. CLIP-NeRF: Text-and-Image Driven Manipulation of Neural Radiance Fields. In CVPR, 2022. +[31] Jingdong Wang, Ke Sun, Tianheng Cheng, Borui Jiang, Chaorui Deng, Yang Zhao, Dong Liu, Yadong Mu, Mingkui Tan, Xinggang Wang, Wenyu Liu, and Bin Xiao. Deep High-Resolution Representation Learning for Visual Recognition. In IEEE TPAMI, 2019. 
+[32] Zhou Wang, A.C. Bovik, H.R. Sheikh, and E.P. Simoncelli. Image Quality Assessment: from Error Visibility to Structural Similarity. In IEEE TIP, 2004. +[33] Wayne Wu, Chen Qian, Shuo Yang, Quan Wang, Yici Cai, and Qiang Zhou. Look at Boundary: A Boundary-Aware Face Alignment Algorithm. In CVPR, 2018. +[34] Zongze Wu, Dani Lischinski, and Eli Shechtman. StyleSpace Analysis: Disentangled Controls for StyleGAN Image Generation. In CVPR, 2021. +[35] Weihao Xia, Yulun Zhang, Yujiu Yang, Jing-Hao Xue, Bolei Zhou, and Ming-Hsuan Yang. GAN Inversion: A Survey. In IEEE TPAMI, 2022. + +[36] Chaowei Xiao, Bo Li, Jun-yan Zhu, Warren He, Mingyan Liu, and Dawn Song. Generating Adversarial Examples with Adversarial Networks. In *IJCAI*, 2018. +[37] Mingyuan Zhang, Zhongang Cai, Liang Pan, Fangzhou Hong, Xinying Guo, Lei Yang, and Ziwei Liu. MotionDiffuse: Text-Driven Human Motion Generation with Diffusion Model. arXiv preprint arXiv:2208.15001, 2022. \ No newline at end of file diff --git a/2023/Zero-Shot Model Diagnosis/images.zip b/2023/Zero-Shot Model Diagnosis/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..d006858337f9fbc907cabcaf2c5c1a03b749e367 --- /dev/null +++ b/2023/Zero-Shot Model Diagnosis/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2fadd0e06a2e68242e06ad607b5324622796cac7cd95ac83d9feda3105a8f7c5 +size 674728 diff --git a/2023/Zero-Shot Model Diagnosis/layout.json b/2023/Zero-Shot Model Diagnosis/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..a2cb0c0ec2f742182d9daeef61e700e670caad11 --- /dev/null +++ b/2023/Zero-Shot Model Diagnosis/layout.json @@ -0,0 +1,12258 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 212, + 103, + 381, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 212, + 103, + 381, + 121 + ], + "spans": [ + { + "bbox": [ + 212, + 103, + 381, + 121 + ], + "type": "text", + "content": "Zero-shot Model Diagnosis" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 59, + 143, + 112, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 143, + 112, + 158 + ], + "spans": [ + { + "bbox": [ + 59, + 143, + 112, + 158 + ], + "type": "text", + "content": "Jinqi Luo*" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 143, + 215, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 143, + 215, + 158 + ], + "spans": [ + { + "bbox": [ + 132, + 143, + 215, + 158 + ], + "type": "text", + "content": "Zhaoning Wang*" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 235, + 144, + 315, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 144, + 315, + 157 + ], + "spans": [ + { + "bbox": [ + 235, + 144, + 315, + 157 + ], + "type": "text", + "content": "Chen Henry Wu" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 340, + 144, + 404, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 340, + 144, + 404, + 157 + ], + "spans": [ + { + "bbox": [ + 340, + 144, + 404, + 157 + ], + "type": "text", + "content": "Dong Huang" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 429, + 144, + 533, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 429, + 144, + 533, + 156 + ], + "spans": [ + { + "bbox": [ + 429, + 144, + 533, + 156 + ], + "type": "text", + "content": "Fernando De la Torre" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 228, + 159, + 364, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 228, + 159, + 364, + 171 + ], + "spans": [ + { + "bbox": [ + 228, + 159, + 364, + 171 + ], + "type": "text", + "content": "Carnegie Mellon University" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 146, + 174, + 443, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 174, + 443, + 185 + ], + "spans": [ + { + "bbox": [ + 146, + 174, + 443, + 185 + ], + "type": "text", + "content": "{jinqil, zhaoning, chenwu2, dghuang, ftorre}@cs.cmu.edu" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "spans": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 229, + 287, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 229, + 287, + 361 + ], + "spans": [ + { + "bbox": [ + 45, + 229, + 287, + 361 + ], + "type": "text", + "content": "When it comes to deploying deep vision models, the behavior of these systems must be explicable to ensure confidence in their reliability and fairness. A common approach to evaluate deep learning models is to build a labeled test set with attributes of interest and assess how well it performs. However, creating a balanced test set (i.e., one that is uniformly sampled over all the important traits) is often time-consuming, expensive, and prone to mistakes. The question we try to address is: can we evaluate the sensitivity of deep learning models to arbitrary visual attributes without an annotated test set?" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 45, + 361, + 288, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 361, + 288, + 540 + ], + "spans": [ + { + "bbox": [ + 45, + 361, + 288, + 540 + ], + "type": "text", + "content": "This paper argues the case that Zero-shot Model Diagnosis (ZOOM) is possible without the need for a test set nor labeling. To avoid the need for test sets, our system relies on a generative model and CLIP. The key idea is enabling the user to select a set of prompts (relevant to the problem) and our system will automatically search for semantic counterfactual images (i.e., synthesized images that flip the prediction in the case of a binary classifier) using the generative model. We evaluate several visual tasks (classification, key-point detection, and segmentation) in multiple visual domains to demonstrate the viability of our methodology. Extensive experiments demonstrate that our method is capable of producing counterfactual images and offering sensitivity analysis for model diagnosis without the need for a test set." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 554, + 127, + 566 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 554, + 127, + 566 + ], + "spans": [ + { + "bbox": [ + 47, + 554, + 127, + 566 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 574, + 287, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 574, + 287, + 694 + ], + "spans": [ + { + "bbox": [ + 46, + 574, + 287, + 694 + ], + "type": "text", + "content": "Deep learning models inherit data biases, which can be accentuated or downplayed depending on the model's architecture and optimization strategy. 
Deploying a computer vision deep learning model requires extensive testing and evaluation, with a particular focus on features with potentially dire social consequences (e.g., non-uniform behavior across gender or ethnicity). Given the importance of the problem, it is common to collect and label large-scale datasets to evaluate the behavior of these models across attributes of interest. Unfortunately, collecting these test" + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 309, + 213, + 539, + 361 + ], + "blocks": [ + { + "bbox": [ + 309, + 213, + 539, + 361 + ], + "lines": [ + { + "bbox": [ + 309, + 213, + 539, + 361 + ], + "spans": [ + { + "bbox": [ + 309, + 213, + 539, + 361 + ], + "type": "image", + "image_path": "3c343307fb18c4ea29ef25ad4087d7b7663b31d9277a7b39fe95b2f82af49943.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 370, + 545, + 437 + ], + "lines": [ + { + "bbox": [ + 304, + 370, + 545, + 437 + ], + "spans": [ + { + "bbox": [ + 304, + 370, + 545, + 437 + ], + "type": "text", + "content": "Figure 1. Given a differentiable deep learning model (e.g., a cat/dog classifier) and user-defined text attributes, how can we determine the model's sensitivity to specific attributes without using labeled test data? Our system generates counterfactual images (bottom right) based on the textual directions provided by the user, while also computing the sensitivity histogram (top right)." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 449, + 545, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 449, + 545, + 567 + ], + "spans": [ + { + "bbox": [ + 304, + 449, + 545, + 567 + ], + "type": "text", + "content": "datasets is extremely time-consuming, error-prone, and expensive. Moreover, a balanced dataset, that is uniformly distributed across all attributes of interest, is also typically impractical to acquire due to its combinatorial nature. Even with careful metric analysis in this test set, no robustness nor fairness can be guaranteed since there can be a mismatch between the real and test distributions [25]. This research will explore model diagnosis without relying on a test set in an effort to democratize model diagnosis and lower the associated cost." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "text", + "content": "Counterfactual explainability as a means of model diagnosis is drawing the community's attention [5,20]. Counterfactual images visualize the sensitive factors of an input image that can influence the model's outputs. 
In other words, counterfactuals answer the question: \"How can we modify the input image " + }, + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "text", + "content": " (while fixing the ground truth) so that the model prediction would diverge from " + }, + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{y}}" + }, + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "text", + "content": "?\". The parameterization of such counterfactuals will provide insights into identifying key factors of where the model fails. Unlike existing image-space adversary techniques [4,18], counterfactuals provide semantic perturbations that are interpretable by humans. However, existing counterfactual studies re" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 702, + 126, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 126, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 126, + 712 + ], + "type": "text", + "content": "*Equal contribution." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11631" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 286, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 286, + 108 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 286, + 108 + ], + "type": "text", + "content": "require the user to either collect uniform test sets [10], annotate discovered bias [15], or train a model-specific explanation every time the user wants to diagnose a new model [13]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 108, + 286, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 108, + 286, + 262 + ], + "spans": [ + { + "bbox": [ + 46, + 108, + 286, + 262 + ], + "type": "text", + "content": "On the other hand, recent advances in Contrastive Language-Image Pretraining (CLIP) [24] can help to overcome the above challenges. 
CLIP enables text-driven applications that map user text representations to visual manifolds for downstream tasks such as avatar generation [7], motion generation [37] or neural rendering [22, 30]. In the domain of image synthesis, StyleCLIP [21] reveals that text-conditioned optimization in the StyleGAN [12] latent space can decompose latent directions for image editing, allowing for the mutation of a specific attribute without disturbing others. With such capability, users can freely edit semantic attributes conditioned on text inputs. This paper further explores its use in the scope of model diagnosis." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 263, + 286, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 263, + 286, + 466 + ], + "spans": [ + { + "bbox": [ + 46, + 263, + 286, + 466 + ], + "type": "text", + "content": "The central concept of the paper is depicted in Fig. 1. Consider a user interested in evaluating which factors contribute to the lack of robustness in a cat/dog classifier (target model). By selecting a list of keyword attributes, the user is able to (1) see counterfactual images where semantic variations flip the target model predictions (see the classifier score in the top-right corner of the counterfactual images) and (2) quantify the sensitivity of each attribute for the target model (see sensitivity histogram on the top). Instead of using a test set, we propose using a StyleGAN generator as the picture engine for sampling counterfactual images. CLIP transforms user's text input, and enables model diagnosis in an open-vocabulary setting. This is a major advantage since there is no need for collecting and annotating images and minimal user expert knowledge. In addition, we are not tied to a particular annotation from datasets (e.g., specific attributes in CelebA [16])." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 467, + 286, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 467, + 286, + 491 + ], + "spans": [ + { + "bbox": [ + 47, + 467, + 286, + 491 + ], + "type": "text", + "content": "To summarize, our proposed work offers three major improvements over earlier efforts:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 58, + 495, + 286, + 654 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 58, + 495, + 286, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 495, + 286, + 555 + ], + "spans": [ + { + "bbox": [ + 58, + 495, + 286, + 555 + ], + "type": "text", + "content": "- The user requires neither a labeled, balanced test dataset, and minimal expert knowledge in order to evaluate where a model fails (i.e., model diagnosis). In addition, the method provides a sensitivity histogram across the attributes of interest." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 58, + 563, + 286, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 563, + 286, + 599 + ], + "spans": [ + { + "bbox": [ + 58, + 563, + 286, + 599 + ], + "type": "text", + "content": "- When a different target model or a new user-defined attribute space is introduced, it is not necessary to retrain our system, allowing for practical use." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 58, + 606, + 286, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 606, + 286, + 654 + ], + "spans": [ + { + "bbox": [ + 58, + 606, + 286, + 654 + ], + "type": "text", + "content": "- The target model fine-tuned with counterfactual images not only slightly improves the classification performance, but also greatly increases the distributional robustness against counterfactual images." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 47, + 669, + 133, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 669, + 133, + 681 + ], + "spans": [ + { + "bbox": [ + 47, + 669, + 133, + 681 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 689, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 286, + 713 + ], + "type": "text", + "content": "This section reviews prior work on attribute editing with generative models and recent efforts on model diagnosis." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 72, + 524, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 524, + 84 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 524, + 84 + ], + "type": "text", + "content": "2.1. Attribute Editing with Generative Models" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 87, + 545, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 87, + 545, + 291 + ], + "spans": [ + { + "bbox": [ + 304, + 87, + 545, + 291 + ], + "type": "text", + "content": "With recent progress in generative models, GANs supports high-quality image synthesis, as well as semantic attributes editing [35]. [1, 6] edit the images by perturbing the intermediate latent space encoded from the original images. These methods rely on images to be encoded to latent vectors to perform attribute editing. On the contrary, StyleGAN [12] can produce images by sampling the latent space. Many works have explored ways to edit attributes in the latent space of StyleGAN, either by relying on image annotations [27] or in an unsupervised manner [8, 28]. StyleSpace [34] further disentangles the latent space of StyleGAN and can perform specific attribute edits by disentangled style vectors. Based upon StyleSpace, StyleCLIP [21] builds the connection between the CLIP language space and StyleGAN latent space to enable arbitrary edits specified by the text. Our work adopts this concept for fine-grained attribute editing." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 297, + 406, + 309 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 297, + 406, + 309 + ], + "spans": [ + { + "bbox": [ + 306, + 297, + 406, + 309 + ], + "type": "text", + "content": "2.2. Model Diagnosis" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 316, + 545, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 316, + 545, + 578 + ], + "spans": [ + { + "bbox": [ + 304, + 316, + 545, + 578 + ], + "type": "text", + "content": "To the best of our knowledge, model diagnosis without a test set is a relatively unexplored problem. In the adversarial learning literature, it is common to find methods that show how image-space perturbations [4, 18] flip the model prediction; however, such perturbations lack visual interpretability. 
[36] pioneers in synthesizing adversaries by GANs. More recently, [9, 23, 26] propose generative methods to synthesize semantically perturbed images to visualize where the target model fails. However, their attribute editing is limited within the dataset's annotated labels. Instead, our framework allows users to easily customize their own attribute space, in which we visualize and quantify the biased factors that affect the model prediction. On the bias detection track, [13] co-trains a model-specific StyleGAN with each target model, and requires human annotators to name attribute coordinates in the Stylespace. [3, 14, 15] synthesize counterfactual images by either optimally traversing the latent space or learning an attribute hyperplane, after which the user will inspect the represented bias. Unlike previous work, we propose to diagnose a deep learning model without any model-specific re-training, new test sets, or manual annotations/inspections." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 588, + 361, + 600 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 588, + 361, + 600 + ], + "spans": [ + { + "bbox": [ + 306, + 588, + 361, + 600 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 605, + 545, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 545, + 664 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 545, + 664 + ], + "type": "text", + "content": "This section firstly describes our method to generate counterfactual images guided by CLIP in a zero-shot manner. We then introduce how we perform the sensitivity analysis across attributes of interest. Fig. 2 shows the overview of our framework." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 671, + 481, + 682 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 671, + 481, + 682 + ], + "spans": [ + { + "bbox": [ + 306, + 671, + 481, + 682 + ], + "type": "text", + "content": "3.1. Notation and Problem Definition" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": ", parameterized by " + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": ", be the target model that we want to diagnose. 
In this paper, " + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": " denotes two types of" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11632" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 69, + 68, + 525, + 205 + ], + "blocks": [ + { + "bbox": [ + 69, + 68, + 525, + 205 + ], + "lines": [ + { + "bbox": [ + 69, + 68, + 525, + 205 + ], + "spans": [ + { + "bbox": [ + 69, + 68, + 525, + 205 + ], + "type": "image", + "image_path": "247dfc97e41f80fd7d1d2a86fd3efb21ea4c42cb07451c98651a328c990284e9.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 216, + 547, + 262 + ], + "lines": [ + { + "bbox": [ + 46, + 216, + 547, + 262 + ], + "spans": [ + { + "bbox": [ + 46, + 216, + 547, + 262 + ], + "type": "text", + "content": "Figure 2. The ZOOM framework. Black solid lines stand for forward passes, red dashed lines stand for backpropagation, and purple dashed lines stand for inference after the optimization converges. The user inputs single or multiple attributes, and we map them into edit directions with the method in Sec. 3.2. Then we assign to each edit direction (attribute) a weight, which represents how much we are adding/removing this attribute. We iteratively perform adversarial learning on the attribute space to maximize the counterfactual effectiveness." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "spans": [ + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": "deep nets: binary attribute classifiers and face keypoint detectors. Note that our approach is extendable to any end-to-end differentiable target deep models. Let " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\phi}" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": ", parameterized by " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": ", be the style generator that synthesizes images by " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "\\mathbf{x} = \\mathcal{G}_{\\phi}(\\mathbf{s})" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "\\mathbf{s}" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": " is the style vector in Style Space " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": " [34]. 
We denote a counterfactual image as " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{x}}" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": ", which is a synthesized image that misleads the target model " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": ", and denote the original reference image as " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": " is defined as a single user input text-based attribute, with its domain " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "\\mathcal{A} = \\{a_i\\}_{i=1}^N" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": " input attributes. " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{x}}" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": " differs only along attribute directions " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": ". Given a set of " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "\\{f_{\\theta}, \\mathcal{G}_{\\phi}, \\mathcal{A}\\}" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": ", our goal is to perform counterfactual-based diagnosis to interpret where the model fails without manually collecting nor labeling any test set. Unlike traditional approaches of image-space noises which lack explainability to users, our method adversarially searches the counterfactual in the user-designed semantic space. To this end, our diagnosis will have three outputs, namely counterfactual images (Sec. 3.3), sensitivity histograms (Sec. 3.4), and distributionally robust models (Sec. 3.5)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 521, + 194, + 533 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 521, + 194, + 533 + ], + "spans": [ + { + "bbox": [ + 47, + 521, + 194, + 533 + ], + "type": "text", + "content": "3.2. Extracting Edit Directions" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 536, + 287, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 536, + 287, + 608 + ], + "spans": [ + { + "bbox": [ + 46, + 536, + 287, + 608 + ], + "type": "text", + "content": "This section examines the terminologies, method, and modification we adopt in ZOOM to extract suitable global directions for attribute editing. 
Since CLIP has shown strong capability in disentangling visual representation [19], we incorporate style channel relevance from Style-CLIP [21] to find edit directions for each attribute." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "spans": [ + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "text", + "content": "Given the user's input strings of attributes, we want to find an image manipulation direction " + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "inline_equation", + "content": "\\Delta \\mathbf{s}" + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "text", + "content": " for any " + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "inline_equation", + "content": "\\mathbf{s} \\sim \\mathcal{S}" + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "text", + "content": ", such that the generated image " + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\phi}(\\mathbf{s} + \\Delta \\mathbf{s})" + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "text", + "content": " only varies in the input attributes. Recall that CLIP maps strings into a text embedding " + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "inline_equation", + "content": "\\mathbf{t} \\in \\mathcal{T}" + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "text", + "content": ", the text embedding space. For a string attribute description " + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "text", + "content": " and a neutral prefix " + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "text", + "content": ", we obtain the CLIP text embedding difference " + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "inline_equation", + "content": "\\Delta \\mathbf{t}" + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "text", + "content": " by:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 86, + 697, + 287, + 711 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 697, + 287, + 711 + ], + "spans": [ + { + "bbox": [ + 86, + 697, + 287, + 711 + ], + "type": "interline_equation", + "content": "\\Delta \\mathbf {t} = \\operatorname {C L I P} _ {\\text {t e x t}} (p \\oplus a) - \\operatorname {C L I P} _ {\\text {t e x t}} (p) \\tag {1}", + "image_path": "406522a59249e10ab5cd88783268d84c4058e8f220dfd4af0401d849dff5e944.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 276, + 545, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 276, + 545, + 312 + ], + "spans": [ + { + "bbox": [ + 305, + 276, + 545, + 312 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 276, + 545, + 312 + ], + "type": "inline_equation", + "content": "\\oplus" + }, + { + "bbox": [ + 305, + 276, + 545, + 312 + ], + "type": "text", + "content": " is the string concatenation operator. 
To take 'Eyeglasses' as an example, we can get " + }, + { + "bbox": [ + 305, + 276, + 545, + 312 + ], + "type": "inline_equation", + "content": "\\Delta t = \\mathrm{CLIP}_{\\mathrm{text}}" + }, + { + "bbox": [ + 305, + 276, + 545, + 312 + ], + "type": "text", + "content": " (a face with Eyeglasses) - " + }, + { + "bbox": [ + 305, + 276, + 545, + 312 + ], + "type": "inline_equation", + "content": "\\mathrm{CLIP}_{\\mathrm{text}}" + }, + { + "bbox": [ + 305, + 276, + 545, + 312 + ], + "type": "text", + "content": " (a face)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "spans": [ + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "text", + "content": "To get the edit direction, " + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "inline_equation", + "content": "\\Delta \\mathbf{s}" + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "text", + "content": ", we need to utilize a style relevance mapper " + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "inline_equation", + "content": "\\mathbf{M} \\in \\mathbb{R}^{c_S \\times c_T}" + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "text", + "content": " to map between the CLIP text embedding vectors of length " + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "inline_equation", + "content": "c_{\\mathcal{T}}" + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "text", + "content": " and the Style space vector of length " + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "inline_equation", + "content": "c_{\\mathcal{S}}" + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "text", + "content": ". StyleCLIP optimizes " + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "text", + "content": " by iteratively searching meaningful style channels: mutating each channel in " + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "inline_equation", + "content": "\\mathcal{S}" + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "text", + "content": " and encoding the mutated images by CLIP to assess whether there is a significant change in " + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "text", + "content": " space. 
To prevent undesired edits that are irrelevant to the user prompt, the edit direction " + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "inline_equation", + "content": "\\Delta \\mathbf{s}" + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "text", + "content": " will filter out channels that the style value change is insignificant:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 348, + 439, + 545, + 452 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 348, + 439, + 545, + 452 + ], + "spans": [ + { + "bbox": [ + 348, + 439, + 545, + 452 + ], + "type": "interline_equation", + "content": "\\Delta \\mathbf {s} = (\\mathbf {M} \\cdot \\Delta \\mathbf {t}) \\odot \\mathbb {1} ((\\mathbf {M} \\cdot \\Delta \\mathbf {t}) > \\lambda), \\tag {2}", + "image_path": "f4eabcda229d25393671dbfd5547dd2e9e1ebf9423d0a153d2128a4fc59d47f9.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "text", + "content": " is the hyper-parameter for the threshold value. " + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\mathbb{1}(\\cdot)" + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "text", + "content": " is the indicator function, and " + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\odot" + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "text", + "content": " is the element-wise product operator. Since the success of attribute editing by the extracted edit directions will be the key to our approach, Appendix A will show the capability of CLIP by visualizing the global edit direction on multiple sampled images, conducting the user study, and analyzing the effect of " + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 550, + 473, + 563 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 550, + 473, + 563 + ], + "spans": [ + { + "bbox": [ + 306, + 550, + 473, + 563 + ], + "type": "text", + "content": "3.3. Style Counterfactual Synthesis" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "spans": [ + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", + "content": "Identifying semantic counterfactuals necessitates a manageable parametrization of the semantic space for effective exploration. 
For ease of notation, we denote " + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "inline_equation", + "content": "(\\Delta \\mathbf{s})_i" + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", + "content": " as the global edit direction for " + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "inline_equation", + "content": "i^{th}" + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", + "content": " attribute " + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "inline_equation", + "content": "a_i \\in \\mathcal{A}" + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", + "content": " from the user input. After these " + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", + "content": " attributes are provided and the edit directions are calculated, we initialize the control vectors " + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "inline_equation", + "content": "\\mathbf{w}" + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", + "content": " of length " + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", + "content": " where the " + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "inline_equation", + "content": "i^{th}" + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", + "content": " element " + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "inline_equation", + "content": "w_i" + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", + "content": " controls the strength of the " + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "inline_equation", + "content": "i^{th}" + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", + "content": " edit direction. Our counterfactual edit will be a linear combination of normalized edit directions: " + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "inline_equation", + "content": "\\mathbf{s}_{edit} = \\sum_{i=1}^{N} w_i \\frac{(\\Delta \\mathbf{s})_i}{||(\\Delta \\mathbf{s})_i||}" + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 690, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 690, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 690, + 545, + 713 + ], + "type": "text", + "content": "The black arrows in Fig. 2 show the forward inference to synthesize counterfactual images. 
Given the parametriza" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "11633" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": "tion of attribute editing strengths and the final loss value, our framework searches for counterfactual examples in the. \noptimizable edit weight space. The original sampled image is " + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "\\mathbf{x} = G_{\\phi}(\\mathbf{s})" + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": " , and the counterfactual image is" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 127, + 287, + 159 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 127, + 287, + 159 + ], + "spans": [ + { + "bbox": [ + 56, + 127, + 287, + 159 + ], + "type": "interline_equation", + "content": "\\hat {\\mathbf {x}} = G _ {\\phi} (\\mathbf {s} + \\mathbf {s} _ {e d i t}) = G _ {\\phi} \\left(\\mathbf {s} + \\sum_ {i = 1} ^ {N} w _ {i} \\frac {(\\Delta \\mathbf {s}) _ {i}}{| | (\\Delta \\mathbf {s}) _ {i} | |}\\right), \\tag {3}", + "image_path": "522c65c1ac30a4ab308cd5179ecaf205912e6f30edc1fc9af47f3d6006d5dddd.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 165, + 287, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 165, + 287, + 190 + ], + "spans": [ + { + "bbox": [ + 47, + 165, + 287, + 190 + ], + "type": "text", + "content": "which is obtained by minimizing the following loss, " + }, + { + "bbox": [ + 47, + 165, + 287, + 190 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 47, + 165, + 287, + 190 + ], + "type": "text", + "content": ", that is the weighted sum of three terms:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 196, + 287, + 211 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 196, + 287, + 211 + ], + "spans": [ + { + "bbox": [ + 53, + 196, + 287, + 211 + ], + "type": "interline_equation", + "content": "\\mathcal {L} (\\mathbf {s}, \\mathbf {w}) = \\alpha \\mathcal {L} _ {\\text {t a r g e t}} (\\hat {\\mathbf {x}}) + \\beta \\mathcal {L} _ {\\text {s t r u c t}} (\\hat {\\mathbf {x}}) + \\gamma \\mathcal {L} _ {\\text {a t t r}} (\\hat {\\mathbf {x}}). 
\\tag {4}", + "image_path": "0e08d11381d1726448aeadb5be3a180bcf2b4ac7e56b7bab7222f213f756654c.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 216, + 287, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 216, + 287, + 239 + ], + "spans": [ + { + "bbox": [ + 47, + 216, + 287, + 239 + ], + "type": "text", + "content": "We back-propagate to optimize " + }, + { + "bbox": [ + 47, + 216, + 287, + 239 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 47, + 216, + 287, + 239 + ], + "type": "text", + "content": " w.r.t the weights of the edit directions " + }, + { + "bbox": [ + 47, + 216, + 287, + 239 + ], + "type": "inline_equation", + "content": "\\mathbf{w}" + }, + { + "bbox": [ + 47, + 216, + 287, + 239 + ], + "type": "text", + "content": ", shown as the red pipeline in Fig. 2." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "spans": [ + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "text", + "content": "The targeted adversarial loss " + }, + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{target}" + }, + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "text", + "content": " for binary attribute classifiers minimizes the distance between the current model prediction " + }, + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "inline_equation", + "content": "f_{\\theta}(\\hat{\\mathbf{x}})" + }, + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "text", + "content": " with the flip of original prediction " + }, + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "inline_equation", + "content": "\\hat{p}_{cls} = 1 - f_{\\theta}(\\mathbf{x})" + }, + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "text", + "content": ". In the case of an eyeglass classifier on a person wearing eyeglasses, " + }, + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{target}" + }, + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "text", + "content": " will guide the optimization to search w such that the model predicts no eyeglasses. 
For a keypoint detector, the adversarial loss will minimize the distance between the model keypoint prediction with a set of random points " + }, + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "inline_equation", + "content": "\\hat{p}_{kp} \\sim \\mathcal{N}" + }, + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 354, + 287, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 354, + 287, + 368 + ], + "spans": [ + { + "bbox": [ + 52, + 354, + 287, + 368 + ], + "type": "text", + "content": "(binary classifier) " + }, + { + "bbox": [ + 52, + 354, + 287, + 368 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{target}(\\hat{\\mathbf{x}}) = L_{CE}(f_{\\theta}(\\hat{\\mathbf{x}}),\\hat{p}_{cls})" + }, + { + "bbox": [ + 52, + 354, + 287, + 368 + ], + "type": "text", + "content": " (5)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 370, + 287, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 370, + 287, + 384 + ], + "spans": [ + { + "bbox": [ + 52, + 370, + 287, + 384 + ], + "type": "text", + "content": "(keypoint detector) " + }, + { + "bbox": [ + 52, + 370, + 287, + 384 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{target}(\\hat{\\mathbf{x}}) = L_{MSE}(f_{\\theta}(\\hat{\\mathbf{x}}),\\hat{p}_{kp})" + }, + { + "bbox": [ + 52, + 370, + 287, + 384 + ], + "type": "text", + "content": " (6)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "text", + "content": "If we only optimize " + }, + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\text {target }}" + }, + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "text", + "content": " w.r.t the global edit directions, it is possible that the method will not preserve image statistics of the original image and can include the particular attribute that we are diagnosing. To constrain the optimization, we added a structural loss " + }, + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\text {struct }}" + }, + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "text", + "content": " and an attribute consistency loss " + }, + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\text {attr }}" + }, + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "text", + "content": " to avoid generation collapse. " + }, + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\text {struct }}" + }, + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "text", + "content": " [32] aims to preserve global image statistics of the original image x including image contrasts, background, or shape identity during the adversarial editing. While " + }, + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\text {attr }}" + }, + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "text", + "content": " enforces that the target attribute (perceived ground truth) be consistent on the style edits. 
For example, when diagnosing the eyeglasses classifier, ZOOM preserves the original status of eyeglasses and precludes direct eyeglasses addition/removal." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 83, + 563, + 287, + 576 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 563, + 287, + 576 + ], + "spans": [ + { + "bbox": [ + 83, + 563, + 287, + 576 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {s t r u c t}} (\\hat {\\mathbf {x}}) = L _ {\\text {S S I M}} (\\hat {\\mathbf {x}}, \\mathbf {x}) \\tag {7}", + "image_path": "e9c01bbec353f59f0d838e57d9d7712ef0a9488489304a1687502582e5818dec.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 91, + 578, + 287, + 591 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 578, + 287, + 591 + ], + "spans": [ + { + "bbox": [ + 91, + 578, + 287, + 591 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {a t t r}} (\\hat {\\mathbf {x}}) = L _ {C E} \\left(\\operatorname {C L I P} (\\hat {\\mathbf {x}}), \\operatorname {C L I P} (\\mathbf {x})\\right) \\tag {8}", + "image_path": "e866dfe2b33896db5340deca40595d244352f71f167079ca62e58d7367ae29e3.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "spans": [ + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "text", + "content": "Given a pretrained target model " + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "text", + "content": ", a domain-specific style generator " + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "inline_equation", + "content": "G_{\\phi}" + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "text", + "content": ", and a text-driven attribute space " + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "text", + "content": ", our goal is to sample an original style vector " + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "inline_equation", + "content": "\\mathbf{s}" + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "text", + "content": " for each image and search its counterfactual edit strength " + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{w}}" + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 121, + 653, + 287, + 672 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 653, + 287, + 672 + ], + "spans": [ + { + "bbox": [ + 121, + 653, + 287, + 672 + ], + "type": "interline_equation", + "content": "\\hat {\\mathbf {w}} = \\underset {\\mathbf {w}} {\\operatorname {a r g m i n}} \\mathcal {L} (\\mathbf {s}, \\mathbf {w}). 
\\tag {9}", + "image_path": "99d40a5c06afea29793207b057f81887dd56d07deff5621674d95fb698b54539.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 677, + 257, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 257, + 689 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 257, + 689 + ], + "type": "text", + "content": "Unless otherwise stated, we iteratively update " + }, + { + "bbox": [ + 47, + 677, + 257, + 689 + ], + "type": "inline_equation", + "content": "\\mathbf{w}" + }, + { + "bbox": [ + 47, + 677, + 257, + 689 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 102, + 696, + 287, + 711 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 696, + 287, + 711 + ], + "spans": [ + { + "bbox": [ + 102, + 696, + 287, + 711 + ], + "type": "interline_equation", + "content": "\\mathbf {w} = \\operatorname {c l a m p} _ {[ - \\epsilon , \\epsilon ]} (\\mathbf {w} - \\eta \\nabla_ {\\mathbf {w}} \\mathcal {L}), \\tag {10}", + "image_path": "7fe0063f5c0604d3a3512f4ac37243bacd08cbab0e142ef20282d6717d310e28.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "content": " is the step size and " + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "content": " is the clamp bound to avoid synthesis collapse caused by exaggerated edit. Note that the maximum counterfactual effectiveness does not indicate the maximum edit strength (i.e., " + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "inline_equation", + "content": "w_{i} = \\epsilon" + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "content": "), since the attribute edit direction does not necessarily overlap with the target classifier direction. The attribute change is bi-directional, as the " + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "inline_equation", + "content": "w_{i}" + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "content": " can be negative in Eq. 3. Details of using other optimization approaches (e.g., linear approximation [18]) will be discussed in Appendix C." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 189, + 466, + 202 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 189, + 466, + 202 + ], + "spans": [ + { + "bbox": [ + 306, + 189, + 466, + 202 + ], + "type": "text", + "content": "3.4. Attribute Sensitivity Analysis" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 208, + 545, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 208, + 545, + 279 + ], + "spans": [ + { + "bbox": [ + 304, + 208, + 545, + 279 + ], + "type": "text", + "content": "Single-attribute counterfactual reflects the sensitivity of target model on the individual attribute. 
By optimizing independently along the edit direction for a single attribute and averaging the model probability changes over images, our model generates independent sensitivity score " + }, + { + "bbox": [ + 304, + 208, + 545, + 279 + ], + "type": "inline_equation", + "content": "h_i" + }, + { + "bbox": [ + 304, + 208, + 545, + 279 + ], + "type": "text", + "content": " for each attribute " + }, + { + "bbox": [ + 304, + 208, + 545, + 279 + ], + "type": "inline_equation", + "content": "a_i" + }, + { + "bbox": [ + 304, + 208, + 545, + 279 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 325, + 291, + 545, + 305 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 291, + 545, + 305 + ], + "spans": [ + { + "bbox": [ + 325, + 291, + 545, + 305 + ], + "type": "interline_equation", + "content": "h _ {i} = \\mathbb {E} _ {\\mathbf {x} \\sim \\mathcal {P} (\\mathbf {x}), \\hat {\\mathbf {x}} = \\mathrm {Z O O M} (\\mathbf {x}, a _ {i})} | f _ {\\theta} (\\mathbf {x}) - f _ {\\theta} (\\hat {\\mathbf {x}}) |. \\tag {11}", + "image_path": "448e395994d09b00225f77ca21f4b85795d2b755bc56b9177583da538e271b57.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "spans": [ + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "text", + "content": "The sensitivity score " + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "inline_equation", + "content": "h_i" + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "text", + "content": " is the probability difference between the original image " + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "text", + "content": " and generated image " + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{x}}" + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "text", + "content": ", at the most counterfactual point when changing attribute " + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "inline_equation", + "content": "a_i" + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "text", + "content": ". We synthesize a number of images from " + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\phi}" + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "text", + "content": ", then iteratively compute the sensitivity for each given attribute, and finally normalize all sensitivities to draw the histogram as shown in Fig. 4. The histogram indicates the sensitivity of the evaluated model " + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "text", + "content": " on each of the user-defined attributes. Higher sensitivity of one attribute means that the model is more easily affected by that attribute." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 443, + 443, + 456 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 443, + 443, + 456 + ], + "spans": [ + { + "bbox": [ + 306, + 443, + 443, + 456 + ], + "type": "text", + "content": "3.5. 
Counterfactual Training" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 462, + 545, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 462, + 545, + 582 + ], + "spans": [ + { + "bbox": [ + 304, + 462, + 545, + 582 + ], + "type": "text", + "content": "The multi-attribute counterfactual approach visualizes semantic combinations that cause the model to falter, providing valuable insights for enhancing the model's robustness. We naturally adopt the concept of iterative adversarial training [18] to robustify the target model. For each iteration, ZOOM receives the target model parameter and returns a batch of mutated counterfactual images with the model's original predictions as labels. Then the target model will be trained on the counterfactually-augmented images to achieve the robust goal:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 312, + 591, + 545, + 608 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 591, + 545, + 608 + ], + "spans": [ + { + "bbox": [ + 312, + 591, + 545, + 608 + ], + "type": "interline_equation", + "content": "\\theta^ {*} = \\underset {\\theta} {\\operatorname {a r g m i n}} \\mathbb {E} _ {\\mathbf {x} \\sim \\mathcal {P} (\\mathbf {x}), \\hat {\\mathbf {x}} = \\operatorname {Z O O M} (\\mathbf {x}, A)} L _ {C E} \\left(f _ {\\theta} (\\hat {\\mathbf {x}}), f _ {\\theta} (\\mathbf {x})\\right) \\tag {12}", + "image_path": "e30d546fe324acaf7d0ba3ea872d43bd5d6dffdc4d64f4d05f44f26e4e074ea7.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": "where batches of " + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": " are randomly sampled from the StyleGAN generator " + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\phi}" + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": ". In the following, we abbreviate the process as Counterfactual Training (CT). Note that, although not explicitly expressed in Eq. 12, the CT process is a min-max game. ZOOM synthesizes counterfactuals to maximize the variation of model prediction (while persevering the perceived ground truth), and the target model is learned with the counterfactual images to minimize the variation." 
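The counterfactual-training loop around Eq. 12 can be summarized in a short sketch. This is a minimal PyTorch illustration, not the authors' released implementation: `generator`, `sample_styles`, and `zoom_counterfactuals` are hypothetical stand-ins for the StyleGAN sampler and the ZOOM counterfactual search described in the surrounding text, and the target model is assumed to output per-attribute probabilities.

```python
# Minimal sketch of Counterfactual Training (Eq. 12); all helpers are hypothetical stand-ins.
import torch
import torch.nn.functional as F

def counterfactual_training(f_theta, generator, sample_styles, zoom_counterfactuals,
                            attributes, steps=1000, batch_size=8, lr=1e-4):
    opt = torch.optim.Adam(f_theta.parameters(), lr=lr)
    for _ in range(steps):
        s = sample_styles(batch_size)                    # style vectors s sampled from S
        x = generator(s)                                 # original images x = G_phi(s)
        with torch.no_grad():
            y = f_theta(x)                               # original predictions reused as labels
        x_hat = zoom_counterfactuals(f_theta, s, attributes)  # inner max: perturb to flip f_theta
        loss = F.binary_cross_entropy(f_theta(x_hat), y)      # outer min over theta (Eq. 12)
        opt.zero_grad()
        loss.backward()
        opt.step()
    return f_theta
```

The outer minimization pulls the model's predictions on counterfactual edits back toward its predictions on the originals, which is the min-max structure noted in the text.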
+ } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11634" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 70, + 100, + 119 + ], + "blocks": [ + { + "bbox": [ + 51, + 70, + 100, + 119 + ], + "lines": [ + { + "bbox": [ + 51, + 70, + 100, + 119 + ], + "spans": [ + { + "bbox": [ + 51, + 70, + 100, + 119 + ], + "type": "image", + "image_path": "382f5252f5eef69ed8ac56fa86f515853e05d632939c48c6660966221ace8272.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 57, + 120, + 93, + 126 + ], + "lines": [ + { + "bbox": [ + 57, + 120, + 93, + 126 + ], + "spans": [ + { + "bbox": [ + 57, + 120, + 93, + 126 + ], + "type": "text", + "content": "Open Mouth" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 100, + 70, + 149, + 119 + ], + "blocks": [ + { + "bbox": [ + 100, + 70, + 149, + 119 + ], + "lines": [ + { + "bbox": [ + 100, + 70, + 149, + 119 + ], + "spans": [ + { + "bbox": [ + 100, + 70, + 149, + 119 + ], + "type": "image", + "image_path": "6e6f7765c0c6d9f1d2dfdc190d779bfe664e680f1fa60e96badfb38c36bde1f4.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 100, + 120, + 149, + 125 + ], + "lines": [ + { + "bbox": [ + 100, + 120, + 149, + 125 + ], + "spans": [ + { + "bbox": [ + 100, + 120, + 149, + 125 + ], + "type": "inline_equation", + "content": "\\frac{1}{2}x - 1 > 0" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 149, + 70, + 198, + 119 + ], + "blocks": [ + { + "bbox": [ + 149, + 70, + 198, + 119 + ], + "lines": [ + { + "bbox": [ + 149, + 70, + 198, + 119 + ], + "spans": [ + { + "bbox": [ + 149, + 70, + 198, + 119 + ], + "type": "image", + "image_path": "d58c5d6850d439ce2439ccb870594aa331b04646e7cdef3bd0558f01e41331d5.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 149, + 120, + 198, + 125 + ], + "lines": [ + { + "bbox": [ + 149, + 120, + 198, + 125 + ], + "spans": [ + { + "bbox": [ + 149, + 120, + 198, + 125 + ], + "type": "inline_equation", + "content": "\\frac{1}{2}x - 1 > 0" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 198, + 70, + 246, + 119 + ], + "blocks": [ + { + "bbox": [ + 198, + 70, + 246, + 119 + ], + "lines": [ + { + "bbox": [ + 198, + 70, + 246, + 119 + ], + "spans": [ + { + "bbox": [ + 198, + 70, + 246, + 119 + ], + "type": "image", + "image_path": "71d46012b365d52f94b9082a48b36a98cd58e025a583732997d1de6c2f945647.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 246, + 70, + 294, + 119 + ], + "blocks": [ + { + "bbox": [ + 246, + 70, + 294, + 119 + ], + "lines": [ + { + "bbox": [ + 246, + 70, + 294, + 119 + ], + "spans": [ + { + "bbox": [ + 246, + 70, + 294, + 119 + ], + "type": "image", + "image_path": "e52e69fb3e1a7bb5d2200ba25acd415cca0e9d8625a7a2ccf1e06fd40de18944.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + 
{ + "bbox": [ + 250, + 120, + 290, + 125 + ], + "lines": [ + { + "bbox": [ + 250, + 120, + 290, + 125 + ], + "spans": [ + { + "bbox": [ + 250, + 120, + 290, + 125 + ], + "type": "text", + "content": "Closed Mouth" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 295, + 70, + 343, + 119 + ], + "blocks": [ + { + "bbox": [ + 295, + 70, + 343, + 119 + ], + "lines": [ + { + "bbox": [ + 295, + 70, + 343, + 119 + ], + "spans": [ + { + "bbox": [ + 295, + 70, + 343, + 119 + ], + "type": "image", + "image_path": "5998e6f16297004dcded0e568b7cc7fa1aeb1d4478fa0d868cedde8c94e8abb9.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 343, + 70, + 392, + 119 + ], + "blocks": [ + { + "bbox": [ + 343, + 70, + 392, + 119 + ], + "lines": [ + { + "bbox": [ + 343, + 70, + 392, + 119 + ], + "spans": [ + { + "bbox": [ + 343, + 70, + 392, + 119 + ], + "type": "image", + "image_path": "f8433beb32c5896e06a7aa642d6d2e15c4fb1518e7fd0631894bb952d4187d32.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 392, + 70, + 441, + 119 + ], + "blocks": [ + { + "bbox": [ + 392, + 70, + 441, + 119 + ], + "lines": [ + { + "bbox": [ + 392, + 70, + 441, + 119 + ], + "spans": [ + { + "bbox": [ + 392, + 70, + 441, + 119 + ], + "type": "image", + "image_path": "b6dd9a6086f785bfc6b1ac4c27d8833b5f9c5b1ab95041790dcf663b8f50f846.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 441, + 70, + 490, + 119 + ], + "blocks": [ + { + "bbox": [ + 441, + 70, + 490, + 119 + ], + "lines": [ + { + "bbox": [ + 441, + 70, + 490, + 119 + ], + "spans": [ + { + "bbox": [ + 441, + 70, + 490, + 119 + ], + "type": "image", + "image_path": "28703eb187d242dc891d55db71d5efddeceab3e8980a5c1277a0ce6afedee26e.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 491, + 70, + 540, + 119 + ], + "blocks": [ + { + "bbox": [ + 491, + 70, + 540, + 119 + ], + "lines": [ + { + "bbox": [ + 491, + 70, + 540, + 119 + ], + "spans": [ + { + "bbox": [ + 491, + 70, + 540, + 119 + ], + "type": "image", + "image_path": "7614e0fc2876e9a275a20e70f7ed9a9f2b21e3d09bda32999fac3b9d7e2e8d29.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 51, + 129, + 100, + 178 + ], + "blocks": [ + { + "bbox": [ + 51, + 129, + 100, + 178 + ], + "lines": [ + { + "bbox": [ + 51, + 129, + 100, + 178 + ], + "spans": [ + { + "bbox": [ + 51, + 129, + 100, + 178 + ], + "type": "image", + "image_path": "6a3a3474601bb128b509ebe8645c0463326834db7d238ec593ff709a98712b77.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 57, + 178, + 93, + 186 + ], + "lines": [ + { + "bbox": [ + 57, + 178, + 93, + 186 + ], + "spans": [ + { + "bbox": [ + 57, + 178, + 93, + 186 + ], + "type": "text", + "content": "Felidae Pupil" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 100, + 178, + 149, + 186 + ], + "lines": [ + { + "bbox": [ + 100, + 178, + 149, + 186 + ], + "spans": [ + { + "bbox": [ + 100, + 178, + 149, + 186 + ], + "type": "inline_equation", + "content": "\\frac{3}{1} + u + {4q} = 1 + u + {uq}" + }, + { + "bbox": [ + 
100, + 178, + 149, + 186 + ], + "type": "text", + "content": " dH" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 100, + 129, + 149, + 178 + ], + "blocks": [ + { + "bbox": [ + 100, + 129, + 149, + 178 + ], + "lines": [ + { + "bbox": [ + 100, + 129, + 149, + 178 + ], + "spans": [ + { + "bbox": [ + 100, + 129, + 149, + 178 + ], + "type": "image", + "image_path": "4d6861e565df844de868c6952bcbc37a40413ae51cdc59f8416074b34ef31c4e.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 149, + 178, + 198, + 186 + ], + "lines": [ + { + "bbox": [ + 149, + 178, + 198, + 186 + ], + "spans": [ + { + "bbox": [ + 149, + 178, + 198, + 186 + ], + "type": "inline_equation", + "content": "\\frac{3}{1} + u + {4q} = 1 + u + {uq}" + }, + { + "bbox": [ + 149, + 178, + 198, + 186 + ], + "type": "text", + "content": " dH" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 149, + 129, + 198, + 178 + ], + "blocks": [ + { + "bbox": [ + 149, + 129, + 198, + 178 + ], + "lines": [ + { + "bbox": [ + 149, + 129, + 198, + 178 + ], + "spans": [ + { + "bbox": [ + 149, + 129, + 198, + 178 + ], + "type": "image", + "image_path": "4d8a7695d44eb9f470f709190f28a206de0c1aab536a4b0437ec3bc92cf5bd11.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 198, + 178, + 246, + 186 + ], + "lines": [ + { + "bbox": [ + 198, + 178, + 246, + 186 + ], + "spans": [ + { + "bbox": [ + 198, + 178, + 246, + 186 + ], + "type": "inline_equation", + "content": "\\frac{3}{1} + u + {4q} = 1 + u + {uq}" + }, + { + "bbox": [ + 198, + 178, + 246, + 186 + ], + "type": "text", + "content": " dH" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 198, + 129, + 246, + 178 + ], + "blocks": [ + { + "bbox": [ + 198, + 129, + 246, + 178 + ], + "lines": [ + { + "bbox": [ + 198, + 129, + 246, + 178 + ], + "spans": [ + { + "bbox": [ + 198, + 129, + 246, + 178 + ], + "type": "image", + "image_path": "d057aae1bdbe74f1ef3ade2252fe5600868dacc63d10ffa80489d5108f2465cd.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 246, + 129, + 294, + 178 + ], + "blocks": [ + { + "bbox": [ + 246, + 129, + 294, + 178 + ], + "lines": [ + { + "bbox": [ + 246, + 129, + 294, + 178 + ], + "spans": [ + { + "bbox": [ + 246, + 129, + 294, + 178 + ], + "type": "image", + "image_path": "7f0ee7ecc4f01729ebc6c197b05497ab9e0d2c9ad1e1974529aaba7ff496842a.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 250, + 178, + 290, + 186 + ], + "lines": [ + { + "bbox": [ + 250, + 178, + 290, + 186 + ], + "spans": [ + { + "bbox": [ + 250, + 178, + 290, + 186 + ], + "type": "text", + "content": "Canidae Pupil" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 295, + 129, + 343, + 178 + ], + "blocks": [ + { + "bbox": [ + 295, + 129, + 343, + 178 + ], + "lines": [ + { + "bbox": [ + 295, + 129, + 343, + 178 + ], + "spans": [ + { + "bbox": [ + 295, + 129, + 343, + 178 + ], + "type": "image", + "image_path": "950ca79c0a21d73328cb6c2141ef3a183badae2ab7c780a9fdaf31d8eff33e9a.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + 
"index": 24 + }, + { + "type": "image", + "bbox": [ + 343, + 129, + 392, + 178 + ], + "blocks": [ + { + "bbox": [ + 343, + 129, + 392, + 178 + ], + "lines": [ + { + "bbox": [ + 343, + 129, + 392, + 178 + ], + "spans": [ + { + "bbox": [ + 343, + 129, + 392, + 178 + ], + "type": "image", + "image_path": "4a3892a846507c7e850b5c42d892d2c92afc49f1ac4bfb0c9247b91e1e0e3782.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 392, + 129, + 441, + 178 + ], + "blocks": [ + { + "bbox": [ + 392, + 129, + 441, + 178 + ], + "lines": [ + { + "bbox": [ + 392, + 129, + 441, + 178 + ], + "spans": [ + { + "bbox": [ + 392, + 129, + 441, + 178 + ], + "type": "image", + "image_path": "fff7032c14f3dd2afb30d5725689aeb7224f00b1835d24d2086789fd848604f4.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 441, + 129, + 490, + 178 + ], + "blocks": [ + { + "bbox": [ + 441, + 129, + 490, + 178 + ], + "lines": [ + { + "bbox": [ + 441, + 129, + 490, + 178 + ], + "spans": [ + { + "bbox": [ + 441, + 129, + 490, + 178 + ], + "type": "image", + "image_path": "f4ee52e5caa6487a1c008a41983ab01976ff119b38dd1cfdc50448e4f2284da8.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 491, + 129, + 540, + 178 + ], + "blocks": [ + { + "bbox": [ + 491, + 129, + 540, + 178 + ], + "lines": [ + { + "bbox": [ + 491, + 129, + 540, + 178 + ], + "spans": [ + { + "bbox": [ + 491, + 129, + 540, + 178 + ], + "type": "image", + "image_path": "f01f0ae813036598b13cd750bae2ac75ae2ade78cfe29545fa92258a290b064b.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 47, + 220, + 170, + 304 + ], + "blocks": [ + { + "bbox": [ + 46, + 194, + 545, + 217 + ], + "lines": [ + { + "bbox": [ + 46, + 194, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 46, + 194, + 545, + 217 + ], + "type": "text", + "content": "Figure 3. Effect of progressively generating counterfactual images on (left) cat/dog classifier (0-Cat / 1-Dog), and (right) perceived age classifier (0-Senior / 1-Young). Model probability prediction during the process is attached at the top right corner." 
+ } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 47, + 220, + 170, + 304 + ], + "lines": [ + { + "bbox": [ + 47, + 220, + 170, + 304 + ], + "spans": [ + { + "bbox": [ + 47, + 220, + 170, + 304 + ], + "type": "image", + "image_path": "fa8994a05b33e168543b91bca0c47422be0966914496d537e87c679df5e3861f.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 171, + 220, + 293, + 304 + ], + "blocks": [ + { + "bbox": [ + 171, + 220, + 293, + 304 + ], + "lines": [ + { + "bbox": [ + 171, + 220, + 293, + 304 + ], + "spans": [ + { + "bbox": [ + 171, + 220, + 293, + 304 + ], + "type": "image", + "image_path": "727877f509e7e5d67c5b05d7d903a72ee905c64a7b8dbdfad753de413e2325fe.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 299, + 220, + 417, + 304 + ], + "blocks": [ + { + "bbox": [ + 299, + 220, + 417, + 304 + ], + "lines": [ + { + "bbox": [ + 299, + 220, + 417, + 304 + ], + "spans": [ + { + "bbox": [ + 299, + 220, + 417, + 304 + ], + "type": "image", + "image_path": "b77fcf9e992f6c5a9b18b69d5691eb5072855ddebb3e5b44b5ca39ecb67ab12b.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 419, + 220, + 541, + 304 + ], + "blocks": [ + { + "bbox": [ + 419, + 220, + 541, + 304 + ], + "lines": [ + { + "bbox": [ + 419, + 220, + 541, + 304 + ], + "spans": [ + { + "bbox": [ + 419, + 220, + 541, + 304 + ], + "type": "image", + "image_path": "5600c547b9caf898459903860dd888f4bcb0a9050f29754b6b7151f43b1597df.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 47, + 316, + 170, + 396 + ], + "blocks": [ + { + "bbox": [ + 157, + 306, + 436, + 316 + ], + "lines": [ + { + "bbox": [ + 157, + 306, + 436, + 316 + ], + "spans": [ + { + "bbox": [ + 157, + 306, + 436, + 316 + ], + "type": "text", + "content": "(a) Model diagnosis histograms generated by ZOOM on four facial attribute classifiers." + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 47, + 316, + 170, + 396 + ], + "lines": [ + { + "bbox": [ + 47, + 316, + 170, + 396 + ], + "spans": [ + { + "bbox": [ + 47, + 316, + 170, + 396 + ], + "type": "image", + "image_path": "1847122c4f8c41b0eb8a40cf92eecf152e7305a956b9ffb3b71751c5c69b6fd7.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 110, + 399, + 483, + 410 + ], + "lines": [ + { + "bbox": [ + 110, + 399, + 483, + 410 + ], + "spans": [ + { + "bbox": [ + 110, + 399, + 483, + 410 + ], + "type": "text", + "content": "(b) Model diagnosis histograms generated by ZOOM on four classifiers trained on manually-crafted imbalance data." + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 45, + 417, + 545, + 440 + ], + "lines": [ + { + "bbox": [ + 45, + 417, + 545, + 440 + ], + "spans": [ + { + "bbox": [ + 45, + 417, + 545, + 440 + ], + "type": "text", + "content": "Figure 4. Model diagnosis histograms generated by ZOOM. The vertical axis values reflect the attribute sensitivities calculated by averaging the model probability change over all sampled images. The horizontal axis is the attribute space input by user." 
+ } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_caption" + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 171, + 316, + 293, + 396 + ], + "blocks": [ + { + "bbox": [ + 171, + 316, + 293, + 396 + ], + "lines": [ + { + "bbox": [ + 171, + 316, + 293, + 396 + ], + "spans": [ + { + "bbox": [ + 171, + 316, + 293, + 396 + ], + "type": "image", + "image_path": "7aefa2cf30afb6773a8d83624c5cc365680c7a839dfb50d5f30e1c4037506cf3.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + } + ], + "index": 36 + }, + { + "type": "image", + "bbox": [ + 295, + 316, + 417, + 396 + ], + "blocks": [ + { + "bbox": [ + 295, + 316, + 417, + 396 + ], + "lines": [ + { + "bbox": [ + 295, + 316, + 417, + 396 + ], + "spans": [ + { + "bbox": [ + 295, + 316, + 417, + 396 + ], + "type": "image", + "image_path": "3b29ecd3f3d5ddb487e1fec77c9ab7e51b89f73310a02eefdbed083bdd755f27.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 419, + 316, + 541, + 396 + ], + "blocks": [ + { + "bbox": [ + 419, + 316, + 541, + 396 + ], + "lines": [ + { + "bbox": [ + 419, + 316, + 541, + 396 + ], + "spans": [ + { + "bbox": [ + 419, + 316, + 541, + 396 + ], + "type": "image", + "image_path": "e39ef17850764b0a9307a0e37db6c248af616552d7b6550dc1ceacd5997a6f2f.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_body" + } + ], + "index": 38 + }, + { + "bbox": [ + 46, + 451, + 172, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 451, + 172, + 464 + ], + "spans": [ + { + "bbox": [ + 46, + 451, + 172, + 464 + ], + "type": "text", + "content": "4. Experimental Results" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 46, + 471, + 287, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 471, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 46, + 471, + 287, + 555 + ], + "type": "text", + "content": "This section describes the experimental validations on the effectiveness and reliability of ZOOM. First, we describe the model setup in Sec. 4.1. Sec. 4.2 and Sec. 4.3 visualize and validate the model diagnosis results for the single-attribute setting. In Sec. 4.4, we show results on synthesized multiple-attribute counterfactual images and apply them to counterfactual training." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 46, + 561, + 129, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 561, + 129, + 574 + ], + "spans": [ + { + "bbox": [ + 46, + 561, + 129, + 574 + ], + "type": "text", + "content": "4.1. Model Setup" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 46, + 579, + 287, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 579, + 287, + 639 + ], + "spans": [ + { + "bbox": [ + 46, + 579, + 287, + 639 + ], + "type": "text", + "content": "Pre-trained models: We used Stylegan2-ADA [11] pretrained on FFHQ [12] and AFHQ [1] as our base generative networks, and the pre-trained CLIP model [24] which is parameterized by ViT-B/32. We followed StyleCLIP [21] setups to compute the channel relevance matrices " + }, + { + "bbox": [ + 46, + 579, + 287, + 639 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 46, + 579, + 287, + 639 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 46, + 639, + 287, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 639, + 287, + 687 + ], + "spans": [ + { + "bbox": [ + 46, + 639, + 287, + 687 + ], + "type": "text", + "content": "Target models: Our classifier models are ResNet50 with single fully-connected head initialized by TorchVision1. In training the binary classifiers, we use the Adam optimizer with learning rate 0.001 and batch size 128. We train binary" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 305, + 453, + 545, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 453, + 545, + 500 + ], + "spans": [ + { + "bbox": [ + 305, + 453, + 545, + 500 + ], + "type": "text", + "content": "classifiers for Eyeglasses, Perceived Gender, Mustache, Perceived Age attributes on CelebA and for cat/dog classification on AFHQ. For the 98-keypoint detectors, we used the HR-Net architecture [31] on WFLW [33]." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 305, + 512, + 522, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 512, + 522, + 525 + ], + "spans": [ + { + "bbox": [ + 305, + 512, + 522, + 525 + ], + "type": "text", + "content": "4.2. Visual Model Diagnosis: Single-Attribute" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 304, + 533, + 545, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 533, + 545, + 604 + ], + "spans": [ + { + "bbox": [ + 304, + 533, + 545, + 604 + ], + "type": "text", + "content": "Understanding where deep learning model fails is an essential step towards building trustworthy models. Our proposed work allows us to generate counterfactual images (Sec. 3.3) and provide insights on sensitivities of the target model (Sec. 3.4). This section visualizes the counterfactual images in which only one attribute is modified at a time." + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 304, + 605, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 545, + 712 + ], + "type": "text", + "content": "Fig. 3 shows the single-attribute counterfactual images. Interestingly (but not unexpectedly), we can see that reducing the hair length or joyfulness causes the age classifier more likely to label the face to an older person. Note that our approach is extendable to multiple domains, as we change the cat-like pupil to dog-like, the dog-cat classification tends towards the dog. Using the counterfactual images, we can conduct model diagnosis with the method mentioned in Sec. 
3.4, on which attributes the model is sen" + } + ] + } + ], + "index": 50 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 46, + 693, + 280, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 693, + 280, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 693, + 280, + 712 + ], + "type": "text", + "content": "1https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11635" + } + ] + } + ], + "index": 51 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 108 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 108 + ], + "type": "text", + "content": "sitive to. In the histogram generated in model diagnosis, a higher bar means the model is more sensitive toward the corresponding attribute." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 109, + 288, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 109, + 288, + 276 + ], + "spans": [ + { + "bbox": [ + 46, + 109, + 288, + 276 + ], + "type": "text", + "content": "Fig. 4a shows the model diagnosis histograms on regularly-trained classifiers. For instance, the cat/dog classifier histogram shows outstanding sensitivity to green eyes and vertical pupil. The analysis is intuitive since these are cat-biased traits rarely observed in dog photos. Moreover, the histogram of eyeglasses classifier shows that the mutation on bushy eyebrows is more influential for flipping the model prediction. It potentially reveals the positional correlation between eyeglasses and bushy eyebrows. The advantage of single-attribute model diagnosis is that the score of each attribute in the histogram are independent from other attributes, enabling unambiguous understanding of exact semantics that make the model fail. Diagnosis results for additional target models can be found in Appendix B." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 284, + 243, + 297 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 284, + 243, + 297 + ], + "spans": [ + { + "bbox": [ + 47, + 284, + 243, + 297 + ], + "type": "text", + "content": "4.3. Validation of Visual Model Diagnosis" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 303, + 287, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 303, + 287, + 399 + ], + "spans": [ + { + "bbox": [ + 46, + 303, + 287, + 399 + ], + "type": "text", + "content": "Evaluating whether our zero-shot sensitivity histograms (Fig. 4) explain the true vulnerability is a difficult task, since we do not have access to a sufficiently large and balanced test set fully annotated in an open-vocabulary setting. To verify the performance, we create synthetically imbalanced cases where the model bias is known. We then compare our results with a supervised diagnosis setting [17]. In addition, we will validate the decoupling of the attributes by CLIP." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 409, + 213, + 421 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 409, + 213, + 421 + ], + "spans": [ + { + "bbox": [ + 47, + 409, + 213, + 421 + ], + "type": "text", + "content": "4.3.1 Creating imbalanced classifiers" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 426, + 287, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 426, + 287, + 664 + ], + "spans": [ + { + "bbox": [ + 46, + 426, + 287, + 664 + ], + "type": "text", + "content": "In order to evaluate whether our sensitivity histogram is correct, we train classifiers that are highly imbalanced towards a known attribute and see whether ZOOM can detect the sensitivity w.r.t the attribute. For instance, when training the perceived-age classifier (binarized as Young in CelebA), we created a dataset on which the trained classifier is strongly sensitive to Bangs (hair over forehead). The custom dataset is a CelebA training subset that consists of 20, 200 images. More specifically, there are 10,000 images that have both young people that have bangs, represented as (1, 1), and 10,000 images of people that are not young and have no bangs, represented as (0, 0). The remaining combinations of (1, 0) and (0, 1) have only 100 images. With this imbalanced dataset, bangs is the attribute that dominantly correlates with whether the person is young, and hence the perceived-age classifier would be highly sensitive towards bangs. See Fig. 5 (the right histograms) for an illustration of the sensitivity histogram computed by our method for the case of an age classifier with bangs (top) and lipstick (bottom) being imbalanced." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "text", + "content": "We trained multiple imbalanced classifiers with this methodology, and visualize the model diagnosis histograms of these imbalanced classifiers in Fig. 4b. We can observe that the ZOOM histograms successfully detect the" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 306, + 72, + 544, + 267 + ], + "blocks": [ + { + "bbox": [ + 306, + 72, + 544, + 267 + ], + "lines": [ + { + "bbox": [ + 306, + 72, + 544, + 267 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 544, + 267 + ], + "type": "image", + "image_path": "03c482988bb95c3fb2b913e7d04e7753f129604886046e43297446927fd39540.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 273, + 545, + 296 + ], + "lines": [ + { + "bbox": [ + 305, + 273, + 545, + 296 + ], + "spans": [ + { + "bbox": [ + 305, + 273, + 545, + 296 + ], + "type": "text", + "content": "Figure 5. The sensitivity of the age classifier is evaluated with ZOOM (right) and AttGAN (left), achieving comparable results." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 310, + 304, + 423, + 411 + ], + "blocks": [ + { + "bbox": [ + 310, + 304, + 423, + 411 + ], + "lines": [ + { + "bbox": [ + 310, + 304, + 423, + 411 + ], + "spans": [ + { + "bbox": [ + 310, + 304, + 423, + 411 + ], + "type": "image", + "image_path": "868104e74e862e5874b0fc8bdea2db78e9c3a4eddc9dca8eb7eac1b667dd688e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 329, + 414, + 403, + 423 + ], + "lines": [ + { + "bbox": [ + 329, + 414, + 403, + 423 + ], + "spans": [ + { + "bbox": [ + 329, + 414, + 403, + 423 + ], + "type": "text", + "content": "(a) Mustache classifier" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 305, + 427, + 545, + 460 + ], + "lines": [ + { + "bbox": [ + 305, + 427, + 545, + 460 + ], + "spans": [ + { + "bbox": [ + 305, + 427, + 545, + 460 + ], + "type": "text", + "content": "Figure 6. Confusion matrix of CLIP score variation (vertical axis) when perturbing attributes (horizontal axis). This shows that attributes in ZOOM are highly decoupled." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 427, + 304, + 542, + 411 + ], + "blocks": [ + { + "bbox": [ + 427, + 304, + 542, + 411 + ], + "lines": [ + { + "bbox": [ + 427, + 304, + 542, + 411 + ], + "spans": [ + { + "bbox": [ + 427, + 304, + 542, + 411 + ], + "type": "image", + "image_path": "dcbaf559d890ab0d776db1dfd7c533b9d30df76634298628b22e1c7d2c277833.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 440, + 414, + 527, + 423 + ], + "lines": [ + { + "bbox": [ + 440, + 414, + 527, + 423 + ], + "spans": [ + { + "bbox": [ + 440, + 414, + 527, + 423 + ], + "type": "text", + "content": "(b) Perceived age classifier" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 472, + 545, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 472, + 545, + 497 + ], + "spans": [ + { + "bbox": [ + 305, + 472, + 545, + 497 + ], + "type": "text", + "content": "synthetically-made bias, which are shown as the highest bars in histograms. See the caption for more information." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 506, + 503, + 517 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 506, + 503, + 517 + ], + "spans": [ + { + "bbox": [ + 306, + 506, + 503, + 517 + ], + "type": "text", + "content": "4.3.2 Comparison with supervised diagnosis" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 521, + 545, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 545, + 677 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 545, + 677 + ], + "type": "text", + "content": "We also validated our histogram by comparing it with the case in which we have access to a generative model that has been explicitly trained to disentangle attributes. We follow the work on [17] and used AttGAN [6] trained on the CelebA training set over 15 attributes2. After the training converged, we performed adversarial learning in the attribute space of AttGAN and create a sensitivity histogram using the same approach as Sec. 3.4. Fig. 5 shows the result of this method on the perceived-age classifier which is made biased towards bangs. 
As anticipated, the AttGAN histogram (left) corroborates the histogram derived from our method (right). Interestingly, unlike ZOOM, AttGAN show less sensitivity to remaining attributes. This is likely" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "type": "text", + "content": "Bald, Bangs, Black_Hair, Blond_Hair, Brown_Hair, Bushy_Eyesbrows, Eyeglasses, Male, Mouth_Slightly_Open, Mustache, No_Board, Pale_Skin, Young, Smiling, Wearing_Lipstick." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11636" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 60, + 81, + 108, + 130 + ], + "blocks": [ + { + "bbox": [ + 50, + 94, + 59, + 177 + ], + "lines": [ + { + "bbox": [ + 50, + 94, + 59, + 177 + ], + "spans": [ + { + "bbox": [ + 50, + 94, + 59, + 177 + ], + "type": "text", + "content": "Counterfactual Original" + } + ] + } + ], + "index": 3, + "angle": 270, + "type": "image_caption" + }, + { + "bbox": [ + 60, + 81, + 108, + 130 + ], + "lines": [ + { + "bbox": [ + 60, + 81, + 108, + 130 + ], + "spans": [ + { + "bbox": [ + 60, + 81, + 108, + 130 + ], + "type": "image", + "image_path": "971f72c88bb26074e4f23622c9671dc69ad2c5c8ef52b1de0e3abff7f432ed84.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 60, + 131, + 108, + 177 + ], + "blocks": [ + { + "bbox": [ + 60, + 131, + 108, + 177 + ], + "lines": [ + { + "bbox": [ + 60, + 131, + 108, + 177 + ], + "spans": [ + { + "bbox": [ + 60, + 131, + 108, + 177 + ], + "type": "image", + "image_path": "59a77b820be44c6ef95e06c79ed435d4aea09f30e59a51a44334ce5918612f50.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 187, + 525, + 198 + ], + "lines": [ + { + "bbox": [ + 67, + 187, + 525, + 198 + ], + "spans": [ + { + "bbox": [ + 67, + 187, + 525, + 198 + ], + "type": "text", + "content": "Figure 7. Multi-attribute counterfactual in faces. The model probability is documented in the upper right corner of each image." 
+ } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 108, + 81, + 156, + 130 + ], + "blocks": [ + { + "bbox": [ + 108, + 81, + 156, + 130 + ], + "lines": [ + { + "bbox": [ + 108, + 81, + 156, + 130 + ], + "spans": [ + { + "bbox": [ + 108, + 81, + 156, + 130 + ], + "type": "image", + "image_path": "f9405d19790d274a6d1931829f8e4e575bd3f7cc28b5941c5c136c6e6fe50c9f.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 108, + 131, + 156, + 177 + ], + "blocks": [ + { + "bbox": [ + 108, + 131, + 156, + 177 + ], + "lines": [ + { + "bbox": [ + 108, + 131, + 156, + 177 + ], + "spans": [ + { + "bbox": [ + 108, + 131, + 156, + 177 + ], + "type": "image", + "image_path": "0b74918e51861880fb6780c140d8792c3dd7328306aa4b189e381e22a0959b59.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 157, + 81, + 204, + 130 + ], + "blocks": [ + { + "bbox": [ + 157, + 81, + 204, + 130 + ], + "lines": [ + { + "bbox": [ + 157, + 81, + 204, + 130 + ], + "spans": [ + { + "bbox": [ + 157, + 81, + 204, + 130 + ], + "type": "image", + "image_path": "62d9231115f02f5bfbba46a4a4e0d5568e7376856e2196457f2c2cadb092c30d.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 157, + 131, + 204, + 177 + ], + "blocks": [ + { + "bbox": [ + 157, + 131, + 204, + 177 + ], + "lines": [ + { + "bbox": [ + 157, + 131, + 204, + 177 + ], + "spans": [ + { + "bbox": [ + 157, + 131, + 204, + 177 + ], + "type": "image", + "image_path": "95c9e54324f64ecb0c2b49a24c3925de7336d3b45bf0e60163eb774073f3797e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 205, + 81, + 252, + 130 + ], + "blocks": [ + { + "bbox": [ + 205, + 81, + 252, + 130 + ], + "lines": [ + { + "bbox": [ + 205, + 81, + 252, + 130 + ], + "spans": [ + { + "bbox": [ + 205, + 81, + 252, + 130 + ], + "type": "image", + "image_path": "35b5f6e20e6167a44791b6cb91234ca74c030f5e4c36171689c395c8065cd4e6.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 205, + 131, + 252, + 177 + ], + "blocks": [ + { + "bbox": [ + 205, + 131, + 252, + 177 + ], + "lines": [ + { + "bbox": [ + 205, + 131, + 252, + 177 + ], + "spans": [ + { + "bbox": [ + 205, + 131, + 252, + 177 + ], + "type": "image", + "image_path": "97e45ff9d147e87912d810e4d276a4c527a7d1ad64ea900907f4c91ed774a842.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 253, + 81, + 300, + 130 + ], + "blocks": [ + { + "bbox": [ + 253, + 81, + 300, + 130 + ], + "lines": [ + { + "bbox": [ + 253, + 81, + 300, + 130 + ], + "spans": [ + { + "bbox": [ + 253, + 81, + 300, + 130 + ], + "type": "image", + "image_path": "da9051d7fff8dc80e0c62ad7ee8bb1f22aee7c66ceee600246b30ecd46b68ec9.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 253, + 131, + 300, + 177 + ], + "blocks": [ + { + "bbox": [ + 253, + 131, + 300, + 177 + ], + "lines": [ + { + "bbox": [ + 253, + 131, + 300, + 177 + ], + "spans": [ + { + "bbox": [ + 253, + 131, + 300, + 177 + ], + "type": "image", + "image_path": 
"fb66c5f6fd296b219f4e39383cf0d5e8611263f518e77b88dcc07bac8524b5fb.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 301, + 81, + 349, + 130 + ], + "blocks": [ + { + "bbox": [ + 301, + 81, + 349, + 130 + ], + "lines": [ + { + "bbox": [ + 301, + 81, + 349, + 130 + ], + "spans": [ + { + "bbox": [ + 301, + 81, + 349, + 130 + ], + "type": "image", + "image_path": "64ca890b2eb603a0c45fbe34384d65cb3f74b4fb1c434434a634b531abed726f.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 301, + 131, + 349, + 177 + ], + "blocks": [ + { + "bbox": [ + 301, + 131, + 349, + 177 + ], + "lines": [ + { + "bbox": [ + 301, + 131, + 349, + 177 + ], + "spans": [ + { + "bbox": [ + 301, + 131, + 349, + 177 + ], + "type": "image", + "image_path": "75a69bd63b6bb345d672a5f742d694288aff8578f206e84032f24c69b9a9b894.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 350, + 81, + 398, + 130 + ], + "blocks": [ + { + "bbox": [ + 350, + 81, + 398, + 130 + ], + "lines": [ + { + "bbox": [ + 350, + 81, + 398, + 130 + ], + "spans": [ + { + "bbox": [ + 350, + 81, + 398, + 130 + ], + "type": "image", + "image_path": "ca61c210042de2fc0e449802d99fb5d4d50854952d4b0d1c3b744eeb1bc247eb.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 350, + 131, + 398, + 177 + ], + "blocks": [ + { + "bbox": [ + 350, + 131, + 398, + 177 + ], + "lines": [ + { + "bbox": [ + 350, + 131, + 398, + 177 + ], + "spans": [ + { + "bbox": [ + 350, + 131, + 398, + 177 + ], + "type": "image", + "image_path": "946eed3c0bbd26dbe64a2cfba69b326b2847fd1f8cba97cbe6ac25e8917ff046.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 399, + 81, + 447, + 130 + ], + "blocks": [ + { + "bbox": [ + 399, + 81, + 447, + 130 + ], + "lines": [ + { + "bbox": [ + 399, + 81, + 447, + 130 + ], + "spans": [ + { + "bbox": [ + 399, + 81, + 447, + 130 + ], + "type": "image", + "image_path": "d123ff6aac734384ccd47ae0c44a41de25a370497a5d42d8c5f027dda49c9db5.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 399, + 131, + 447, + 177 + ], + "blocks": [ + { + "bbox": [ + 399, + 131, + 447, + 177 + ], + "lines": [ + { + "bbox": [ + 399, + 131, + 447, + 177 + ], + "spans": [ + { + "bbox": [ + 399, + 131, + 447, + 177 + ], + "type": "image", + "image_path": "0ef9843e612c9e58dda93b42683eadb3af0fd3f8500c8a006c5c82c974bc2501.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 448, + 81, + 494, + 130 + ], + "blocks": [ + { + "bbox": [ + 448, + 81, + 494, + 130 + ], + "lines": [ + { + "bbox": [ + 448, + 81, + 494, + 130 + ], + "spans": [ + { + "bbox": [ + 448, + 81, + 494, + 130 + ], + "type": "image", + "image_path": "117f9d63d2885bbdb0be3707734cbab387bc97bca6cd5b6a8ae619f0b9996fa3.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 448, + 131, + 494, + 177 + ], + "blocks": [ + { + "bbox": [ + 448, + 131, + 494, + 177 + ], + "lines": [ + { + "bbox": [ + 448, + 131, + 494, + 177 + ], + "spans": [ + { + "bbox": [ + 
448, + 131, + 494, + 177 + ], + "type": "image", + "image_path": "f48d716b2acde35b9f94c46749523a8fb01e8260331bb184c25132e442f4c22e.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 496, + 81, + 543, + 130 + ], + "blocks": [ + { + "bbox": [ + 496, + 81, + 543, + 130 + ], + "lines": [ + { + "bbox": [ + 496, + 81, + 543, + 130 + ], + "spans": [ + { + "bbox": [ + 496, + 81, + 543, + 130 + ], + "type": "image", + "image_path": "c909bca5f6f918435a78f23e7f3cbf31b341cf4d888a1eecd8568c29216efa66.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 496, + 131, + 543, + 177 + ], + "blocks": [ + { + "bbox": [ + 496, + 131, + 543, + 177 + ], + "lines": [ + { + "bbox": [ + 496, + 131, + 543, + 177 + ], + "spans": [ + { + "bbox": [ + 496, + 131, + 543, + 177 + ], + "type": "image", + "image_path": "2e4a4d43d388288a2a74b5386a43481c22c06d07edd49bac51178d01e8a5a1bf.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "bbox": [ + 46, + 209, + 286, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 209, + 286, + 292 + ], + "spans": [ + { + "bbox": [ + 46, + 209, + 286, + 292 + ], + "type": "text", + "content": "because AttGAN has a latent space learned in a supervised manner and hence attributes are better disentangled than with StyleGAN. Note that AttGAN is trained with a fixed set of attributes; if a new attribute of interest is introduced, the dataset needs to be re-labeled and AttGAN retrained. ZOOM, however, merely calls for the addition of a new text prompt. More results in Appendix B." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 47, + 301, + 253, + 312 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 301, + 253, + 312 + ], + "spans": [ + { + "bbox": [ + 47, + 301, + 253, + 312 + ], + "type": "text", + "content": "4.3.3 Measuring disentanglement of attributes" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 46, + 317, + 286, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 317, + 286, + 376 + ], + "spans": [ + { + "bbox": [ + 46, + 317, + 286, + 376 + ], + "type": "text", + "content": "Previous works demonstrated that the StyleGAN's latent space can be entangled [2, 27], adding undesired dependencies when searching single-attribute counterfactuals. This section verifies that our framework can disentangle the attributes and mostly edit the desirable attributes." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 46, + 377, + 286, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 377, + 286, + 567 + ], + "spans": [ + { + "bbox": [ + 46, + 377, + 286, + 567 + ], + "type": "text", + "content": "We use CLIP as a super annotator to measure attribute changes during single-attribute modifications. For 1,000 images, we record the attribute change after performing adversarial learning in each attribute, and average the attribute score change. Fig. 6 shows the confusion matrix during single-attribute counterfactual synthesis. The horizontal axis is the attribute being edited during the optimization, and the vertical axis represents the CLIP prediction changed by the process. For instance, the first column of Fig. 6a is generated when we optimize over bangs for the mustache classifier. We record the CLIP prediction variation. 
It clearly shows that bangs is the dominant attribute changing during the optimization. From the main diagonal of matrices, it is evident that the ZOOM mostly perturbs the attribute of interest. The results indicate reasonable disentanglement among attributes." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 47, + 575, + 264, + 587 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 575, + 264, + 587 + ], + "spans": [ + { + "bbox": [ + 47, + 575, + 264, + 587 + ], + "type": "text", + "content": "4.4. Visual Model Diagnosis: Multi-Attributes" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 46, + 594, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 286, + 713 + ], + "type": "text", + "content": "In the previous sections, we have visualized and validated single-attribute model diagnosis histograms and counterfactual images. In this section, we will assess ZOOM's ability to produce counterfactual images by concurrently exploring multiple attributes within " + }, + { + "bbox": [ + 46, + 594, + 286, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 46, + 594, + 286, + 713 + ], + "type": "text", + "content": ", the domain of user-defined attributes. The approach conducts multi-attribute counterfactual searches across various edit directions, identifying distinct semantic combinations that result in the target model's failure. By doing so, we can effectively create more powerful counterfactuals images (see Fig. 9)." + } + ] + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 308, + 215, + 353, + 251 + ], + "blocks": [ + { + "bbox": [ + 308, + 215, + 353, + 251 + ], + "lines": [ + { + "bbox": [ + 308, + 215, + 353, + 251 + ], + "spans": [ + { + "bbox": [ + 308, + 215, + 353, + 251 + ], + "type": "image", + "image_path": "d63f194e849545ee4d327760565ffb442da574671e4f852701c57f64f75ccd06.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 308, + 251, + 353, + 289 + ], + "blocks": [ + { + "bbox": [ + 308, + 251, + 353, + 289 + ], + "lines": [ + { + "bbox": [ + 308, + 251, + 353, + 289 + ], + "spans": [ + { + "bbox": [ + 308, + 251, + 353, + 289 + ], + "type": "image", + "image_path": "2a8b19bda7e3c098f3b7515ecb4c437e82aff219568f13b46ca980369ed954b0.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 298, + 545, + 320 + ], + "lines": [ + { + "bbox": [ + 306, + 298, + 545, + 320 + ], + "spans": [ + { + "bbox": [ + 306, + 298, + 545, + 320 + ], + "type": "text", + "content": "Figure 8. Multi-attribute counterfactual on Cat/Dog classifier. The number in each image is the predicted probability of being a dog." 
+ } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_caption" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 354, + 215, + 392, + 251 + ], + "blocks": [ + { + "bbox": [ + 386, + 206, + 474, + 214 + ], + "lines": [ + { + "bbox": [ + 386, + 206, + 474, + 214 + ], + "spans": [ + { + "bbox": [ + 386, + 206, + 474, + 214 + ], + "type": "text", + "content": "Cat / Dog Classifier (0-Cat / 1-Dog)" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 354, + 215, + 392, + 251 + ], + "lines": [ + { + "bbox": [ + 354, + 215, + 392, + 251 + ], + "spans": [ + { + "bbox": [ + 354, + 215, + 392, + 251 + ], + "type": "image", + "image_path": "97575becca7c2b0a58f05e7bd3efea1ce8f805407f9a9a8bc5b0b8fa133de821.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_body" + } + ], + "index": 34 + }, + { + "type": "image", + "bbox": [ + 354, + 252, + 392, + 289 + ], + "blocks": [ + { + "bbox": [ + 354, + 252, + 392, + 289 + ], + "lines": [ + { + "bbox": [ + 354, + 252, + 392, + 289 + ], + "spans": [ + { + "bbox": [ + 354, + 252, + 392, + 289 + ], + "type": "image", + "image_path": "9f015960384356c923f938a96ad155d7920e2f40848154f6927b5ca61d80c9da.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 392, + 215, + 430, + 251 + ], + "blocks": [ + { + "bbox": [ + 392, + 215, + 430, + 251 + ], + "lines": [ + { + "bbox": [ + 392, + 215, + 430, + 251 + ], + "spans": [ + { + "bbox": [ + 392, + 215, + 430, + 251 + ], + "type": "image", + "image_path": "9dc79a7da6f29d71c72a5adf20673275f04537bf9eea485fde0a25d54eae61fd.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + } + ], + "index": 36 + }, + { + "type": "image", + "bbox": [ + 392, + 252, + 430, + 289 + ], + "blocks": [ + { + "bbox": [ + 392, + 252, + 430, + 289 + ], + "lines": [ + { + "bbox": [ + 392, + 252, + 430, + 289 + ], + "spans": [ + { + "bbox": [ + 392, + 252, + 430, + 289 + ], + "type": "image", + "image_path": "b0ef6e22515f9fbe3fe524912b72ea323ef911ac09d0c03ab0e227513c25dffc.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 430, + 215, + 468, + 251 + ], + "blocks": [ + { + "bbox": [ + 430, + 215, + 468, + 251 + ], + "lines": [ + { + "bbox": [ + 430, + 215, + 468, + 251 + ], + "spans": [ + { + "bbox": [ + 430, + 215, + 468, + 251 + ], + "type": "image", + "image_path": "eacf43272b02d9d8200813f9d64795ddfee3402d5fde9196f88ee01631e050d5.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_body" + } + ], + "index": 38 + }, + { + "type": "image", + "bbox": [ + 430, + 252, + 468, + 289 + ], + "blocks": [ + { + "bbox": [ + 430, + 252, + 468, + 289 + ], + "lines": [ + { + "bbox": [ + 430, + 252, + 468, + 289 + ], + "spans": [ + { + "bbox": [ + 430, + 252, + 468, + 289 + ], + "type": "image", + "image_path": "00c3b77821922449ec954cf2a408c8a712da33871a82ab4f911ece894aeb4d58.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + } + ], + "index": 39 + }, + { + "type": "image", + "bbox": [ + 468, + 215, + 506, + 251 + ], + "blocks": [ + { + "bbox": [ + 468, + 215, + 506, + 251 + ], + "lines": [ + { + "bbox": [ + 468, + 215, + 506, + 251 + ], + "spans": [ + { + "bbox": [ + 468, + 215, + 506, + 251 + ], + "type": "image", + "image_path": "73ef0f2cf59bc1d4b27d02b70df889a8d0b602bd88ce7e79c4b3b4e6e43cb0e9.jpg" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": 
"image_body" + } + ], + "index": 40 + }, + { + "type": "image", + "bbox": [ + 468, + 252, + 506, + 289 + ], + "blocks": [ + { + "bbox": [ + 468, + 252, + 506, + 289 + ], + "lines": [ + { + "bbox": [ + 468, + 252, + 506, + 289 + ], + "spans": [ + { + "bbox": [ + 468, + 252, + 506, + 289 + ], + "type": "image", + "image_path": "44bba462e0e42241c95319ec2b01d11c8a422c41b0ce523d8f3136a9bc721633.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_body" + } + ], + "index": 41 + }, + { + "type": "image", + "bbox": [ + 507, + 215, + 543, + 251 + ], + "blocks": [ + { + "bbox": [ + 507, + 215, + 543, + 251 + ], + "lines": [ + { + "bbox": [ + 507, + 215, + 543, + 251 + ], + "spans": [ + { + "bbox": [ + 507, + 215, + 543, + 251 + ], + "type": "image", + "image_path": "93e5220bf68cbf225c272fed33def41a45c1fad35b0f715590fc34da455c23ed.jpg" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_body" + } + ], + "index": 42 + }, + { + "type": "image", + "bbox": [ + 507, + 252, + 543, + 289 + ], + "blocks": [ + { + "bbox": [ + 507, + 252, + 543, + 289 + ], + "lines": [ + { + "bbox": [ + 507, + 252, + 543, + 289 + ], + "spans": [ + { + "bbox": [ + 507, + 252, + 543, + 289 + ], + "type": "image", + "image_path": "b8335552498ed460f4d22b2fd05b6d4609a5b26d9c180d8d4bc002a607f780d4.jpg" + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_body" + } + ], + "index": 43 + }, + { + "type": "image", + "bbox": [ + 308, + 332, + 355, + 378 + ], + "blocks": [ + { + "bbox": [ + 308, + 332, + 355, + 378 + ], + "lines": [ + { + "bbox": [ + 308, + 332, + 355, + 378 + ], + "spans": [ + { + "bbox": [ + 308, + 332, + 355, + 378 + ], + "type": "image", + "image_path": "5d137f7d3aa9d683b7868c27d6b50d55214821f413f2a649b34ec317e8ccc450.jpg" + } + ] + } + ], + "index": 45, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 310, + 380, + 353, + 386 + ], + "lines": [ + { + "bbox": [ + 310, + 380, + 353, + 386 + ], + "spans": [ + { + "bbox": [ + 310, + 380, + 353, + 386 + ], + "type": "text", + "content": "Original Reference" + } + ] + } + ], + "index": 46, + "angle": 0, + "type": "image_caption" + } + ], + "index": 45 + }, + { + "type": "image", + "bbox": [ + 356, + 332, + 402, + 378 + ], + "blocks": [ + { + "bbox": [ + 356, + 332, + 402, + 378 + ], + "lines": [ + { + "bbox": [ + 356, + 332, + 402, + 378 + ], + "spans": [ + { + "bbox": [ + 356, + 332, + 402, + 378 + ], + "type": "image", + "image_path": "cd94d5ea6748597fb9c03d4f85d7fe91a324a817224d3587d29b461780e9954f.jpg" + } + ] + } + ], + "index": 47, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 362, + 380, + 395, + 386 + ], + "lines": [ + { + "bbox": [ + 362, + 380, + 395, + 386 + ], + "spans": [ + { + "bbox": [ + 362, + 380, + 395, + 386 + ], + "type": "text", + "content": "SAC by Beard" + } + ] + } + ], + "index": 48, + "angle": 0, + "type": "image_caption" + } + ], + "index": 47 + }, + { + "type": "image", + "bbox": [ + 403, + 332, + 449, + 378 + ], + "blocks": [ + { + "bbox": [ + 403, + 332, + 449, + 378 + ], + "lines": [ + { + "bbox": [ + 403, + 332, + 449, + 378 + ], + "spans": [ + { + "bbox": [ + 403, + 332, + 449, + 378 + ], + "type": "image", + "image_path": "ef8f24a423832c377b502751ea1a6ec5c02870f6c9bf6f57ca957e36e35ccf1e.jpg" + } + ] + } + ], + "index": 49, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 406, + 380, + 447, + 386 + ], + "lines": [ + { + "bbox": [ + 406, + 380, + 447, + 386 + ], + "spans": [ + { + "bbox": [ + 406, + 380, + 447, + 386 + ], + "type": "text", + "content": "SAC by Pale Skin" 
+ } + ] + } + ], + "index": 50, + "angle": 0, + "type": "image_caption" + } + ], + "index": 49 + }, + { + "type": "image", + "bbox": [ + 450, + 332, + 497, + 378 + ], + "blocks": [ + { + "bbox": [ + 450, + 332, + 497, + 378 + ], + "lines": [ + { + "bbox": [ + 450, + 332, + 497, + 378 + ], + "spans": [ + { + "bbox": [ + 450, + 332, + 497, + 378 + ], + "type": "image", + "image_path": "14f558865c5e0601cdaa01a946868a1455678b57180ec80975d4ed1ececa0a91.jpg" + } + ] + } + ], + "index": 51, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 452, + 380, + 495, + 386 + ], + "lines": [ + { + "bbox": [ + 452, + 380, + 495, + 386 + ], + "spans": [ + { + "bbox": [ + 452, + 380, + 495, + 386 + ], + "type": "text", + "content": "SAC by Black Hair" + } + ] + } + ], + "index": 52, + "angle": 0, + "type": "image_caption" + } + ], + "index": 51 + }, + { + "type": "image", + "bbox": [ + 498, + 332, + 545, + 378 + ], + "blocks": [ + { + "bbox": [ + 498, + 332, + 545, + 378 + ], + "lines": [ + { + "bbox": [ + 498, + 332, + 545, + 378 + ], + "spans": [ + { + "bbox": [ + 498, + 332, + 545, + 378 + ], + "type": "image", + "image_path": "d503aca254f8fbd0ba1ddfc316957c35f64f0e256656c8f21634b487f29c57bd.jpg" + } + ] + } + ], + "index": 53, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 501, + 380, + 542, + 386 + ], + "lines": [ + { + "bbox": [ + 501, + 380, + 542, + 386 + ], + "spans": [ + { + "bbox": [ + 501, + 380, + 542, + 386 + ], + "type": "text", + "content": "Multiple-Attribute" + } + ] + } + ], + "index": 54, + "angle": 0, + "type": "image_caption" + } + ], + "index": 53 + }, + { + "type": "image", + "bbox": [ + 308, + 392, + 355, + 438 + ], + "blocks": [ + { + "bbox": [ + 308, + 392, + 355, + 438 + ], + "lines": [ + { + "bbox": [ + 308, + 392, + 355, + 438 + ], + "spans": [ + { + "bbox": [ + 308, + 392, + 355, + 438 + ], + "type": "image", + "image_path": "4175e75073b501fe5f39001245d583575431f4cbce9915be62cc70647315c29c.jpg" + } + ] + } + ], + "index": 55, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 310, + 440, + 353, + 447 + ], + "lines": [ + { + "bbox": [ + 310, + 440, + 353, + 447 + ], + "spans": [ + { + "bbox": [ + 310, + 440, + 353, + 447 + ], + "type": "text", + "content": "Original Reference" + } + ] + } + ], + "index": 56, + "angle": 0, + "type": "image_caption" + } + ], + "index": 55 + }, + { + "type": "image", + "bbox": [ + 356, + 392, + 402, + 438 + ], + "blocks": [ + { + "bbox": [ + 356, + 392, + 402, + 438 + ], + "lines": [ + { + "bbox": [ + 356, + 392, + 402, + 438 + ], + "spans": [ + { + "bbox": [ + 356, + 392, + 402, + 438 + ], + "type": "image", + "image_path": "5d65bb23276b3a3c12c60e02c786251d3ba8fc8c89d311fea013e13ecf6352fa.jpg" + } + ] + } + ], + "index": 57, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 358, + 440, + 400, + 447 + ], + "lines": [ + { + "bbox": [ + 358, + 440, + 400, + 447 + ], + "spans": [ + { + "bbox": [ + 358, + 440, + 400, + 447 + ], + "type": "text", + "content": "SAC by Lips Color" + } + ] + } + ], + "index": 58, + "angle": 0, + "type": "image_caption" + } + ], + "index": 57 + }, + { + "type": "image", + "bbox": [ + 403, + 392, + 450, + 438 + ], + "blocks": [ + { + "bbox": [ + 403, + 392, + 450, + 438 + ], + "lines": [ + { + "bbox": [ + 403, + 392, + 450, + 438 + ], + "spans": [ + { + "bbox": [ + 403, + 392, + 450, + 438 + ], + "type": "image", + "image_path": "3fa0538d80ef39f3390920970221e72096153f9028e5e434697ae973a6488b8f.jpg" + } + ] + } + ], + "index": 59, + "angle": 0, + "type": "image_body" + }, + { + "bbox": 
[ + 408, + 440, + 444, + 447 + ], + "lines": [ + { + "bbox": [ + 408, + 440, + 444, + 447 + ], + "spans": [ + { + "bbox": [ + 408, + 440, + 444, + 447 + ], + "type": "text", + "content": "SAC by Smiling" + } + ] + } + ], + "index": 60, + "angle": 0, + "type": "image_caption" + } + ], + "index": 59 + }, + { + "type": "image", + "bbox": [ + 450, + 392, + 497, + 438 + ], + "blocks": [ + { + "bbox": [ + 450, + 392, + 497, + 438 + ], + "lines": [ + { + "bbox": [ + 450, + 392, + 497, + 438 + ], + "spans": [ + { + "bbox": [ + 450, + 392, + 497, + 438 + ], + "type": "image", + "image_path": "1fa5c86fcdf03b6475a88a8351d7ebf59208fccfc5f637b753b80fc3793cd0d7.jpg" + } + ] + } + ], + "index": 61, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 457, + 440, + 490, + 447 + ], + "lines": [ + { + "bbox": [ + 457, + 440, + 490, + 447 + ], + "spans": [ + { + "bbox": [ + 457, + 440, + 490, + 447 + ], + "type": "text", + "content": "SAC by Bangs" + } + ] + } + ], + "index": 62, + "angle": 0, + "type": "image_caption" + } + ], + "index": 61 + }, + { + "type": "image", + "bbox": [ + 498, + 392, + 545, + 438 + ], + "blocks": [ + { + "bbox": [ + 498, + 392, + 545, + 438 + ], + "lines": [ + { + "bbox": [ + 498, + 392, + 545, + 438 + ], + "spans": [ + { + "bbox": [ + 498, + 392, + 545, + 438 + ], + "type": "image", + "image_path": "06ab8a56e1dd662eb7212a310fc18e69a77dde02e44ad7df8ead56fe5f2145de.jpg" + } + ] + } + ], + "index": 63, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 501, + 440, + 539, + 447 + ], + "lines": [ + { + "bbox": [ + 501, + 440, + 539, + 447 + ], + "spans": [ + { + "bbox": [ + 501, + 440, + 539, + 447 + ], + "type": "text", + "content": "Multiple-Attribute" + } + ] + } + ], + "index": 64, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 306, + 453, + 545, + 497 + ], + "lines": [ + { + "bbox": [ + 306, + 453, + 545, + 497 + ], + "spans": [ + { + "bbox": [ + 306, + 453, + 545, + 497 + ], + "type": "text", + "content": "Figure 9. Multiple-Attribute Counterfactual (MAC, Sec. 4.4) compared with Single-Attribute Counterfactual (SAC, Sec. 4.2). We can see that optimization along multiple directions enable the generation of more powerful counterfactuals." + } + ] + } + ], + "index": 65, + "angle": 0, + "type": "image_caption" + } + ], + "index": 63 + }, + { + "bbox": [ + 304, + 514, + 545, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 514, + 545, + 657 + ], + "spans": [ + { + "bbox": [ + 304, + 514, + 545, + 657 + ], + "type": "text", + "content": "Fig. 7 and Fig. 8 show examples of multi-attribute counterfactual images generated by ZOOM, against human and animal face classifiers. It can be observed that multiple face attributes such as lipsticks or hair color are edited in Fig. 7, and various cat/dog attributes like nose pinkness, eye shape, and fur patterns are edited in Fig. 8. These attribute edits are blended to affect the target model prediction. Appendix B further illustrates ZOOM counterfactual images for semantic segmentation, multi-class classification, and a church classifier. By mutating semantic representations, ZOOM reveals semantic combinations as outliers where the target model underfits." 
+ } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 304, + 660, + 545, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 660, + 545, + 719 + ], + "spans": [ + { + "bbox": [ + 304, + 660, + 545, + 719 + ], + "type": "text", + "content": "In the following sections, we will use the Flip Rate (the percentage of counterfactuals that flipped the model prediction) and Flip Resistance (the percentage of counterfactuals for which the model successfully withheld its prediction) to evaluate the multi-attribute setting." + } + ] + } + ], + "index": 67 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 73, + 71, + 242, + 79 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 71, + 242, + 79 + ], + "spans": [ + { + "bbox": [ + 73, + 71, + 242, + 79 + ], + "type": "text", + "content": "Eyeglasses Classifier (0-No Eyeglasses / 1-Eyeglasses)" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 254, + 71, + 397, + 79 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 254, + 71, + 397, + 79 + ], + "spans": [ + { + "bbox": [ + 254, + 71, + 397, + 79 + ], + "type": "text", + "content": "Perceived Age Classifier (0-Senior / 1-Young)" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 415, + 71, + 526, + 79 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 415, + 71, + 526, + 79 + ], + "spans": [ + { + "bbox": [ + 415, + 71, + 526, + 79 + ], + "type": "text", + "content": "Facial Keypoint Detector (WFLW)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11637" + } + ] + } + ], + "index": 68 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 69, + 166, + 149 + ], + "blocks": [ + { + "bbox": [ + 47, + 69, + 166, + 149 + ], + "lines": [ + { + "bbox": [ + 47, + 69, + 166, + 149 + ], + "spans": [ + { + "bbox": [ + 47, + 69, + 166, + 149 + ], + "type": "image", + "image_path": "3a31fe81e15c74b575c6093e46f0834299f52fbd05a5297a4af77ec9e568073e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 48, + 151, + 286, + 161 + ], + "lines": [ + { + "bbox": [ + 48, + 151, + 286, + 161 + ], + "spans": [ + { + "bbox": [ + 48, + 151, + 286, + 161 + ], + "type": "text", + "content": "(a) Sensitivity histograms generated by ZOOM on attribute combinations." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 167, + 69, + 286, + 149 + ], + "blocks": [ + { + "bbox": [ + 167, + 69, + 286, + 149 + ], + "lines": [ + { + "bbox": [ + 167, + 69, + 286, + 149 + ], + "spans": [ + { + "bbox": [ + 167, + 69, + 286, + 149 + ], + "type": "image", + "image_path": "b0c505f7748d04f811c5d12aed9bbf43c879e1b7ff0a3273c81dba77b6e38c5b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 47, + 162, + 284, + 228 + ], + "blocks": [ + { + "bbox": [ + 47, + 162, + 284, + 228 + ], + "lines": [ + { + "bbox": [ + 47, + 162, + 284, + 228 + ], + "spans": [ + { + "bbox": [ + 47, + 162, + 284, + 228 + ], + "type": "image", + "image_path": "0801f88cca854414287012d32b85e15604c661f7e8ca63dfc868dc3e24766f24.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 229, + 286, + 249 + ], + "lines": [ + { + "bbox": [ + 47, + 229, + 286, + 249 + ], + "spans": [ + { + "bbox": [ + 47, + 229, + 286, + 249 + ], + "type": "text", + "content": "(b) Model diagnosis by ZOOM over 19 attributes. Our framework is generalizable to analyze facial attributes of various domains." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 73, + 254, + 260, + 265 + ], + "lines": [ + { + "bbox": [ + 73, + 254, + 260, + 265 + ], + "spans": [ + { + "bbox": [ + 73, + 254, + 260, + 265 + ], + "type": "text", + "content": "Figure 10. Customizing attribute space for ZOOM." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 275, + 200, + 288 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 275, + 200, + 288 + ], + "spans": [ + { + "bbox": [ + 47, + 275, + 200, + 288 + ], + "type": "text", + "content": "4.4.1 Customizing attribute space" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 289, + 287, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 289, + 287, + 492 + ], + "spans": [ + { + "bbox": [ + 46, + 289, + 287, + 492 + ], + "type": "text", + "content": "In some circumstances, users may finish one round of model diagnosis and proceed to another round by adding new attributes, or trying a new attribute space. The linear nature of attribute editing (Eq. 3) in ZOOM makes it possible to easily add or remove attributes. Table 1 shows the flip rates results when adding new attributes into " + }, + { + "bbox": [ + 46, + 289, + 287, + 492 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 46, + 289, + 287, + 492 + ], + "type": "text", + "content": " for perceived age classifier and big lips classifier. We can observe that a different attribute space will result in different effectiveness of counterfactual images. Also, increasing the search iteration will make counterfactual more effective (see last row). Note that neither re-training the StyleGAN nor user-collection/labeling of data is required at any point in this procedure. Moreover, Fig. 10a shows the model diagnosis histograms generated with combinations of two attributes. Fig. 10b demonstrates the capability of ZOOM in a rich vocabulary setting where we can analyze attributes that are not labeled in existing datasets [16, 29]." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 497, + 212, + 509 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 497, + 212, + 509 + ], + "spans": [ + { + "bbox": [ + 47, + 497, + 212, + 509 + ], + "type": "text", + "content": "4.4.2 Counterfactual training results" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 514, + 287, + 716 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 514, + 287, + 716 + ], + "spans": [ + { + "bbox": [ + 46, + 514, + 287, + 716 + ], + "type": "text", + "content": "This section evaluates regular classifiers trained on CelebA [16] and counterfactually-trained (CT) classifiers on a mix of CelebA data and counterfactual images as described in Sec. 3.5. Table 2 presents accuracy and flip resistance (FR) results. CT outperforms the regular classifier. FR is assessed over 10,000 counterfactual images, with FR-25 and FR-100 denoting Flip Resistance after 25 and 100 optimization iterations, respectively. Both use " + }, + { + "bbox": [ + 46, + 514, + 287, + 716 + ], + "type": "inline_equation", + "content": "\\eta = 0.2" + }, + { + "bbox": [ + 46, + 514, + 287, + 716 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 514, + 287, + 716 + ], + "type": "inline_equation", + "content": "\\epsilon = 30" + }, + { + "bbox": [ + 46, + 514, + 287, + 716 + ], + "type": "text", + "content": ". We can observe that the classifiers after CT are way less likely to be flipped by counterfactual images while maintaining a decent accuracy on the CalebA testset. Our approach robustifies the model by increasing the tolerance toward counterfactuals. Note that CT slightly improves the CelebA classifier when trained on a mixture of CelebA images (original images) and the counterfactual images generated with a generative model trained in the FFHQ [12] images (different domain)." + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 307, + 70, + 544, + 140 + ], + "blocks": [ + { + "bbox": [ + 307, + 70, + 544, + 140 + ], + "lines": [ + { + "bbox": [ + 307, + 70, + 544, + 140 + ], + "spans": [ + { + "bbox": [ + 307, + 70, + 544, + 140 + ], + "type": "table", + "html": "
MethodAC Flip Rate (%)BC Flip Rate (%)
Initialize ZOOM by A61.9583.47
+ Attribute: Beard72.0890.07
+ Attribute: Smiling87.4796.27
+ Attribute: Lipstick90.9694.07
+ Iterations increased to 20092.9194.87
", + "image_path": "4427e736fac19a282ea9a3abd30cb3b7bf9f5849ac3eecf1d3b8a74b1709f21c.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 307, + 186, + 547, + 270 + ], + "blocks": [ + { + "bbox": [ + 306, + 148, + 545, + 182 + ], + "lines": [ + { + "bbox": [ + 306, + 148, + 545, + 182 + ], + "spans": [ + { + "bbox": [ + 306, + 148, + 545, + 182 + ], + "type": "text", + "content": "Table 1. Model flip rate study. The initial attribute space " + }, + { + "bbox": [ + 306, + 148, + 545, + 182 + ], + "type": "inline_equation", + "content": "\\mathcal{A} =" + }, + { + "bbox": [ + 306, + 148, + 545, + 182 + ], + "type": "text", + "content": " {Bangs, Blond Hair, Bushy Eyebrows, Pale Skin, Pointy Nose}. AC is the perceived age classifier and BC is the big lips classifier." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 186, + 547, + 270 + ], + "lines": [ + { + "bbox": [ + 307, + 186, + 547, + 270 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 547, + 270 + ], + "type": "table", + "html": "
AttributeMetricRegular (%)CT (Ours) (%)
Perceived AgeCelebA Accuracy86.1086.29
ZOOM FR-2519.5497.36
ZOOM FR-1009.0495.65
Big LipsCelebA Accuracy74.3675.39
ZOOM FR-2514.1299.19
ZOOM FR-1005.9388.91
", + "image_path": "019909ecbdeabb30964a6186068d59ea0fd09e455a4976227c0994ceed8f2d1e.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 278, + 545, + 323 + ], + "lines": [ + { + "bbox": [ + 305, + 278, + 545, + 323 + ], + "spans": [ + { + "bbox": [ + 305, + 278, + 545, + 323 + ], + "type": "text", + "content": "Table 2. Results of network inference on CelebA original images and ZOOM-generated counterfactual. The CT classifier is significantly less prone to be flipped by counterfactual images, while test accuracy on CelebA remains performant." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 306, + 327, + 457, + 339 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 327, + 457, + 339 + ], + "spans": [ + { + "bbox": [ + 306, + 327, + 457, + 339 + ], + "type": "text", + "content": "5. Conclusion and Discussion" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 342, + 545, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 342, + 545, + 460 + ], + "spans": [ + { + "bbox": [ + 304, + 342, + 545, + 460 + ], + "type": "text", + "content": "In this paper, we present ZOOM, a zero-shot model diagnosis framework that generates sensitivity histograms based on user's input of natural language attributes. ZOOM assesses failures and generates corresponding sensitivity histograms for each attribute. A significant advantage of our technique is its ability to analyze the failures of a target deep model without the need for laborious collection and annotation of test sets. ZOOM effectively visualizes the correlation between attributes and model outputs, elucidating model behaviors and intrinsic biases." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 461, + 546, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 461, + 546, + 652 + ], + "spans": [ + { + "bbox": [ + 304, + 461, + 546, + 652 + ], + "type": "text", + "content": "Our work has three primary limitations. First, users should possess domain knowledge as their input (text of attributes of interest) should be relevant to the target domain. Recall that it is a small price to pay for model evaluation without an annotated test set. Second, StyleGAN2-ADA struggles with generating out-of-domain samples. Nevertheless, our adversarial learning framework can be adapted to other generative models (e.g., stable diffusion), and the generator can be improved by training on more images. We have rigorously tested our generator with various user inputs, confirming its effectiveness for regular diagnosis requests. Currently, we are exploring stable diffusion models to generate a broader range of classes while maintaining the core concept. Finally, we rely on a pre-trained model like CLIP which we presume to be free of bias and capable of encompassing all relevant attributes." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 653, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 653, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 653, + 545, + 713 + ], + "type": "text", + "content": "Acknowledgements: We would like to thank George Cazenavette, Tianyuan Zhang, Yinong Wang, Hanzhe Hu, Bharath Raj for suggestions in the presentation and experiments. We sincerely thank Ken Ziyu Liu, Jiashun Wang, Bowen Li, and Ce Zheng for revisions to improve this work." 
+ } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11638" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 286, + 712 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 53, + 91, + 286, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 286, + 123 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 286, + 123 + ], + "type": "text", + "content": "[1] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. StarGAN v2: Diverse Image Synthesis for Multiple Domains. In CVPR, 2020." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 124, + 286, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 124, + 286, + 156 + ], + "spans": [ + { + "bbox": [ + 53, + 124, + 286, + 156 + ], + "type": "text", + "content": "[2] Edo Collins, Raja Bala, Bob Price, and Sabine Susstrunk. Editing in Style: Uncovering the Local Semantics of GANs. In CVPR, 2020." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 158, + 286, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 158, + 286, + 201 + ], + "spans": [ + { + "bbox": [ + 54, + 158, + 286, + 201 + ], + "type": "text", + "content": "[3] Emily Denton and Ben Hutchinson and Margaret Mitchell and Timnit Gebru and Andrew Zaldivar. Image counterfactual sensitivity analysis for detecting unintended bias. arXiv preprint arXiv:1906.06439, 2019." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 54, + 202, + 286, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 202, + 286, + 224 + ], + "spans": [ + { + "bbox": [ + 54, + 202, + 286, + 224 + ], + "type": "text", + "content": "[4] Ian J. Goodfellow, Jonathon Shlens, and Christian Szegedy. Explaining and Harnessing Adversarial Examples. 2014." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 225, + 286, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 225, + 286, + 256 + ], + "spans": [ + { + "bbox": [ + 54, + 225, + 286, + 256 + ], + "type": "text", + "content": "[5] Yash Goyal, Ziyan Wu, Jan Ernst, Dhruv Batra, Devi Parikh, and Stefan Lee. Counterfactual Visual Explanations. In ICML, 2019." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 258, + 286, + 290 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 258, + 286, + 290 + ], + "spans": [ + { + "bbox": [ + 53, + 258, + 286, + 290 + ], + "type": "text", + "content": "[6] Z. He, W. Zuo, M. Kan, S. Shan, and X. Chen. AttGAN: Facial Attribute Editing by Only Changing What You Want. In IEEE TIP, 2019." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 291, + 286, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 291, + 286, + 334 + ], + "spans": [ + { + "bbox": [ + 53, + 291, + 286, + 334 + ], + "type": "text", + "content": "[7] Fangzhou Hong, Mingyuan Zhang, Liang Pan, Zhongang Cai, Lei Yang, and Ziwei Liu. AvatarCLIP: Zero-Shot Text-Driven Generation and Animation of 3D Avatars. In ACM TOG, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 335, + 286, + 368 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 335, + 286, + 368 + ], + "spans": [ + { + "bbox": [ + 53, + 335, + 286, + 368 + ], + "type": "text", + "content": "[8] Erik Härkönen, Aaron Hertzmann, Jaakko Lehtinen, and Sylvain Paris. GANSpace: Discovering Interpretable GAN Controls. In NeurIPS, 2020." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 369, + 286, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 369, + 286, + 402 + ], + "spans": [ + { + "bbox": [ + 53, + 369, + 286, + 402 + ], + "type": "text", + "content": "[9] Ameya Joshi, Amitangshu Mukherjee, Soumik Sarkar, and Chinmay Hegde. Semantic Adversarial Attacks: Parametric Transformations That Fool Deep Classifiers. In ICCV, 2019." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 403, + 286, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 403, + 286, + 435 + ], + "spans": [ + { + "bbox": [ + 48, + 403, + 286, + 435 + ], + "type": "text", + "content": "[10] Kimmo Karkkainen and Jungseock Joo. FairFace: Face Attribute Dataset for Balanced Race, Gender, and Age for Bias Measurement and Mitigation. In WACV, 2021." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 436, + 286, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 436, + 286, + 468 + ], + "spans": [ + { + "bbox": [ + 48, + 436, + 286, + 468 + ], + "type": "text", + "content": "[11] Tero Karras, Miika Aittala, Janne Hellsten, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Training Generative Adversarial Networks with Limited Data. In NeurIPS, 2020." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 469, + 286, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 469, + 286, + 501 + ], + "spans": [ + { + "bbox": [ + 48, + 469, + 286, + 501 + ], + "type": "text", + "content": "[12] Tero Karras, Samuli Laine, and Timo Aila. A Style-Based Generator Architecture for Generative Adversarial Networks. In CVPR, 2019." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 502, + 286, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 502, + 286, + 556 + ], + "spans": [ + { + "bbox": [ + 48, + 502, + 286, + 556 + ], + "type": "text", + "content": "[13] Oran Lang, Yossi Gandelsman, Michal Yarom, Yoav Wald, Gal Elidan, Avinatan Hassidim, William T. Freeman, Phillip Isola, Amir Globerson, Michal Irani, and Inbar Mosseri. Explaining in Style: Training a GAN To Explain a Classifier in StyleSpace. In ICCV, 2021." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 558, + 286, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 558, + 286, + 601 + ], + "spans": [ + { + "bbox": [ + 48, + 558, + 286, + 601 + ], + "type": "text", + "content": "[14] Bo Li, Qiulin Wang, Jiquan Pei, Yu Yang, and Xiangyang Ji. Which Style Makes Me Attractive? 
Interpretable Control Discovery and Counterfactual Explanation on StyleGAN. arXiv preprint arXiv:2201.09689, 2022." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 602, + 286, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 602, + 286, + 624 + ], + "spans": [ + { + "bbox": [ + 48, + 602, + 286, + 624 + ], + "type": "text", + "content": "[15] Zhiheng Li and Chenliang Xu. Discover the Unknown Biased Attribute of an Image Classifier. In ICCV, 2021." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 624, + 286, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 286, + 646 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 286, + 646 + ], + "type": "text", + "content": "[16] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep Learning Face Attributes in the Wild. In ICCV, 2015." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 48, + 647, + 286, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 286, + 679 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 286, + 679 + ], + "type": "text", + "content": "[17] Jinqi Luo, Zhaoning Wang, Chen Henry Wu, Dong Huang, and Fernando De la Torre. Semantic image attack for visual model diagnosis. arXiv preprint arXiv:2303.13010, 2023." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 48, + 681, + 286, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 681, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 681, + 286, + 712 + ], + "type": "text", + "content": "[18] Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. Towards Deep Learning Models Resistant to Adversarial Attacks. In ICLR, 2018." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 37, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "type": "text", + "content": "[19] Joanna Materzynska, Antonio Torralba, and David Bau. Disentangling Visual and Written Concepts in CLIP. In CVPR, 2022." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 107, + 545, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 140 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 140 + ], + "type": "text", + "content": "[20] Ramaravind K. Mothilal, Amit Sharma, and Chenhao Tan. Explaining Machine Learning Classifiers through Diverse Counterfactual Explanations. In ACM FAccT, 2020." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 141, + 545, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 141, + 545, + 175 + ], + "spans": [ + { + "bbox": [ + 307, + 141, + 545, + 175 + ], + "type": "text", + "content": "[21] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. StyleCLIP: Text-Driven Manipulation of StyleGAN Imagery. In ICCV, 2021." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 177, + 545, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 177, + 545, + 209 + ], + "spans": [ + { + "bbox": [ + 307, + 177, + 545, + 209 + ], + "type": "text", + "content": "[22] Ben Poole, Ajay Jain, Jonathan T. Barron, and Ben Mildenhall. 
DreamFusion: Text-to-3D using 2D Diffusion. arXiv preprint arXiv:2209.14988, 2022." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 211, + 545, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 211, + 545, + 255 + ], + "spans": [ + { + "bbox": [ + 307, + 211, + 545, + 255 + ], + "type": "text", + "content": "[23] Haonan Qiu, Chaowei Xiao, Lei Yang, Xinchen Yan, Honglak Lee, and Bo Li. SemanticAdv: Generating Adversarial Examples via Attribute-conditioned Image Editing. In ECCV, 2020." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 256, + 545, + 322 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 256, + 545, + 322 + ], + "spans": [ + { + "bbox": [ + 307, + 256, + 545, + 322 + ], + "type": "text", + "content": "[24] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning Transferable Visual Models From Natural Language Supervision. In ICML, 2021." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 323, + 545, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 323, + 545, + 357 + ], + "spans": [ + { + "bbox": [ + 307, + 323, + 545, + 357 + ], + "type": "text", + "content": "[25] Vikram V. Ramaswamy, Sunnie S. Y. Kim, and Olga Russakovsky. Fair Attribute Classification Through Latent Space De-Biasing. In CVPR, 2021." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 358, + 545, + 379 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 358, + 545, + 379 + ], + "spans": [ + { + "bbox": [ + 307, + 358, + 545, + 379 + ], + "type": "text", + "content": "[26] Axel Sauer and Andreas Geiger. Counterfactual Generative Networks. In ICLR, 2021." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 381, + 545, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 381, + 545, + 415 + ], + "spans": [ + { + "bbox": [ + 307, + 381, + 545, + 415 + ], + "type": "text", + "content": "[27] Yujun Shen, Ceyuan Yang, Xiaou Tang, and Bolei Zhou. InterFaceGAN: Interpreting the Disentangled Face Representation Learned by GANs. In IEEE TPAMI, 2020." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 416, + 545, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 416, + 545, + 437 + ], + "spans": [ + { + "bbox": [ + 307, + 416, + 545, + 437 + ], + "type": "text", + "content": "[28] Yujun Shen and Bolei Zhou. Closed-Form Factorization of Latent Semantics in GANs. In CVPR, 2021." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 440, + 545, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 440, + 545, + 483 + ], + "spans": [ + { + "bbox": [ + 307, + 440, + 545, + 483 + ], + "type": "text", + "content": "[29] Philipp Terhörst, Daniel Fährmann, Jan Niklas Kolf, Naser Damer, Florian Kirchbuchner, and Arjan Kuijper. MAAD-Face: A Massively Annotated Attribute Dataset for Face Images. In IEEE TIFS, 2021." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 485, + 545, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 485, + 545, + 518 + ], + "spans": [ + { + "bbox": [ + 307, + 485, + 545, + 518 + ], + "type": "text", + "content": "[30] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. 
CLIP-NeRF: Text-and-Image Driven Manipulation of Neural Radiance Fields. In CVPR, 2022." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 520, + 545, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 520, + 545, + 574 + ], + "spans": [ + { + "bbox": [ + 307, + 520, + 545, + 574 + ], + "type": "text", + "content": "[31] Jingdong Wang, Ke Sun, Tianheng Cheng, Borui Jiang, Chaorui Deng, Yang Zhao, Dong Liu, Yadong Mu, Mingkui Tan, Xinggang Wang, Wenyu Liu, and Bin Xiao. Deep High-Resolution Representation Learning for Visual Recognition. In IEEE TPAMI, 2019." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 307, + 576, + 545, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 576, + 545, + 609 + ], + "spans": [ + { + "bbox": [ + 307, + 576, + 545, + 609 + ], + "type": "text", + "content": "[32] Zhou Wang, A.C. Bovik, H.R. Sheikh, and E.P. Simoncelli. Image Quality Assessment: from Error Visibility to Structural Similarity. In IEEE TIP, 2004." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 307, + 611, + 545, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 611, + 545, + 643 + ], + "spans": [ + { + "bbox": [ + 307, + 611, + 545, + 643 + ], + "type": "text", + "content": "[33] Wayne Wu, Chen Qian, Shuo Yang, Quan Wang, Yici Cai, and Qiang Zhou. Look at Boundary: A Boundary-Aware Face Alignment Algorithm. In CVPR, 2018." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 307, + 646, + 545, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 646, + 545, + 678 + ], + "spans": [ + { + "bbox": [ + 307, + 646, + 545, + 678 + ], + "type": "text", + "content": "[34] Zongze Wu, Dani Lischinski, and Eli Shechtman. StyleSpace Analysis: Disentangled Controls for StyleGAN Image Generation. In CVPR, 2021." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 307, + 680, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 680, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 680, + 545, + 712 + ], + "type": "text", + "content": "[35] Weihao Xia, Yulun Zhang, Yujiu Yang, Jing-Hao Xue, Bolei Zhou, and Ming-Hsuan Yang. GAN Inversion: A Survey. In IEEE TPAMI, 2022." + } + ] + } + ], + "index": 36 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "text", + "content": "11639" + } + ] + } + ], + "index": 38 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 150 + ], + "type": "list", + "angle": 0, + "index": 2, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 105 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 105 + ], + "type": "text", + "content": "[36] Chaowei Xiao, Bo Li, Jun-yan Zhu, Warren He, Mingyan Liu, and Dawn Song. Generating Adversarial Examples with Adversarial Networks. In *IJCAI*, 2018." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 107, + 287, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 107, + 287, + 150 + ], + "spans": [ + { + "bbox": [ + 48, + 107, + 287, + 150 + ], + "type": "text", + "content": "[37] Mingyuan Zhang, Zhongang Cai, Liang Pan, Fangzhou Hong, Xinying Guo, Lei Yang, and Ziwei Liu. MotionDiffuse: Text-Driven Human Motion Generation with Diffusion Model. arXiv preprint arXiv:2208.15001, 2022." + } + ] + } + ], + "index": 1 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "text", + "content": "11640" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/Zero-Shot Noise2Noise_ Efficient Image Denoising Without Any Data/2d536e4a-ee00-4291-84f0-1f5cbbcd1b0f_content_list.json b/2023/Zero-Shot Noise2Noise_ Efficient Image Denoising Without Any Data/2d536e4a-ee00-4291-84f0-1f5cbbcd1b0f_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..f19822a72ba369afc975b820f06ca2c791e4f074 --- /dev/null +++ b/2023/Zero-Shot Noise2Noise_ Efficient Image Denoising Without Any Data/2d536e4a-ee00-4291-84f0-1f5cbbcd1b0f_content_list.json @@ -0,0 +1,1645 @@ +[ + { + "type": "text", + "text": "Zero-Shot Noise2Noise: Efficient Image Denoising without any Data", + "text_level": 1, + "bbox": [ + 155, + 130, + 841, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Youssef Mansour and Reinhard Heckel \nTechnical University of Munich and Munich Center for Machine Learning \nMunich, Germany", + "bbox": [ + 205, + 181, + 790, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "y.mansour@tum.de, reinhard.heckel@tum.de", + "bbox": [ + 316, + 237, + 673, + 250 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 259, + 286, + 336, + 301 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recently, self-supervised neural networks have shown excellent image denoising performance. However, current dataset free methods are either computationally expensive, require a noise model, or have inadequate image quality. In this work we show that a simple 2-layer network, without any training data or knowledge of the noise distribution, can enable high-quality image denoising at low computational cost. Our approach is motivated by Noise2Noise and Neighbor2Neighbor and works well for denoising pixel-wise independent noise. Our experiments on artificial, real-world camera, and microscope noise show that our method termed ZS-N2N (Zero Shot Noise2Noise) often outperforms existing dataset-free methods at a reduced cost, making it suitable for use cases with scarce data availability and limited compute.", + "bbox": [ + 111, + 319, + 485, + 561 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 112, + 594, + 243, + 609 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Image denoising is the process of removing distortions from images, to enhance them visually and to reconstruct fine details. 
The latter is especially important for medical images, where fine details are necessary for an accurate diagnosis.", + "bbox": [ + 111, + 621, + 483, + 696 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Current state-of-the-art image denoising techniques rely on large data sets of clean-noisy image pairs and often consist of a neural network trained to map the noisy to the clean image. The drawbacks of dataset-based methods are that data collection, even without ground truths, is expensive and time-consuming, and second, a network trained on dataset suffers from a performance drop if the test images come from a different distribution of images. These drawbacks motivate research in dataset-free methods.", + "bbox": [ + 111, + 699, + 482, + 849 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "All current zero-shot models are either suitable only for specific noise distributions and need previous knowledge of the noise level [7, 20], require a lot of compute (time, memory, GPU) to denoise an image [24], have", + "bbox": [ + 111, + 851, + 483, + 912 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "poor denoising quality [28], or do not generalise to different noise distributions or levels [15, 24]. We propose a method that builds on the recent Noise2Noise [17] and Neighbour2Neighbour [12] papers and aims to circumvent these issues to reach a good trade-off between denoising quality and computational resources. We make only minimal assumptions on the noise statistics (pixel-wise independence), and do not require training data. Our method does not require an explicit noise model, and is therefore suitable for various noise types and can be employed when the noise distribution or level are unknown. The only assumption we make about the noise is that it is unstructured and has zero mean.", + "bbox": [ + 511, + 287, + 883, + 482 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In a nutshell, we convolve the noisy test image with two fixed filters, which yields two downsampled images. We next train a lightweight network with regularization to map one downsampled image to the other. Our strategy builds on the recent Noise2Noise [17] and Neighbour2Neighbour [12] papers, however we take those methods one step further by enabling denoising without any training data. Even with an extremely small network and without any training data, our method achieves good denoising quality and often even outperforms large networks trained on datasets.", + "bbox": [ + 511, + 484, + 883, + 648 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The key attributes of our work are as follows:", + "bbox": [ + 532, + 650, + 834, + 664 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "- Compute. Dataset free neural network based algorithms [24, 28] require solving an optimization problem involving millions of parameters to denoise an image. The huge parameter count requires large memory storage, advanced GPUs, and long denoising times. In this work we show that our method, that utilizes a simple 2 layer network, with only $20\\mathrm{k}$ parameters, can often outperform networks with millions of parameters while reducing the computational cost significantly and being easily executable on a CPU.", + "bbox": [ + 532, + 675, + 883, + 843 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "- Generalisation. Existing zero-shot methods often to do not generalise well. 
For example, BM3D [7], a classical denoising algorithm does not generalize well to non-Gaussian noise, and blind spot net", + "bbox": [ + 532, + 851, + 885, + 912 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "14018", + "bbox": [ + 480, + 950, + 519, + 963 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/2e40134f5190ea5ff9d19f16d882ce0c33535e6e560d24636f8dec3bd8054069.jpg", + "image_caption": [ + "Figure 1. Left and middle plots: PSNR scores for Gaussian and Poisson denoising for different noise levels. Note BM3D's poor performance on Poisson compared to Gaussian noise. Right plot: Time required in seconds to denoise one $256 \\times 256$ colour image on CPU and GPU, tested on Poisson noise with $\\lambda = 50$ . Except for BM3D, all methods have shorter times on GPU. Only S2S in some cases outperforms our method, however it is about 100 times slower. S2S* denotes the ensemble free version of S2S." + ], + "image_footnote": [], + "bbox": [ + 117, + 90, + 500, + 200 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "works [15] [24] (discussed later in detail) fail to denoise well in the regime of low noise level. Extensive experiments on different noise distributions and noise levels show that our proposed approach can generalise better to different conditions better than existing methods.", + "bbox": [ + 143, + 381, + 483, + 472 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, our proposed method is dataset and noise model-free, and achieves a better trade-off between generalization, denoising quality, and computational resources compared to existing zero-shot methods, as displayed in Figure 1. We compare to the standard zero shot baselines, including BM3D, and the recent neural network-based algorithms DIP [28] and S2S [24]. Only BM3D is faster than our method but achieves poor results on non-Gaussian noise. Only S2S sometimes outperforms our method, but is orders of magnitude slower, often fails on low noise levels [14], and requires assembling to achieve acceptable performance.", + "bbox": [ + 111, + 489, + 483, + 672 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 112, + 686, + 253, + 704 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Supervised methods achieve state-of-the-art performance by training a network end-to-end to map a noisy image to a clean one. Networks that work well are CNNs [3, 32], vision transformers [19], or MLP based architectures [21, 27].", + "bbox": [ + 111, + 714, + 482, + 789 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Noise2Noise [17] yields excellent performance from training on two noisy images of the same static scene, without any ground truth images. Given that the noise has zero mean, training a network to map one noisy image to another noisy image of the same scene performs as well as mapping to the ground truth. 
While having access to a pair of noisy images of the same scene is in practice hard to achieve, the Noise2Noise method", + "bbox": [ + 111, + 791, + 482, + 911 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "has motivated further research in self-supervised methods [12] that require only single noisy images.", + "bbox": [ + 511, + 90, + 882, + 121 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Self-supervised methods are trained on datasets consisting of only noisy images. Noise2Void [15] and Noise2Self [2] are two blind spot prediction approaches for image denoising. Given a set of noisy images $\\{\\mathbf{y}^i\\}_{1}^n$ , The idea is to minimize the loss $\\frac{1}{n}\\sum_{i=1}^{n}\\mathcal{L}(f_{\\theta}(M^i(\\mathbf{y}^i)),\\mathbf{y}^i)$ , where $\\mathcal{L}$ is a loss function, $f_{\\theta}$ is a network, and $M^i$ is an operator that masks some pixels, hence the name blind spot. Assuming that the neighbouring pixels of a clean image are highly correlated, and that the noise pixels are independent, a network trained to reconstruct a masked pixel, can only predict the signal value from the neighbouring visible pixels, but not the noise.", + "bbox": [ + 511, + 138, + 883, + 334 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recently, several works [4, 26, 34] attempted to use Stein's unbiased risk estimator for Gaussian denoising. Such methods work well only for Gaussian noise and require the noise level to be known in advance. A more general framework is Noisier2Noise [23] which works for any noise distribution, but the distribution must be known in advance.", + "bbox": [ + 511, + 335, + 883, + 439 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The newly proposed Neighbour2Neighbour [12] builds on the Noise2Noise [17] method, where the assumptions are that the noise has zero mean and is pixel-wise independent. Neighbour2Neighbour extends Noise2Noise by enabling training without noisy image pairs. It does so by sub-sampling single noisy images to create pairs of noisy images, where Noise2Noise can be applied.", + "bbox": [ + 511, + 441, + 883, + 561 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Zero-Shot/ Dataset free Methods. Most similar to our work is Noise2Fast [18], which also builds on Noise2Noise and Neighbour2Neighbour to achieve dataset-free denoising. However, the method is only evaluated on grayscale images, uses a relatively large network, and requires an early stopping criterion. Our work improves on Noise2Fast by easily denoising grayscale or RGB images, and designing a consistency loss that alleviates the need to early stop. Moreover, we use a much smaller network which saves compute. Specifically, our network is twelve times smaller and a forward pass through it is seven times faster. To the best of our knowledge, our work is the first to utilize a small 2-layer network and achieve competitive quality for image restoration. We show that on grayscale images, our method despite achieving similar scores to Noise2Fast [18], produces better quality images. 
This is likely due to Noise2Fast dropping pixel values when downsampling, whereas our method always keeps all information retained.", + "bbox": [ + 511, + 579, + 883, + 878 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Besides this work, classical non-learning-based methods, such as BM3D [7] and Anscombe [20], work", + "bbox": [ + 511, + 881, + 883, + 912 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "14019", + "bbox": [ + 480, + 950, + 517, + 962 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "well for Gaussian and Poisson noise, respectively, and require the noise level as an input.", + "bbox": [ + 111, + 90, + 480, + 119 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "DIP (Deep Image Prior) [28] and its variants such as the Deep Decoder [11] build on the fact that CNNs have an inductive bias towards natural images, in that they can fit natural images much faster than noise. Therefore, a network trained, with early stopping, to map a random input to the noisy image will denoise the image. The denoising performance of DIP is often poor, and is dependent on the number of training epochs, which is hard to determine in advance.", + "bbox": [ + 111, + 122, + 480, + 256 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Self2Self [24] utilizes the idea of the blind spot networks (reconstructing masked pixels) on a single image, but with dropout ensembling. However, this method is not computationally efficient, in that it requires long durations to denoise an image. According to the authors, it takes 1.2 hours to denoise one $256 \\times 256$ image on a GPU. Compared to other blind spot networks, Self2Self achieves significantly better denoising scores, since it relies on ensembling, i.e., averaging the output of several networks. However, ensemble learning over smoothens the image, causing a loss of some details, despite the improvement in PSNR scores [8].", + "bbox": [ + 111, + 258, + 480, + 440 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Similar to almost all supervised and self-supervised methods, both Self2Self and DIP use a UNet [25] or a variant of it as the backbone network in their architectures. A UNet typically has millions of parameters, making it unsuitable for compute limited applications. Our work departs from this scheme, by designing a shallow and simple network with few parameters.", + "bbox": [ + 111, + 441, + 480, + 547 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 112, + 564, + 202, + 579 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our method builds on the Noise2Noise [17], for training a network on pairs of noisy images, and the Neighbour2Neighbour (NB2NB) [12], which generates such pairs from a single noisy image. Our main idea is to generate a pair of noisy images from a single noisy image and train a small network only on this pair. We start with a brief summary of Noise2Noise and then introduce our method.", + "bbox": [ + 111, + 590, + 480, + 710 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Background: Noise2Noise and Neighbour2Neighbour", + "text_level": 1, + "bbox": [ + 112, + 724, + 480, + 756 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Supervised denoising methods are typically neural networks $f_{\\theta}$ that map a noisy image $\\mathbf{y}$ to an estimate $f_{\\theta}(\\mathbf{y})$ of the clean image $\\mathbf{x}$ . 
Supervised denoising methods are typically trained on pairs of clean images $\\mathbf{x}$ and noisy measurements $\\mathbf{y} = \\mathbf{x} + \\mathbf{e}$ , where $\\mathbf{e}$ is noise. We refer to supervised denoising as Noise2Clean (N2C).", + "bbox": [ + 111, + 765, + 480, + 854 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Neural networks can also be trained on different noisy observations of the same clean image. Noise2Noise (N2N) [17] assumes access to a set of pairs of noisy images $\\mathbf{y}_1 = \\mathbf{x} + \\mathbf{e}_1, \\mathbf{y}_2 = \\mathbf{x} + \\mathbf{e}_2$", + "bbox": [ + 111, + 856, + 480, + 912 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\mathbf{e}_1, \\mathbf{e}_2$ are independent noise vectors. A network $f_{\\theta}$ is then trained to minimize the empirical risk $\\frac{1}{n} \\sum_{i=1}^{n} \\left\\| f_{\\theta}(\\mathbf{y}_1^i) - \\mathbf{y}_2^i \\right\\|_2^2$ . This makes sense, since in expectation over such noisy instances, and assuming zero mean noise, training a network in a supervised manner to map a noisy image to another noisy image is equivalent to mapping it to a clean image i.e.,", + "bbox": [ + 511, + 90, + 882, + 193 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\underset {\\theta} {\\arg \\min } \\mathbb {E} \\left[ \\| f _ {\\theta} (\\mathbf {y} _ {1}) - \\mathbf {x} \\| _ {2} ^ {2} \\right] = \\underset {\\theta} {\\arg \\min } \\mathbb {E} \\left[ \\| f _ {\\theta} (\\mathbf {y} _ {1}) - \\mathbf {y} _ {2} \\| _ {2} ^ {2} \\right]. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 513, + 202, + 885, + 238 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The proof is given in the supplementary material.", + "bbox": [ + 532, + 239, + 857, + 253 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In theory N2N training reaches the same performance as N2C training if the dataset is infinitely large. In practice, since the training set is limited in size, N2N falls slightly short of N2C. For example, N2N training with a UNet on 50k images gives a performance drop of only about $0.02\\mathrm{dB}$ compared to N2C with a UNet.", + "bbox": [ + 511, + 255, + 882, + 344 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Despite the great performance of N2N, its usability is often limited, since it is difficult to obtain a pair of noisy images of the same static scene. For instance, the object being captured might be non-static, or the lighting conditions change rapidly.", + "bbox": [ + 511, + 345, + 882, + 421 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Neighbour2Neighbour (NB2NB) [12] extends N2N and allows training only on a set of single noisy images, by sub-sampling a noisy image to create a pair of noisy images. Similar to N2N, NB2NB exhibits strong denoising performance when trained on many images.", + "bbox": [ + 511, + 422, + 882, + 498 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Zero-Shot Noise2Noise", + "text_level": 1, + "bbox": [ + 513, + 508, + 723, + 523 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our work extends Noise2Noise [17] and Neighbour2Neighbour [12] by enabling training on only one single noisy image. 
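Eq. (1) is stated with its proof deferred to the supplementary material; the following one-step expansion is an illustrative sketch (not necessarily the authors' exact argument), assuming the target noise $\mathbf{e}_2$ has zero mean and is independent of the input image $\mathbf{y}_1$:

```latex
\begin{aligned}
\mathbb{E}\big[\|f_\theta(\mathbf{y}_1)-\mathbf{y}_2\|_2^2\big]
  &= \mathbb{E}\big[\|f_\theta(\mathbf{y}_1)-\mathbf{x}-\mathbf{e}_2\|_2^2\big] \\
  &= \mathbb{E}\big[\|f_\theta(\mathbf{y}_1)-\mathbf{x}\|_2^2\big]
     - 2\,\mathbb{E}\big[\langle f_\theta(\mathbf{y}_1)-\mathbf{x},\ \mathbf{e}_2\rangle\big]
     + \mathbb{E}\big[\|\mathbf{e}_2\|_2^2\big].
\end{aligned}
```

The cross term vanishes because $\mathbf{e}_2$ is zero-mean and independent of $\mathbf{y}_1$, and the remaining noise-energy term does not depend on $\theta$, so the noisy-target and clean-target risks share the same minimizer, which is the statement of Eq. (1).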
To avoid overfitting to the single image, we use a very shallow network and an explicit regularization term.", + "bbox": [ + 511, + 532, + 882, + 607 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Almost all self- or un-supervised denoising methods, including ours, rely on the premise that a clean natural image has different attributes than random noise. As shown in [12], a noisy image can be decomposed into a pair of downsampled images. Based on the premise that nearby pixels of a clean image are highly correlated and often have similar values, while the noise pixels are unstructured and independent, the downsampled pair of noisy images has similar signal but independent noise. This pair can therefore serve as an approximation of two noisy observations of the same scene, where one observation is used as the input, and the other as the target, as in N2N.", + "bbox": [ + 511, + 609, + 882, + 803 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our approach is to first decompose the image into a pair of downsampled images and second train a lightweight network with regularization to map one downsampled image to the other. Applying the so-trained network to a noisy image yields the denoised image. We first explain how we generate the downsampled images, and then how we fit the network.", + "bbox": [ + 511, + 806, + 882, + 911 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "14020", + "bbox": [ + 480, + 950, + 517, + 962 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Image Pair Down sampler The pair down sampler takes as input an image $\\mathbf{y}$ of size $H\\times W\\times C$ and generates two images $D_{1}(\\mathbf{y})$ and $D_{2}(\\mathbf{y})$ , each of size $H / 2\\times W / 2\\times C$ . The down sampler generates those images by dividing the image into non-overlapping patches of size $2\\times 2$ , taking an average of the diagonal pixels of each patch and assigning it to the first low-resolution image, then the average of the anti-diagonal pixels and assigning it to the second low-resolution image. See Figure 2 for an illustration of the pair down sampler.", + "bbox": [ + 111, + 90, + 480, + 241 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The downsample is implemented with convolutions as follows. The first low-resolution image is obtained by applying a 2D convolution with stride two and fixed kernel $\\mathbf{k}_1 = \\begin{bmatrix} 0 & 0.5 \\\\ 0.5 & 0 \\end{bmatrix}$ to the original image as $D_1(\\mathbf{y}) = \\mathbf{y} * \\mathbf{k}_1$ , and the second image is obtained by applying a 2D convolution with stride two and fixed kernel $\\mathbf{k}_2 = \\begin{bmatrix} 0.5 & 0 \\\\ 0 & 0.5 \\end{bmatrix}$ to the original image as $D_2(\\mathbf{y}) = \\mathbf{y} * \\mathbf{k}_2$ . The convolutions are implemented channel-wise and therefore the downsampling scheme is applicable to any arbitrary number of input channels.", + "bbox": [ + 111, + 242, + 483, + 421 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/072dc66dcb427118e22a6c61690194a433223b48e11010108bfa7e63e6b1f977.jpg", + "image_caption": [ + "Figure 2. The Image Pair Downsampler decomposes an image into two images of half the spatial resolution by averaging diagonal pixels of $2 \\times 2$ non-overlapping patches. In the above example the input is a $4 \\times 4$ image, and the output is two $2 \\times 2$ images." 
+ ], + "image_footnote": [], + "bbox": [ + 124, + 431, + 468, + 568 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Zero-shot-image denoising method. Given a test image $\\mathbf{y}$ to denoise, our method is conceptually similar to first fitting a small image-to-image neural network $f_{\\theta}$ to map the first downsampled image $D_{1}(\\mathbf{y})$ to the second one, $D_{2}(\\mathbf{y})$ by minimizing the loss", + "bbox": [ + 111, + 680, + 482, + 756 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} (\\boldsymbol {\\theta}) = \\left\\| f _ {\\boldsymbol {\\theta}} \\left(D _ {1} (\\mathbf {y})\\right) - D _ {2} (\\mathbf {y}) \\right\\| _ {2} ^ {2}. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 763, + 480, + 782 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Once we fitted the network, we can apply it to the original noisy observation to estimate the denoised image as $\\hat{\\mathbf{x}} = f_{\\hat{\\theta}}(\\mathbf{y})$", + "bbox": [ + 111, + 791, + 482, + 835 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "However, our experiments showed that residual learning, a symmetric loss, and an additional consistency-enforcing term are critical for good performance. We next explain the elements of our loss function. In residual learning, the network is optimized to fit", + "bbox": [ + 111, + 837, + 483, + 912 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "the noise instead of the image. The loss then becomes", + "bbox": [ + 511, + 90, + 870, + 106 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} (\\boldsymbol {\\theta}) = \\left\\| D _ {1} (\\mathbf {y}) - f _ {\\boldsymbol {\\theta}} \\left(D _ {1} (\\mathbf {y})\\right) - D _ {2} (\\mathbf {y}) \\right\\| _ {2} ^ {2}. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 553, + 116, + 882, + 135 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Following [6], where a symmetric loss was used in the context of self-supervised pretraining of a siamese network, we additionally adopt a symmetric loss, which yields the residual loss:", + "bbox": [ + 511, + 146, + 883, + 207 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {\\text {r e s .}} (\\boldsymbol {\\theta}) = \\frac {1}{2} \\left(\\left\\| D _ {1} (\\mathbf {y}) - f _ {\\boldsymbol {\\theta}} \\left(D _ {1} (\\mathbf {y})\\right) - D _ {2} (\\mathbf {y}) \\right\\| _ {2} ^ {2} + \\right. \\\\ \\left. \\left\\| D _ {2} (\\mathbf {y}) - f _ {\\boldsymbol {\\theta}} \\left(D _ {2} (\\mathbf {y})\\right) - D _ {1} (\\mathbf {y}) \\right\\| _ {2} ^ {2}\\right). \\tag {4} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 527, + 217, + 880, + 285 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In addition, we enforce consistency by ensuring that first denoising the image $\\mathbf{y}$ and then downsampling it, is similar to what we get when first downsampling $\\mathbf{y}$ and then denoising it, i.e., we consider a loss of the form:", + "bbox": [ + 511, + 287, + 883, + 347 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} (\\boldsymbol {\\theta}) = \\left\\| D (\\mathbf {y}) - f _ {\\boldsymbol {\\theta}} (D (\\mathbf {y})) - D (\\mathbf {y} - f _ {\\boldsymbol {\\theta}} (\\mathbf {y})) \\right\\| _ {2} ^ {2}. 
\\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 524, + 357, + 882, + 377 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Again adopting a symmetric loss, the consistency loss becomes:", + "bbox": [ + 511, + 386, + 883, + 412 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {\\mathrm {c o n s .}} (\\pmb {\\theta}) = \\frac {1}{2} \\Big (\\| D _ {1} (\\mathbf {y}) - f _ {\\pmb {\\theta}} (D _ {1} (\\mathbf {y})) - D _ {1} (\\mathbf {y} - f _ {\\pmb {\\theta}} (\\mathbf {y})) \\| _ {2} ^ {2} \\\\ \\left. + \\left\\| D _ {2} (\\mathbf {y}) - f _ {\\boldsymbol {\\theta}} \\left(D _ {2} (\\mathbf {y})\\right) - D _ {2} (\\mathbf {y} - f _ {\\boldsymbol {\\theta}} (\\mathbf {y})) \\right\\| _ {2} ^ {2}\\right). \\tag {6} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 514, + 421, + 888, + 483 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Note that for the residual loss, the network only has the downsampled images as input. Only in the consistency loss, the network gets to see the image in full spatial resolution. Including the consistency loss enables better denoising performance and helps to avoid overfitting. It can therefore be seen as a regularizing term.", + "bbox": [ + 511, + 484, + 883, + 573 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In summary, we minimize the loss $\\mathcal{L}(\\pmb{\\theta}) = \\mathcal{L}_{\\mathrm{res.}}(\\pmb{\\theta}) + \\mathcal{L}_{\\mathrm{cons.}}(\\pmb{\\theta})$ using gradient descent, which yields the network parameters $\\hat{\\pmb{\\theta}}$ . With those, we estimate the denoised image as $\\hat{\\mathbf{x}} = \\mathbf{y} - f_{\\hat{\\pmb{\\theta}}}(\\mathbf{y})$ . Note that only the network parameters $\\pmb{\\theta}$ are optimized during the gradient descent updates, since the downsampling operations $D_{1}$ and $D_{2}$ are fixed. Convergence typically requires 1k to 2k iterations, which thanks to using a lightweight network takes less than half a minute on a GPU and around one minute on a CPU.", + "bbox": [ + 511, + 575, + 883, + 727 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Network Many supervised and self-supervised methods use a relatively large network, often a UNet [25]. Instead, we use a very simple two-layer image-to-image network. It consists of only two convolutional operators with kernel size $3 \\times 3$ followed by one operator of $1 \\times 1$ convolutions. This network has about $20k$ parameters, which is small compared to typical denoising networks. An exact comparison of the network sizes can be found in section 4.4. There are no normalization or pooling layers. The low parameter count and simple structure enables fast denoising even when deployed on a CPU.", + "bbox": [ + 511, + 744, + 883, + 912 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "14021", + "bbox": [ + 480, + 950, + 517, + 962 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In the ablation studies we show that using a UNet instead of a lightweight network leads to overfitting and much worse denoising performance.", + "bbox": [ + 112, + 90, + 480, + 137 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 112, + 152, + 243, + 169 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We compare our denoising algorithm (ZS-N2N) to several baselines. The baselines include dataset based methods, as well as other zero-shot methods. 
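To make Section 3.2 concrete, the following is a minimal PyTorch sketch of the procedure described above: the pair downsampler with the fixed kernels $\mathbf{k}_1$ and $\mathbf{k}_2$, a lightweight network of two $3\times 3$ convolutions followed by a $1\times 1$ convolution, and gradient descent on $\mathcal{L}_{\mathrm{res.}} + \mathcal{L}_{\mathrm{cons.}}$. The hidden width, learning rate, and iteration count below are illustrative assumptions rather than the authors' exact settings.

```python
# Minimal ZS-N2N sketch (illustrative hyperparameters, not the official implementation).
import torch
import torch.nn as nn
import torch.nn.functional as F

def pair_downsampler(y):
    """Split a 1xCxHxW image into two half-resolution images by averaging the
    diagonal and anti-diagonal pixels of non-overlapping 2x2 patches (stride-2 convs)."""
    c = y.shape[1]
    k1 = torch.tensor([[[[0.0, 0.5], [0.5, 0.0]]]], device=y.device).repeat(c, 1, 1, 1)
    k2 = torch.tensor([[[[0.5, 0.0], [0.0, 0.5]]]], device=y.device).repeat(c, 1, 1, 1)
    return F.conv2d(y, k1, stride=2, groups=c), F.conv2d(y, k2, stride=2, groups=c)

class TinyNet(nn.Module):
    """Two 3x3 conv layers plus a 1x1 conv; roughly 20k parameters for hidden=48."""
    def __init__(self, channels=3, hidden=48):  # hidden width is an assumption
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(channels, hidden, 3, padding=1), nn.LeakyReLU(),
            nn.Conv2d(hidden, hidden, 3, padding=1), nn.LeakyReLU(),
            nn.Conv2d(hidden, channels, 1),
        )

    def forward(self, x):
        return self.net(x)

def zs_n2n_denoise(y, iters=2000, lr=1e-3):
    """Fit the network on the single noisy image y (1xCxHxW) and return y - f(y)."""
    f = TinyNet(y.shape[1]).to(y.device)
    opt = torch.optim.Adam(f.parameters(), lr=lr)
    for _ in range(iters):
        d1, d2 = pair_downsampler(y)
        # Symmetric residual loss: the network predicts the noise of one
        # downsampled image so that the denoised result matches the other.
        loss_res = 0.5 * (F.mse_loss(d1 - f(d1), d2) + F.mse_loss(d2 - f(d2), d1))
        # Symmetric consistency loss: denoise-then-downsample should agree
        # with downsample-then-denoise (the regularizing term).
        dd1, dd2 = pair_downsampler(y - f(y))
        loss_cons = 0.5 * (F.mse_loss(d1 - f(d1), dd1) + F.mse_loss(d2 - f(d2), dd2))
        loss = loss_res + loss_cons
        opt.zero_grad()
        loss.backward()
        opt.step()
    with torch.no_grad():
        return y - f(y)
```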
For the dataset based methods, we include both supervised (with clean images) and self-supervised (only noisy images) methods. We test all methods on artificial and real-world noise. We provide ablation studies in the supplementary material.", + "bbox": [ + 111, + 178, + 480, + 297 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The results highlight the dependency of dataset based methods on the dataset they are trained on and suggest that given a small training set, they are outperformed by dataset free ones. Furthermore, the experiments show that methods based on noise models achieve good performance for the specific noise model, but do not generalise to other distributions.", + "bbox": [ + 111, + 300, + 480, + 404 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Concerning the dataset and noise model free methods, our proposed method is either on par or better than other baselines on Gaussian, Poisson, and real world camera and microscope noise. Our method only falls short of Self2Self [24] on high noise levels, however, it requires only $\\frac{1}{200}$ of the denoising time of Self2Self and $2\\%$ of its memory. Moreover, Self2self's performance on low noise levels is insufficient. Therefore, considering denoising quality, generalisition, and computational resources, our method achieves a better trade-off compared to existing methods as shown in Figure 1.", + "bbox": [ + 111, + 407, + 480, + 571 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Baselines", + "text_level": 1, + "bbox": [ + 112, + 584, + 220, + 599 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We compare to Noise2Clean (N2C) with a UNet, which is the current state-of-the-art denoising algorithm. There exists several other networks that perform on par with the UNet, such as DnCNN [32] and RED30 [22], but the UNet is orders of magnitude faster, since it is not very deep, and has a multi-resolution structure. The UNet is therefore the standard choice in all recent denoising papers [12, 15, 17, 23].", + "bbox": [ + 111, + 608, + 480, + 728 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For the self-supervised methods, we compare to Neighbour2Neighbour (NB2NB) [12] and Noise2Void (N2V) [15]. We exclude the methods that require an explicit noise model, such as [4, 16, 23, 34], since these methods work well on synthetic denoising tasks for the given noise distribution, but fail to generalize to unknown noise distributions or real-world noise [12, 30]. This is due to the fact that the synthetic noise is insufficient for simulating real camera noise, which is signal-dependent and substantially altered by the camera's imaging system.", + "bbox": [ + 111, + 729, + 480, + 895 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Regarding the zero-shot methods, which are most", + "bbox": [ + 130, + 897, + 480, + 911 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "similar to ours, we compare to the deep learning based algorithms: DIP [28] and Self2Self (S2S) [24], and also to the classical algorithm: BM3D [7]. Note that apart of our method (and BM3D), all baselines use a U-Net or a variation of it as a denoising backbone.", + "bbox": [ + 511, + 90, + 883, + 167 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The performance of DIP is very sensitive to the number of gradient descent steps. We used the ground truth images to determine the best early stopping iteration. 
The DIP results can therefore be seen as an over optimistic performance of the method. For a fair comparison, we report the results of the best performing model for the other baselines. A comparison of the sensitivity of the methods to the number of optimization steps can be found in the supplementary material.", + "bbox": [ + 511, + 167, + 883, + 303 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The original implementation of S2S uses an ensemble of multiple networks, i.e., averaging the outputs of several networks. All other baselines do not utilize ensembling or averaging. For a fair comparison, we additionally report the results of S2S without any ensembling, which we denote by S2S*. S2S denotes the original implementation with an ensemble of 50 networks.", + "bbox": [ + 511, + 304, + 880, + 409 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. Synthetic Noise", + "text_level": 1, + "bbox": [ + 511, + 421, + 668, + 436 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The dataset based methods (N2C, NB2NB, N2V) are trained on 500 colour images from ImageNet [10]. All methods are tested on the Kodak24 ${}^{1}$ and McMaster18 [13] datasets. All training and test images are centercropped to patches of size ${256} \\times {256}$ .", + "bbox": [ + 511, + 446, + 883, + 520 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We examine Gaussian and Poisson noise with noise levels $\\sigma$ and $\\lambda$ respectively. We consider the fixed noise levels $\\sigma, \\lambda = 10, 25, 50$ . The $\\sigma$ values for Gaussian noise correspond to pixel values in the interval [0,255], while the $\\lambda$ values for Poisson noise correspond to values in the interval [0,1].", + "bbox": [ + 511, + 521, + 883, + 612 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For the dataset based methods, we also consider blind denoising during training with the range of noise levels $\\sigma, \\lambda \\in [10, 50]$ . During training, a $\\sigma, \\lambda$ value is sampled uniformly from the given range for each image in each training epoch, unlike the fixed noise levels, where all training images are contaminated with the same noise level. Blind denoising is what is used in practice, since an exact noise level is typically not given, but rather a range of noise levels.", + "bbox": [ + 511, + 613, + 880, + 748 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In table 1, we present the denoising performance of the different methods. For the dataset based methods, $\\sigma, \\lambda$ is known, denotes that the network trained on that exact noise level is used for testing, while unknown denotes the blind denoising, where the network trained on the range of noise levels [10,50] is used for testing. BM3D requires as input the value of the noise level. For Gaussian denoising the known $\\sigma$ value was used, while for Possion denoising the noise level was estimated us", + "bbox": [ + 511, + 750, + 883, + 885 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "http://r0k.us/graphics/kodak/", + "bbox": [ + 529, + 898, + 767, + 911 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "14022", + "bbox": [ + 480, + 950, + 517, + 962 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "ing the method in [5]. 
Note that ZS-N2N, DIP, and S2S do not utilize any prior information on the noise distribution or level.", + "bbox": [ + 109, + 90, + 480, + 133 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As seen from the results, the dataset based methods often fall slightly short of the dataset free methods. This is due to the fact that they were only trained on 500 images, whereas they reach good performance when trained on larger datasets. In the supplementary material, we show that when N2C is trained on 4000 images, it outperforms all other baselines and its performance can keep improving with more training data. Another drawback of dataset based methods is that they are sensitive to the data they are trained on. They experience a performance drop when trained on a range of noise levels as opposed to a specific noise level as the test set.", + "bbox": [ + 109, + 137, + 480, + 316 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Regarding the zero-shot methods, DIP exhibited worse scores in all simulations. BM3D is tailored to work well for Gaussian denoising, where the exact noise variance is known and required as input. However, its performance dropped for Poisson noise, where the noise level was estimated.", + "bbox": [ + 109, + 318, + 480, + 407 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ZS-N2N and S2S do not rely on a specific noise model and therefore work consistently well for both Gaussian and Poisson noise. However, S2S suffers from at least two drawbacks. The first is it heavily relies on ensembling to achieve good scores as seen by comparing the results of S2S with $\\mathrm{S2S^{*}}$ . Despite improving the scores, ensembling oversmoothens the image causing a loss in some visual features [8]. Note that all other baselines are ensemble free. The second drawback is that it performs worse than all other baselines on low noise levels, as seen in the Gaussian noise with $\\sigma = 10$ .", + "bbox": [ + 109, + 409, + 480, + 574 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Considering that DIP performs poorly, that BM3D only works well for Gaussian noise, and that S2S's performance without ensembling and on low noise levels is unsatisfactory, our method, ZS-N2N is the only dataset free denoising algorithm that performs well on different noise distributions and levels.", + "bbox": [ + 109, + 575, + 480, + 665 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Real-World Noise", + "text_level": 1, + "bbox": [ + 112, + 676, + 284, + 690 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Camera noise: Following [24], we evaluate on the PolyU dataset [29] which consists of high-resolution images from various scenes captured by 5 cameras from the 3 leading brands of cameras: Canon, Nikon, and Sony. We also consider the SIDD [1], which consists of images captured by several smartphone cameras under different lighting conditions and noise patterns.", + "bbox": [ + 109, + 700, + 480, + 805 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Since the computational cost for running S2S is high, we randomly choose 20 images from both datasets to test on. The SIDD validation set has images of size $256 \\times 256$ . For consistency, we center-crop the PolyU images to patches of size $256 \\times 256$ . The results are shown in table 2. 
All methods perform similarly except for BM3D and the ensemble free version of S2S, which", + "bbox": [ + 109, + 806, + 480, + 910 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "exhibit a notable performance drop.", + "bbox": [ + 513, + 90, + 750, + 106 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/f798b3ecd447631810d171f9a3b4d6c1a19f4ba843f1beabd42eac8175e5f65f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Dataset | ZS-N2N | DIP | S2S | S2S* | BM3D
PolyU | 36.92 | 37.07 | 37.01 | 33.12 | 36.11
SIDD | 34.07 | 34.31 | 33.98 | 30.77 | 28.19
", + "bbox": [ + 514, + 119, + 893, + 169 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Microscope noise: We additionally evaluate on the Fluorescence Microscopy dataset [33], which contains real grayscale fluorescence images obtained with commercial confocal, two-photon, and wide-field microscopes and representative biological samples such as cells, zebrafish, and mouse brain tissues. We pick random images from the test set to test on. We also compare to Noise2Fast (N2F) [18], for which code for denoising grayscale is available. The results are depicted in table 3.", + "bbox": [ + 511, + 215, + 880, + 366 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/d52d06630cbb8303c97dab9b3a1657560e8a49ea2aecb308b3f8d7323837fa26.jpg", + "table_caption": [ + "Table 2. Denoising PSNR in dB on real world camera noise." + ], + "table_footnote": [], + "table_body": "
Image | Photon BPAE | Photon Mice | Confocal BPAE | Average
ZS-N2N | 30.73 | 31.42 | 35.85 | 32.67
DIP | 29.22 | 30.01 | 35.51 | 31.58
S2S | 30.90 | 31.51 | 31.01 | 31.14
S2S* | 29.49 | 29.99 | 29.54 | 29.67
BM3D | 27.19 | 29.48 | 33.23 | 29.97
N2F | 30.93 | 31.07 | 36.01 | 32.67
", + "bbox": [ + 521, + 378, + 874, + 502 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 3. PSNR in dB on real world microscope noise.", + "bbox": [ + 535, + 513, + 857, + 527 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our method and Noise2Fast achieve similar scores and slightly outperform the other baselines. Despite the similarity in scores, when inspecting the denoised images visually, we see differences: Our method produces visually sharper images and preserves slightly more details, while the Noise2Fast images are relatively smooth. This is most noticeable on images with fine details, such as MRI images, see Figure 3 for a knee image from the fastMRI dataset [31]. The blurriness in the Noise2Fast images is likely due to the downsampling scheme used, which drops some pixel values, and the ensembling performed to obtain the final image estimate, which oversmoothens the image [8]. Our method, on the other hand, preserves all pixel values during downsampling, and is ensemble free.", + "bbox": [ + 511, + 547, + 880, + 772 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4. Computational Efficiency", + "text_level": 1, + "bbox": [ + 511, + 782, + 743, + 799 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section we focus on the computational efficiency. We consider the denoising time and the memory requirements represented by the number of network parameters. Since in some applications a GPU is not available [9], we additionally consider the denoising time on a CPU. The GPU tested is Quadro RTX 6000 and the CPU is Intel Core i9-9940X 3.30GHz.", + "bbox": [ + 511, + 806, + 880, + 910 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "14023", + "bbox": [ + 480, + 950, + 517, + 962 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/fcee7752d5dbd5a40edbe27f76aa63bbda08283f3781384aeaed670d00f8c964.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Noise | Method | Kodak24 | McMaster18
Gaussian | dataset-based | N2C | σ known? | σ = 10 | σ = 25 | σ = 50 | σ = 10 | σ = 25
yes | 33.45 | 28.27 | 25.47 | 33.03 | 28.46
no | 32.16 | 28.18 | 24.45 | 31.97 | 28.26
NB2NB | yes | 33.01 | 27.90 | 25.02 | 32.63 | 28.01
no | 31.79 | 27.80 | 24.15 | 31.19 | 27.85
N2V | yes | 30.19 | 26.21 | 24.07 | 30.95 | 26.50
no | 28.95 | 26.03 | 23.19 | 29.64 | 26.31
dataset-free | ZS-N2N (ours) | - | 33.69 | 29.07 | 24.81 | 34.21 | 28.80
DIP | - | 32.28 | 27.38 | 23.95 | 33.07 | 27.61
S2S | - | 29.54 | 28.39 | 26.22 | 30.78 | 28.71
S2S* | - | 26.93 | 26.29 | 24.83 | 27.64 | 26.48
BM3D | yes | 33.74 | 29.02 | 25.51 | 34.51 | 29.21
Poisson | dataset-based | N2C | λ known? | λ = 50 | λ = 25 | λ = 10 | λ = 50 | λ = 25
yes | 29.42 | 27.49 | 26.25 | 29.89 | 28.20
no | 28.92 | 27.14 | 23.13 | 28.62 | 27.51
NB2NB | yes | 29.19 | 27.01 | 25.71 | 29.41 | 27.79
no | 28.53 | 26.88 | 23.60 | 28.03 | 27.66
N2V | yes | 27.73 | 25.55 | 23.77 | 27.86 | 25.65
no | 27.04 | 25.28 | 21.93 | 26.34 | 25.52
dataset-free | ZS-N2N (ours) | - | 29.45 | 27.52 | 24.92 | 30.36 | 28.41
DIP | - | 27.51 | 25.84 | 23.81 | 28.73 | 27.37
S2S | - | 28.89 | 28.31 | 27.29 | 30.11 | 29.40
S2S* | - | 26.75 | 26.40 | 25.63 | 27.55 | 27.24
BM3D | no | 28.36 | 26.58 | 24.20 | 27.33 | 24.77
", + "bbox": [ + 138, + 88, + 859, + 474 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 1. PSNR scores in dB for Gaussian and Poisson denoising. Best result is in bold, second best result is underlined. The dataset based methods are italicized. Note DIP's mediocre scores and BM3D's performance drop between Gaussian and Poisson noise. S2S has significantly lower scores in low noise as seen with $\\sigma = 10$ and its ensemble free version S2S* has inadequate performance. Denoised samples can be found in the supplementary material.", + "bbox": [ + 116, + 487, + 880, + 542 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In table 4 we display the time required to denoise one colour image of size $256 \\times 256$ at inference, as well as the total number of trainable parameters of a model. The dataset based methods are trained for long durations, but after training, the network parameters are fixed, and inference is almost instantaneous, since it is just a forward pass through the model. The time taken for denoising is therefore negligible compared to the zero-shot methods, whose parameters are optimized for each test image separately.", + "bbox": [ + 116, + 569, + 480, + 719 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In the original implementation of S2S, the authors report a denoising time of 1.2 hours for a $256 \\times 256$ colour image on GPU. However, we noticed that only half of the gradient update iterations are needed for convergence. We therefore report only half of their GPU time.", + "bbox": [ + 116, + 726, + 480, + 815 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Concerning the denoising time, dataset based methods are the fastest, since a forward pass through a fixed network requires only milli seconds. Regarding the deep learning based zero-shot methods, ZS-N2N is significantly more computationally efficient. Specifically, on CPU it is 200 times and 35 times faster than S2S and", + "bbox": [ + 116, + 821, + 480, + 910 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "DIP respectively and has only $2\\%$ and $1\\%$ of their memory requirements. Only the classical BM3D is computationally more efficient than ZS-N2N.", + "bbox": [ + 516, + 569, + 880, + 614 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.5. Discussion", + "text_level": 1, + "bbox": [ + 516, + 628, + 629, + 643 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Dataset based methods typically achieve state-of-the-art results but our experiments manifested two of their shortcomings: They don't perform well when trained on small datasets, and the performance drops when the test data differs from the training data, as seen by varying the noise levels. This highlights the importance of dataset free denoising algorithms.", + "bbox": [ + 516, + 654, + 880, + 758 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Methods that rely on an explicit model of the noise distribution such as Noisier2Noise [23] and Anscombe [20] or those tailored to work well for specific distributions such as BM3D, do not generalize well to other distributions. Their performance therefore degrades when the noise distribution is unknown, or the noise level must be estimated. This has been manifested by BM3D's competitive performance on Gaussian noise, but its failure to keep up with the other baselines on Poisson and real world noise. 
These findings highlight the advantage", + "bbox": [ + 516, + 761, + 880, + 910 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "14024", + "bbox": [ + 480, + 952, + 517, + 962 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/4a90a39e1e5aeffe1bf4ef2585b249e2f75711e91028c17d4c61cba64f880f8f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 114, + 87, + 269, + 220 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/57e4001787a4600cc5a18ca0d13e3c647553819bbdcfed802a4b23c56bad574f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 316, + 87, + 475, + 220 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/fbf1a105137312093bd3be1650c690ce8c329005a5b4e06f253480600247281b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 522, + 87, + 681, + 220 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8fca2ab14f3f86eebb388395846f2c72490c0887387c6152e6f80ca46cd37363.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 727, + 88, + 885, + 220 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6af9e111c367a366653d7ded1785a29f770a7bcd717b48de0b280d3906ca8995.jpg", + "image_caption": [ + "Figure 3. Visual comparison between our method and Noise2Fast for denoising Gaussian noise on a knee MRI. Both methods achieve similar PSNR, but notice how the center and left edge are blurry and oversmooth in Noise2Fast. Our method produces a sharper image with less loss of details." + ], + "image_footnote": [], + "bbox": [ + 114, + 232, + 272, + 351 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/032512022491e34fb7120f464f3eb60cc120ffd18fa3528ffcf08ecfe9e618f1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 318, + 232, + 473, + 351 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a1998f7d1439aa188a577ca18ed8ea70f6dad3c491d2fb0ded4fa0f08d2cebd6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 524, + 232, + 679, + 351 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a171e5ab676a203e8e42fff539343f738e4e985ae75b8fad3afa4d505e479059.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 728, + 232, + 883, + 351 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/a12a00be32866bf89f6184853791d22ebab695665a3566592800701682f7ddaf.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | N2C | NB2NB | N2V | ZS-N2N | DIP | S2S | BM3D
GPU time | - | - | - | 20 sec. | 3 min. | 35 min. | 4 sec.
CPU time | - | - | - | 80 sec. | 45 min. | 4.5 hr. | 4 sec.
Network size | 3.3M | 1.3M | 2.2M | 22k | 2.2M | 1M | -
", + "bbox": [ + 210, + 433, + 787, + 500 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 4. Computational Resources. First and Second Rows: Time taken to denoise one image on average on GPU and CPU. The time for the dataset based methods is discarded, since it is negligible. BM3D does not benefit from the GPU, as there is no optimization involved. Bottom Row: Number of parameters of a network.", + "bbox": [ + 109, + 508, + 883, + 551 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "of noise model free techniques.", + "bbox": [ + 109, + 578, + 320, + 593 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Regarding the three dataset free and noise model free methods considered, DIP was often lagging behind S2S and ZS-N2N, despite using the ground truths to find the best possible early stopping iteration. S2S's performance without ensembling is inadequate, and even with ensembling, it does not work well on low noise levels. Moreover, it requires more than 0.5 hours to denoise an image on a GPU and 4.5 hours on a CPU.", + "bbox": [ + 109, + 594, + 482, + 714 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Except for ZS-N2N, all deep learning based baselines have millions of parameters, making them computationally expensive. Considering ZS-N2N's ability to generalize to various denoising conditions with relatively fast denoising time, very few parameters, and CPU compatibility, we can conclude that it offers a good trade-off between denoising quality and computational resources.", + "bbox": [ + 109, + 715, + 482, + 821 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 112, + 839, + 232, + 854 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We proposed a novel zero-shot image denoising algorithm that does not require any training examples or knowledge of the noise model or level. Our work uses a", + "bbox": [ + 109, + 866, + 482, + 912 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "simple 2-layer network, and allows denoising in a relatively short period of time even when executed without a GPU. The method can perform well on simulated noise as well as real-world camera and microscope noise, and achieves a good trade-off between generalization, denoising quality and computational resources compared to existing dataset free methods. A demo of our implementation including our code and hyperparameters can be found in the following colab notebook: https://colab.research.google.com/drive/1i82nyizTdszyHkaHBuKPbWnTzao8HF9b", + "bbox": [ + 511, + 578, + 883, + 743 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 513, + 763, + 681, + 780 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The authors are supported by the Institute of Advanced Studies at the Technical University of Munich, the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) - 456465471, 464123524, the German Federal Ministry of Education and Research, and the Bavarian State Ministry for Science and the Arts. 
The authors of this work take full responsibility for its content.", + "bbox": [ + 511, + 791, + 883, + 910 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "14025", + "bbox": [ + 480, + 950, + 517, + 962 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 114, + 89, + 209, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Abdelrahman Abdelhamed, Stephen Lin, and Michael S. Brown. A high-quality denoising dataset for smartphone cameras. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 6", + "[2] Joshua Batson and Loic Royer. Noise2Self: Blind denoising by self-supervision. In Kamalika Chaudhuri and Ruslan Salakhutdinov, editors, Proceedings of the 36th International Conference on Machine Learning, volume 97 of Proceedings of Machine Learning Research, pages 524-533. PMLR, 2019. 2", + "[3] Tim Brooks, Ben Mildenhall, Tianfan Xue, Jiawen Chen, Dillon Sharlet, and Jonathan T. Barron. Unprocessing Images for Learned Raw Denoising. In IEEE Conference on Computer Vision and Pattern Recognition, pages 11036-11045, 2019. 2", + "[4] Sungmin Cha and Taesup Moon. Fully convolutional pixel adaptive image denoiser. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2019. 2, 5", + "[5] Guangyong Chen, Fengyuan Zhu, and Pheng Ann Heng. An efficient statistical method for image noise level estimation. In 2015 IEEE International Conference on Computer Vision (ICCV), pages 477-485, 2015. 6", + "[6] Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15750-15758, 2021. 4", + "[7] K. Dabov, A. Foi, V. Katkovnik, and K. Egiazarian. Image Denoising by Sparse 3-D Transform-Domain Collaborative Filtering. IEEE Transactions on Image Processing, 16(8):2080-2095, 2007. 1, 2, 5", + "[8] Mohammad Zalbagi Darestani and Reinhard Heckel. Accelerated mri with un-trained neural networks. IEEE Transactions on Computational Imaging, 7:724-733, 2021. 3, 6", + "[9] Mauricio Delbracio, Ignacio Garcia-Dorado, SungJoon Choi, Damien Kelly, and Peyman Milanfar. Polyblur: Removing mild blur by polynomial reblurring. IEEE Transactions on Computational Imaging, 7:837-848, 2021. 6", + "[10] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248-255, 2009. 5", + "[11] Reinhard Heckel and Paul Hand. Deep decoder: Concise image representations from untrained non-convolutional networks. International Conference on Learning Representations, 2019. 3", + "[12] Tao Huang, Songjiang Li, Xu Jia, Huchuan Lu, and Jianzhuang Liu. Neighbor2neighbor: Self-supervised denoising from single noisy images. In 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14776-14785, 2021. 1, 2, 3, 5", + "[13] Sandip M. Kasar and Sachin D. Ruikar. Image demosaicking by nonlocal adaptive thresholding. In 2013 International Conference on Signal Processing, Image Processing Pattern Recognition, pages 34-38, 2013. 5" + ], + "bbox": [ + 114, + 114, + 483, + 911 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[14] Chaewon Kim, Jaeho Lee, and Jinwoo Shin. Zero-shot blind image denoising via implicit neural representations, 2022. 
2", + "[15] Alexander Krull, Tim-Oliver Buchholz, and Florian Jug. Noise2void - learning denoising from single noisy images. In 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2124-2132, 2019. 1, 2, 5", + "[16] Samuli Laine, Tero Karras, Jaakko Lehtinen, and Timo Aila. High-quality self-supervised deep image denoising. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. 5", + "[17] Jaakko Lehtinen, Jacob Munkberg, Jon Hasselgren, Samuli Laine, Tero Karras, Miika Aittala, and Timo Aila. Noise2Noise: Learning image restoration without clean data. In Jennifer Dy and Andreas Krause, editors, Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pages 2965-2974. PMLR, 2018. 1, 2, 3, 5", + "[18] Jason Lequyer, Reuben Philip, Amit Sharma, Wen-Hsin Hsu, and Laurence Pelletier. A fast blind zero-shot denoiser. Nature Machine Intelligence, oct 2022. 2, 6", + "[19] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 10012-10022, 2021. 2", + "[20] Markku Makitalo and Alessandro Foi. Optimal inversion of the anscombe transformation in low-countoisson image denoising. IEEE Transactions on Image Processing, 20(1):99-109, 2011. 1, 2, 7", + "[21] Youssef Mansour, Kang Lin, and Reinhard Heckel. Image-to-image mlp-mixer for image reconstruction. CoRR, abs/2202.02018, 2022. 2", + "[22] Xiaojiao Mao, Chunhua Shen, and Yu-Bin Yang. Image restoration using very deep convolutional encoder-decoder networks with symmetric skip connections. In D. Lee, M. Sugiyama, U. Luxburg, I. Guyon, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 29. Curran Associates, Inc., 2016. 5", + "[23] Nick Moran, Dan Schmidt, Yu Zhong, and Patrick Coady. Noisier2noise: Learning to denoise from unpaired noisy data. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 12061-12069, 2020. 2, 5, 7", + "[24] Yuhui Quan, Mingqin Chen, Tongyao Pang, and Hui Ji. Self2self with dropout: Learning self-supervised denoising from single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 1, 2, 3, 5, 6", + "[25] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In Nassir Navab, Joachim Hornegger, William M. Wells, and Alejandro F. Frangi, editors," + ], + "bbox": [ + 516, + 92, + 883, + 911 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "14026", + "bbox": [ + 480, + 950, + 519, + 963 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Medical Image Computing and Computer-Assisted Intervention - MICCAI 2015, pages 234-241, Cham, 2015. Springer International Publishing. 3, 4", + "[26] Shakarim Soltanayev and Se Young Chun. Training deep learning based denoisers without ground truth data. In S. Bengio, H. Wallach, H. Larochelle, K. Grauman, N. Cesa-Bianchi, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 31. Curran Associates, Inc., 2018. 
2", + "[27] Zhengzhong Tu, Hossein Talebi, Han Zhang, Feng Yang, Peyman Milanfar, Alan Bovik, and Yinxiao Li. Maxim: Multi-axis mlp for image processing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5769-5780, June 2022. 2", + "[28] Dmitry Ulyanov, Andrea Vedaldi, and Victor Lempitsky. Deep image prior. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 1, 2, 3, 5", + "[29] Jun Xu, Hui Li, Zhetong Liang, David Zhang, and Lei Zhang. Real-world noisy image denoising: A new benchmark, 2018. 6", + "[30] Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, Ming-Hsuan Yang, and Ling Shao. Cycleisp: Real image restoration via improved data synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 5", + "[31] Jure Zbontar, Florian Knoll, Anuroop Sriram, Tullie Murrell, Zhengnan Huang, Matthew J. Muckley, Aaron Defazio, Ruben Stern, Patricia Johnson, Mary Bruno, Marc Parente, Krzysztof J. Geras, Joe Katsnelson, Hersh Chandarana, Zizhao Zhang, Michal Drozdzal, Adriana Romero, Michael Rabbat, Pascal Vincent, Nafissa Yakubova, James Pinkerton, Duo Wang, Erich Owens, C. Lawrence Zitnick, Michael P. Recht, Daniel K. Sodickson, and Yvonne W. Lui. fastMRI: An open dataset and benchmarks for accelerated MRI. 2018. 6", + "[32] K. Zhang, W. Zuo, Y. Chen, D. Meng, and L. Zhang. Beyond a Gaussian Denoiser: Residual Learning of Deep CNN for Image Denoising. IEEE Transactions on Image Processing, 26(7):3142-3155, 2017. 2, 5", + "[33] Yide Zhang, Yinhao Zhu, Evan Nichols, Qingfei Wang, Siyuan Zhang, Cody Smith, and Scott Howard. Aoisson-gaussian denoising dataset with real fluorescence microscopy images. In CVPR, 2019. 6", + "[34] Magaiya Zhussip, Shakarim Soltanayev, and Se Young Chun. Extending stein's unbiased risk estimator to train deep denoisers with correlated pairs of noisy images. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. 2, 5" + ], + "bbox": [ + 114, + 92, + 482, + 835 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "14027", + "bbox": [ + 480, + 950, + 517, + 962 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2023/Zero-Shot Noise2Noise_ Efficient Image Denoising Without Any Data/2d536e4a-ee00-4291-84f0-1f5cbbcd1b0f_model.json b/2023/Zero-Shot Noise2Noise_ Efficient Image Denoising Without Any Data/2d536e4a-ee00-4291-84f0-1f5cbbcd1b0f_model.json new file mode 100644 index 0000000000000000000000000000000000000000..88af2abbcd08e5a9e93420da209633f1db1171a5 --- /dev/null +++ b/2023/Zero-Shot Noise2Noise_ Efficient Image Denoising Without Any Data/2d536e4a-ee00-4291-84f0-1f5cbbcd1b0f_model.json @@ -0,0 +1,1991 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.156, + 0.131, + 0.842, + 0.153 + ], + "angle": 0, + "content": "Zero-Shot Noise2Noise: Efficient Image Denoising without any Data" + }, + { + "type": "text", + "bbox": [ + 0.206, + 0.182, + 0.792, + 0.234 + ], + "angle": 0, + "content": "Youssef Mansour and Reinhard Heckel \nTechnical University of Munich and Munich Center for Machine Learning \nMunich, Germany" + }, + { + "type": "text", + "bbox": [ + 0.318, + 0.238, + 0.674, + 0.251 + ], + "angle": 0, + "content": "y.mansour@tum.de, reinhard.heckel@tum.de" + }, + { + "type": "title", + "bbox": [ + 0.26, + 0.287, + 0.337, + 0.303 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.32, + 0.486, + 0.562 + ], + "angle": 0, + "content": "Recently, self-supervised neural networks have shown excellent image denoising performance. However, current dataset free methods are either computationally expensive, require a noise model, or have inadequate image quality. In this work we show that a simple 2-layer network, without any training data or knowledge of the noise distribution, can enable high-quality image denoising at low computational cost. Our approach is motivated by Noise2Noise and Neighbor2Neighbor and works well for denoising pixel-wise independent noise. Our experiments on artificial, real-world camera, and microscope noise show that our method termed ZS-N2N (Zero Shot Noise2Noise) often outperforms existing dataset-free methods at a reduced cost, making it suitable for use cases with scarce data availability and limited compute." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.595, + 0.245, + 0.611 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.622, + 0.484, + 0.698 + ], + "angle": 0, + "content": "Image denoising is the process of removing distortions from images, to enhance them visually and to reconstruct fine details. The latter is especially important for medical images, where fine details are necessary for an accurate diagnosis." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.7, + 0.483, + 0.85 + ], + "angle": 0, + "content": "Current state-of-the-art image denoising techniques rely on large data sets of clean-noisy image pairs and often consist of a neural network trained to map the noisy to the clean image. The drawbacks of dataset-based methods are that data collection, even without ground truths, is expensive and time-consuming, and second, a network trained on dataset suffers from a performance drop if the test images come from a different distribution of images. These drawbacks motivate research in dataset-free methods." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.852, + 0.484, + 0.913 + ], + "angle": 0, + "content": "All current zero-shot models are either suitable only for specific noise distributions and need previous knowledge of the noise level [7, 20], require a lot of compute (time, memory, GPU) to denoise an image [24], have" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.288, + 0.885, + 0.483 + ], + "angle": 0, + "content": "poor denoising quality [28], or do not generalise to different noise distributions or levels [15, 24]. We propose a method that builds on the recent Noise2Noise [17] and Neighbour2Neighbour [12] papers and aims to circumvent these issues to reach a good trade-off between denoising quality and computational resources. We make only minimal assumptions on the noise statistics (pixel-wise independence), and do not require training data. 
Our method does not require an explicit noise model, and is therefore suitable for various noise types and can be employed when the noise distribution or level are unknown. The only assumption we make about the noise is that it is unstructured and has zero mean." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.485, + 0.884, + 0.649 + ], + "angle": 0, + "content": "In a nutshell, we convolve the noisy test image with two fixed filters, which yields two downsampled images. We next train a lightweight network with regularization to map one downsampled image to the other. Our strategy builds on the recent Noise2Noise [17] and Neighbour2Neighbour [12] papers, however we take those methods one step further by enabling denoising without any training data. Even with an extremely small network and without any training data, our method achieves good denoising quality and often even outperforms large networks trained on datasets." + }, + { + "type": "text", + "bbox": [ + 0.534, + 0.651, + 0.835, + 0.665 + ], + "angle": 0, + "content": "The key attributes of our work are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.533, + 0.676, + 0.885, + 0.844 + ], + "angle": 0, + "content": "- Compute. Dataset free neural network based algorithms [24, 28] require solving an optimization problem involving millions of parameters to denoise an image. The huge parameter count requires large memory storage, advanced GPUs, and long denoising times. In this work we show that our method, that utilizes a simple 2 layer network, with only \\(20\\mathrm{k}\\) parameters, can often outperform networks with millions of parameters while reducing the computational cost significantly and being easily executable on a CPU." + }, + { + "type": "text", + "bbox": [ + 0.533, + 0.852, + 0.887, + 0.913 + ], + "angle": 0, + "content": "- Generalisation. Existing zero-shot methods often to do not generalise well. For example, BM3D [7], a classical denoising algorithm does not generalize well to non-Gaussian noise, and blind spot net" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.952, + 0.52, + 0.964 + ], + "angle": 0, + "content": "14018" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.118, + 0.092, + 0.501, + 0.201 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.228, + 0.484, + 0.352 + ], + "angle": 0, + "content": "Figure 1. Left and middle plots: PSNR scores for Gaussian and Poisson denoising for different noise levels. Note BM3D's poor performance on Poisson compared to Gaussian noise. Right plot: Time required in seconds to denoise one \\(256 \\times 256\\) colour image on CPU and GPU, tested on Poisson noise with \\(\\lambda = 50\\). Except for BM3D, all methods have shorter times on GPU. Only S2S in some cases outperforms our method, however it is about 100 times slower. S2S* denotes the ensemble free version of S2S." + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.382, + 0.484, + 0.473 + ], + "angle": 0, + "content": "works [15] [24] (discussed later in detail) fail to denoise well in the regime of low noise level. Extensive experiments on different noise distributions and noise levels show that our proposed approach can generalise better to different conditions better than existing methods." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.49, + 0.484, + 0.673 + ], + "angle": 0, + "content": "In summary, our proposed method is dataset and noise model-free, and achieves a better trade-off between generalization, denoising quality, and computational resources compared to existing zero-shot methods, as displayed in Figure 1. We compare to the standard zero shot baselines, including BM3D, and the recent neural network-based algorithms DIP [28] and S2S [24]. Only BM3D is faster than our method but achieves poor results on non-Gaussian noise. Only S2S sometimes outperforms our method, but is orders of magnitude slower, often fails on low noise levels [14], and requires assembling to achieve acceptable performance." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.688, + 0.254, + 0.705 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.715, + 0.483, + 0.79 + ], + "angle": 0, + "content": "Supervised methods achieve state-of-the-art performance by training a network end-to-end to map a noisy image to a clean one. Networks that work well are CNNs [3, 32], vision transformers [19], or MLP based architectures [21, 27]." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.792, + 0.483, + 0.912 + ], + "angle": 0, + "content": "Noise2Noise [17] yields excellent performance from training on two noisy images of the same static scene, without any ground truth images. Given that the noise has zero mean, training a network to map one noisy image to another noisy image of the same scene performs as well as mapping to the ground truth. While having access to a pair of noisy images of the same scene is in practice hard to achieve, the Noise2Noise method" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.883, + 0.122 + ], + "angle": 0, + "content": "has motivated further research in self-supervised methods [12] that require only single noisy images." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.139, + 0.885, + 0.335 + ], + "angle": 0, + "content": "Self-supervised methods are trained on datasets consisting of only noisy images. Noise2Void [15] and Noise2Self [2] are two blind spot prediction approaches for image denoising. Given a set of noisy images \\(\\{\\mathbf{y}^i\\}_{1}^n\\), The idea is to minimize the loss \\(\\frac{1}{n}\\sum_{i=1}^{n}\\mathcal{L}(f_{\\theta}(M^i(\\mathbf{y}^i)),\\mathbf{y}^i)\\), where \\(\\mathcal{L}\\) is a loss function, \\(f_{\\theta}\\) is a network, and \\(M^i\\) is an operator that masks some pixels, hence the name blind spot. Assuming that the neighbouring pixels of a clean image are highly correlated, and that the noise pixels are independent, a network trained to reconstruct a masked pixel, can only predict the signal value from the neighbouring visible pixels, but not the noise." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.336, + 0.884, + 0.44 + ], + "angle": 0, + "content": "Recently, several works [4, 26, 34] attempted to use Stein's unbiased risk estimator for Gaussian denoising. Such methods work well only for Gaussian noise and require the noise level to be known in advance. A more general framework is Noisier2Noise [23] which works for any noise distribution, but the distribution must be known in advance." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.442, + 0.884, + 0.563 + ], + "angle": 0, + "content": "The newly proposed Neighbour2Neighbour [12] builds on the Noise2Noise [17] method, where the assumptions are that the noise has zero mean and is pixel-wise independent. Neighbour2Neighbour extends Noise2Noise by enabling training without noisy image pairs. It does so by sub-sampling single noisy images to create pairs of noisy images, where Noise2Noise can be applied." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.58, + 0.884, + 0.88 + ], + "angle": 0, + "content": "Zero-Shot/ Dataset free Methods. Most similar to our work is Noise2Fast [18], which also builds on Noise2Noise and Neighbour2Neighbour to achieve dataset-free denoising. However, the method is only evaluated on grayscale images, uses a relatively large network, and requires an early stopping criterion. Our work improves on Noise2Fast by easily denoising grayscale or RGB images, and designing a consistency loss that alleviates the need to early stop. Moreover, we use a much smaller network which saves compute. Specifically, our network is twelve times smaller and a forward pass through it is seven times faster. To the best of our knowledge, our work is the first to utilize a small 2-layer network and achieve competitive quality for image restoration. We show that on grayscale images, our method despite achieving similar scores to Noise2Fast [18], produces better quality images. This is likely due to Noise2Fast dropping pixel values when downsampling, whereas our method always keeps all information retained." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.882, + 0.884, + 0.913 + ], + "angle": 0, + "content": "Besides this work, classical non-learning-based methods, such as BM3D [7] and Anscombe [20], work" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.952, + 0.519, + 0.963 + ], + "angle": 0, + "content": "14019" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.112, + 0.092, + 0.482, + 0.121 + ], + "angle": 0, + "content": "well for Gaussian and Poisson noise, respectively, and require the noise level as an input." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.123, + 0.482, + 0.257 + ], + "angle": 0, + "content": "DIP (Deep Image Prior) [28] and its variants such as the Deep Decoder [11] build on the fact that CNNs have an inductive bias towards natural images, in that they can fit natural images much faster than noise. Therefore, a network trained, with early stopping, to map a random input to the noisy image will denoise the image. The denoising performance of DIP is often poor, and is dependent on the number of training epochs, which is hard to determine in advance." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.26, + 0.482, + 0.441 + ], + "angle": 0, + "content": "Self2Self [24] utilizes the idea of the blind spot networks (reconstructing masked pixels) on a single image, but with dropout ensembling. However, this method is not computationally efficient, in that it requires long durations to denoise an image. According to the authors, it takes 1.2 hours to denoise one \\(256 \\times 256\\) image on a GPU. Compared to other blind spot networks, Self2Self achieves significantly better denoising scores, since it relies on ensembling, i.e., averaging the output of several networks. However, ensemble learning over smoothens the image, causing a loss of some details, despite the improvement in PSNR scores [8]." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.442, + 0.482, + 0.548 + ], + "angle": 0, + "content": "Similar to almost all supervised and self-supervised methods, both Self2Self and DIP use a UNet [25] or a variant of it as the backbone network in their architectures. A UNet typically has millions of parameters, making it unsuitable for compute limited applications. Our work departs from this scheme, by designing a shallow and simple network with few parameters." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.565, + 0.204, + 0.58 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.592, + 0.482, + 0.712 + ], + "angle": 0, + "content": "Our method builds on the Noise2Noise [17], for training a network on pairs of noisy images, and the Neighbour2Neighbour (NB2NB) [12], which generates such pairs from a single noisy image. Our main idea is to generate a pair of noisy images from a single noisy image and train a small network only on this pair. We start with a brief summary of Noise2Noise and then introduce our method." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.726, + 0.482, + 0.757 + ], + "angle": 0, + "content": "3.1. Background: Noise2Noise and Neighbour2Neighbour" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.766, + 0.482, + 0.856 + ], + "angle": 0, + "content": "Supervised denoising methods are typically neural networks \\( f_{\\theta} \\) that map a noisy image \\( \\mathbf{y} \\) to an estimate \\( f_{\\theta}(\\mathbf{y}) \\) of the clean image \\( \\mathbf{x} \\). Supervised denoising methods are typically trained on pairs of clean images \\( \\mathbf{x} \\) and noisy measurements \\( \\mathbf{y} = \\mathbf{x} + \\mathbf{e} \\), where \\( \\mathbf{e} \\) is noise. We refer to supervised denoising as Noise2Clean (N2C)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.857, + 0.482, + 0.913 + ], + "angle": 0, + "content": "Neural networks can also be trained on different noisy observations of the same clean image. Noise2Noise (N2N) [17] assumes access to a set of pairs of noisy images \\(\\mathbf{y}_1 = \\mathbf{x} + \\mathbf{e}_1, \\mathbf{y}_2 = \\mathbf{x} + \\mathbf{e}_2\\)" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.883, + 0.194 + ], + "angle": 0, + "content": "where \\(\\mathbf{e}_1, \\mathbf{e}_2\\) are independent noise vectors. A network \\(f_{\\theta}\\) is then trained to minimize the empirical risk \\(\\frac{1}{n} \\sum_{i=1}^{n} \\left\\| f_{\\theta}(\\mathbf{y}_1^i) - \\mathbf{y}_2^i \\right\\|_2^2\\). This makes sense, since in expectation over such noisy instances, and assuming zero mean noise, training a network in a supervised manner to map a noisy image to another noisy image is equivalent to mapping it to a clean image i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.514, + 0.203, + 0.887, + 0.239 + ], + "angle": 0, + "content": "\\[\n\\underset {\\theta} {\\arg \\min } \\mathbb {E} \\left[ \\| f _ {\\theta} (\\mathbf {y} _ {1}) - \\mathbf {x} \\| _ {2} ^ {2} \\right] = \\underset {\\theta} {\\arg \\min } \\mathbb {E} \\left[ \\| f _ {\\theta} (\\mathbf {y} _ {1}) - \\mathbf {y} _ {2} \\| _ {2} ^ {2} \\right]. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.533, + 0.24, + 0.859, + 0.255 + ], + "angle": 0, + "content": "The proof is given in the supplementary material." 
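For intuition, a minimal sketch of the argument behind Eq. (1), under the stated assumptions that \(\mathbf{e}_2\) has zero mean and is independent of \(\mathbf{y}_1\) and \(\mathbf{x}\) (the full proof remains in the supplementary material):
\[
\begin{aligned}
\mathbb{E}\left[\|f_{\theta}(\mathbf{y}_1)-\mathbf{y}_2\|_2^2\right]
&= \mathbb{E}\left[\|f_{\theta}(\mathbf{y}_1)-\mathbf{x}\|_2^2\right]
- 2\,\mathbb{E}\left[\langle f_{\theta}(\mathbf{y}_1)-\mathbf{x},\,\mathbf{e}_2\rangle\right]
+ \mathbb{E}\left[\|\mathbf{e}_2\|_2^2\right] \\
&= \mathbb{E}\left[\|f_{\theta}(\mathbf{y}_1)-\mathbf{x}\|_2^2\right]
+ \mathbb{E}\left[\|\mathbf{e}_2\|_2^2\right].
\end{aligned}
\]
The cross term vanishes because \(\mathbf{e}_2\) is zero-mean and independent of \(f_{\theta}(\mathbf{y}_1)-\mathbf{x}\), and the remaining noise term does not depend on \(\theta\), so both objectives in Eq. (1) share the same minimizer.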
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.256, + 0.883, + 0.345 + ], + "angle": 0, + "content": "In theory N2N training reaches the same performance as N2C training if the dataset is infinitely large. In practice, since the training set is limited in size, N2N falls slightly short of N2C. For example, N2N training with a UNet on 50k images gives a performance drop of only about \\(0.02\\mathrm{dB}\\) compared to N2C with a UNet." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.347, + 0.883, + 0.422 + ], + "angle": 0, + "content": "Despite the great performance of N2N, its usability is often limited, since it is difficult to obtain a pair of noisy images of the same static scene. For instance, the object being captured might be non-static, or the lighting conditions change rapidly." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.423, + 0.883, + 0.499 + ], + "angle": 0, + "content": "Neighbour2Neighbour (NB2NB) [12] extends N2N and allows training only on a set of single noisy images, by sub-sampling a noisy image to create a pair of noisy images. Similar to N2N, NB2NB exhibits strong denoising performance when trained on many images." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.51, + 0.725, + 0.525 + ], + "angle": 0, + "content": "3.2. Zero-Shot Noise2Noise" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.534, + 0.883, + 0.608 + ], + "angle": 0, + "content": "Our work extends Noise2Noise [17] and Neighbour2Neighbour [12] by enabling training on only one single noisy image. To avoid overfitting to the single image, we use a very shallow network and an explicit regularization term." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.61, + 0.883, + 0.804 + ], + "angle": 0, + "content": "Almost all self- or un-supervised denoising methods, including ours, rely on the premise that a clean natural image has different attributes than random noise. As shown in [12], a noisy image can be decomposed into a pair of downsampled images. Based on the premise that nearby pixels of a clean image are highly correlated and often have similar values, while the noise pixels are unstructured and independent, the downsampled pair of noisy images has similar signal but independent noise. This pair can therefore serve as an approximation of two noisy observations of the same scene, where one observation is used as the input, and the other as the target, as in N2N." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.807, + 0.883, + 0.912 + ], + "angle": 0, + "content": "Our approach is to first decompose the image into a pair of downsampled images and second train a lightweight network with regularization to map one downsampled image to the other. Applying the so-trained network to a noisy image yields the denoised image. We first explain how we generate the downsampled images, and then how we fit the network." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.952, + 0.519, + 0.963 + ], + "angle": 0, + "content": "14020" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.112, + 0.092, + 0.482, + 0.242 + ], + "angle": 0, + "content": "Image Pair Down sampler The pair down sampler takes as input an image \\(\\mathbf{y}\\) of size \\(H\\times W\\times C\\) and generates two images \\(D_{1}(\\mathbf{y})\\) and \\(D_{2}(\\mathbf{y})\\), each of size \\(H / 2\\times W / 2\\times C\\). 
The down sampler generates those images by dividing the image into non-overlapping patches of size \\(2\\times 2\\), taking an average of the diagonal pixels of each patch and assigning it to the first low-resolution image, then the average of the anti-diagonal pixels and assigning it to the second low-resolution image. See Figure 2 for an illustration of the pair down sampler." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.243, + 0.484, + 0.422 + ], + "angle": 0, + "content": "The downsample is implemented with convolutions as follows. The first low-resolution image is obtained by applying a 2D convolution with stride two and fixed kernel \\(\\mathbf{k}_1 = \\begin{bmatrix} 0 & 0.5 \\\\ 0.5 & 0 \\end{bmatrix}\\) to the original image as \\(D_1(\\mathbf{y}) = \\mathbf{y} * \\mathbf{k}_1\\), and the second image is obtained by applying a 2D convolution with stride two and fixed kernel \\(\\mathbf{k}_2 = \\begin{bmatrix} 0.5 & 0 \\\\ 0 & 0.5 \\end{bmatrix}\\) to the original image as \\(D_2(\\mathbf{y}) = \\mathbf{y} * \\mathbf{k}_2\\). The convolutions are implemented channel-wise and therefore the downsampling scheme is applicable to any arbitrary number of input channels." + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.433, + 0.47, + 0.569 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.578, + 0.483, + 0.648 + ], + "angle": 0, + "content": "Figure 2. The Image Pair Downsampler decomposes an image into two images of half the spatial resolution by averaging diagonal pixels of \\(2 \\times 2\\) non-overlapping patches. In the above example the input is a \\(4 \\times 4\\) image, and the output is two \\(2 \\times 2\\) images." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.681, + 0.483, + 0.757 + ], + "angle": 0, + "content": "Zero-shot-image denoising method. Given a test image \\(\\mathbf{y}\\) to denoise, our method is conceptually similar to first fitting a small image-to-image neural network \\(f_{\\theta}\\) to map the first downsampled image \\(D_{1}(\\mathbf{y})\\) to the second one, \\(D_{2}(\\mathbf{y})\\) by minimizing the loss" + }, + { + "type": "equation", + "bbox": [ + 0.185, + 0.765, + 0.482, + 0.784 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} (\\boldsymbol {\\theta}) = \\left\\| f _ {\\boldsymbol {\\theta}} \\left(D _ {1} (\\mathbf {y})\\right) - D _ {2} (\\mathbf {y}) \\right\\| _ {2} ^ {2}. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.792, + 0.483, + 0.837 + ], + "angle": 0, + "content": "Once we fitted the network, we can apply it to the original noisy observation to estimate the denoised image as \\(\\hat{\\mathbf{x}} = f_{\\hat{\\theta}}(\\mathbf{y})\\)" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.838, + 0.484, + 0.913 + ], + "angle": 0, + "content": "However, our experiments showed that residual learning, a symmetric loss, and an additional consistency-enforcing term are critical for good performance. We next explain the elements of our loss function. In residual learning, the network is optimized to fit" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.871, + 0.107 + ], + "angle": 0, + "content": "the noise instead of the image. 
The loss then becomes" + }, + { + "type": "equation", + "bbox": [ + 0.554, + 0.117, + 0.883, + 0.136 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} (\\boldsymbol {\\theta}) = \\left\\| D _ {1} (\\mathbf {y}) - f _ {\\boldsymbol {\\theta}} \\left(D _ {1} (\\mathbf {y})\\right) - D _ {2} (\\mathbf {y}) \\right\\| _ {2} ^ {2}. \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.147, + 0.884, + 0.208 + ], + "angle": 0, + "content": "Following [6], where a symmetric loss was used in the context of self-supervised pretraining of a siamese network, we additionally adopt a symmetric loss, which yields the residual loss:" + }, + { + "type": "equation", + "bbox": [ + 0.528, + 0.218, + 0.882, + 0.286 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {\\text {r e s .}} (\\boldsymbol {\\theta}) = \\frac {1}{2} \\left(\\left\\| D _ {1} (\\mathbf {y}) - f _ {\\boldsymbol {\\theta}} \\left(D _ {1} (\\mathbf {y})\\right) - D _ {2} (\\mathbf {y}) \\right\\| _ {2} ^ {2} + \\right. \\\\ \\left. \\left\\| D _ {2} (\\mathbf {y}) - f _ {\\boldsymbol {\\theta}} \\left(D _ {2} (\\mathbf {y})\\right) - D _ {1} (\\mathbf {y}) \\right\\| _ {2} ^ {2}\\right). \\tag {4} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.288, + 0.884, + 0.348 + ], + "angle": 0, + "content": "In addition, we enforce consistency by ensuring that first denoising the image \\(\\mathbf{y}\\) and then downsampling it, is similar to what we get when first downsampling \\(\\mathbf{y}\\) and then denoising it, i.e., we consider a loss of the form:" + }, + { + "type": "equation", + "bbox": [ + 0.525, + 0.358, + 0.883, + 0.378 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} (\\boldsymbol {\\theta}) = \\left\\| D (\\mathbf {y}) - f _ {\\boldsymbol {\\theta}} (D (\\mathbf {y})) - D (\\mathbf {y} - f _ {\\boldsymbol {\\theta}} (\\mathbf {y})) \\right\\| _ {2} ^ {2}. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.387, + 0.884, + 0.414 + ], + "angle": 0, + "content": "Again adopting a symmetric loss, the consistency loss becomes:" + }, + { + "type": "equation", + "bbox": [ + 0.515, + 0.422, + 0.89, + 0.484 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {\\mathrm {c o n s .}} (\\pmb {\\theta}) = \\frac {1}{2} \\Big (\\| D _ {1} (\\mathbf {y}) - f _ {\\pmb {\\theta}} (D _ {1} (\\mathbf {y})) - D _ {1} (\\mathbf {y} - f _ {\\pmb {\\theta}} (\\mathbf {y})) \\| _ {2} ^ {2} \\\\ \\left. + \\left\\| D _ {2} (\\mathbf {y}) - f _ {\\boldsymbol {\\theta}} \\left(D _ {2} (\\mathbf {y})\\right) - D _ {2} (\\mathbf {y} - f _ {\\boldsymbol {\\theta}} (\\mathbf {y})) \\right\\| _ {2} ^ {2}\\right). \\tag {6} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.485, + 0.884, + 0.574 + ], + "angle": 0, + "content": "Note that for the residual loss, the network only has the downsampled images as input. Only in the consistency loss, the network gets to see the image in full spatial resolution. Including the consistency loss enables better denoising performance and helps to avoid overfitting. It can therefore be seen as a regularizing term." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.576, + 0.884, + 0.728 + ], + "angle": 0, + "content": "In summary, we minimize the loss \\(\\mathcal{L}(\\pmb{\\theta}) = \\mathcal{L}_{\\mathrm{res.}}(\\pmb{\\theta}) + \\mathcal{L}_{\\mathrm{cons.}}(\\pmb{\\theta})\\) using gradient descent, which yields the network parameters \\(\\hat{\\pmb{\\theta}}\\). 
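To make this optimization concrete, the following is a minimal PyTorch-style sketch of the pair downsampler, a small residual-predicting network, and the combined loss from Eqs. (4) and (6). The 48-channel width, ReLU activations, Adam optimizer, learning rate, and step count are illustrative assumptions rather than the authors' released configuration (their code and hyperparameters are in the Colab notebook linked in the conclusion).

```python
# Minimal sketch of ZS-N2N for a single noisy image y of shape (1, C, H, W).
# Width, activations, optimizer, lr and step count are assumptions for illustration.
import torch
import torch.nn as nn
import torch.nn.functional as F

def pair_downsampler(y):
    """D1, D2: average diagonal / anti-diagonal pixels of 2x2 patches via stride-2 convs."""
    c = y.shape[1]
    k1 = torch.tensor([[[[0.0, 0.5], [0.5, 0.0]]]], dtype=y.dtype, device=y.device).repeat(c, 1, 1, 1)
    k2 = torch.tensor([[[[0.5, 0.0], [0.0, 0.5]]]], dtype=y.dtype, device=y.device).repeat(c, 1, 1, 1)
    return F.conv2d(y, k1, stride=2, groups=c), F.conv2d(y, k2, stride=2, groups=c)

class TwoLayerNet(nn.Module):
    """Lightweight image-to-image network: two 3x3 convs followed by a 1x1 conv."""
    def __init__(self, channels, width=48):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(channels, width, 3, padding=1), nn.ReLU(),
            nn.Conv2d(width, width, 3, padding=1), nn.ReLU(),
            nn.Conv2d(width, channels, 1),
        )

    def forward(self, x):
        return self.net(x)  # predicts the noise (residual learning)

def zsn2n_loss(f, y):
    d1, d2 = pair_downsampler(y)
    # symmetric residual loss, Eq. (4)
    loss_res = 0.5 * (F.mse_loss(d1 - f(d1), d2) + F.mse_loss(d2 - f(d2), d1))
    # symmetric consistency loss, Eq. (6): downsample the full-resolution denoised image
    dd1, dd2 = pair_downsampler(y - f(y))
    loss_cons = 0.5 * (F.mse_loss(d1 - f(d1), dd1) + F.mse_loss(d2 - f(d2), dd2))
    return loss_res + loss_cons

def denoise(y, steps=2000, lr=1e-3):
    f = TwoLayerNet(y.shape[1]).to(y.device)
    opt = torch.optim.Adam(f.parameters(), lr=lr)
    for _ in range(steps):
        opt.zero_grad()
        zsn2n_loss(f, y).backward()
        opt.step()
    with torch.no_grad():
        return y - f(y)  # denoised estimate x_hat = y - f(y)
```

Here `denoise` expects the noisy image as a float tensor of shape (1, C, H, W); its return value is the estimate \(\hat{\mathbf{x}} = \mathbf{y} - f_{\hat{\theta}}(\mathbf{y})\) described next.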
With those, we estimate the denoised image as \\(\\hat{\\mathbf{x}} = \\mathbf{y} - f_{\\hat{\\pmb{\\theta}}}(\\mathbf{y})\\). Note that only the network parameters \\(\\pmb{\\theta}\\) are optimized during the gradient descent updates, since the downsampling operations \\(D_{1}\\) and \\(D_{2}\\) are fixed. Convergence typically requires 1k to 2k iterations, which thanks to using a lightweight network takes less than half a minute on a GPU and around one minute on a CPU." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.746, + 0.885, + 0.913 + ], + "angle": 0, + "content": "Network Many supervised and self-supervised methods use a relatively large network, often a UNet [25]. Instead, we use a very simple two-layer image-to-image network. It consists of only two convolutional operators with kernel size \\(3 \\times 3\\) followed by one operator of \\(1 \\times 1\\) convolutions. This network has about \\(20k\\) parameters, which is small compared to typical denoising networks. An exact comparison of the network sizes can be found in section 4.4. There are no normalization or pooling layers. The low parameter count and simple structure enables fast denoising even when deployed on a CPU." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.952, + 0.518, + 0.963 + ], + "angle": 0, + "content": "14021" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.092, + 0.482, + 0.138 + ], + "angle": 0, + "content": "In the ablation studies we show that using a UNet instead of a lightweight network leads to overfitting and much worse denoising performance." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.154, + 0.245, + 0.17 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.179, + 0.482, + 0.299 + ], + "angle": 0, + "content": "We compare our denoising algorithm (ZS-N2N) to several baselines. The baselines include dataset based methods, as well as other zero-shot methods. For the dataset based methods, we include both supervised (with clean images) and self-supervised (only noisy images) methods. We test all methods on artificial and real-world noise. We provide ablation studies in the supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.301, + 0.482, + 0.405 + ], + "angle": 0, + "content": "The results highlight the dependency of dataset based methods on the dataset they are trained on and suggest that given a small training set, they are outperformed by dataset free ones. Furthermore, the experiments show that methods based on noise models achieve good performance for the specific noise model, but do not generalise to other distributions." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.408, + 0.482, + 0.573 + ], + "angle": 0, + "content": "Concerning the dataset and noise model free methods, our proposed method is either on par or better than other baselines on Gaussian, Poisson, and real world camera and microscope noise. Our method only falls short of Self2Self [24] on high noise levels, however, it requires only \\(\\frac{1}{200}\\) of the denoising time of Self2Self and \\(2\\%\\) of its memory. Moreover, Self2self's performance on low noise levels is insufficient. Therefore, considering denoising quality, generalisition, and computational resources, our method achieves a better trade-off compared to existing methods as shown in Figure 1." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.585, + 0.221, + 0.6 + ], + "angle": 0, + "content": "4.1. 
Baselines" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.609, + 0.482, + 0.729 + ], + "angle": 0, + "content": "We compare to Noise2Clean (N2C) with a UNet, which is the current state-of-the-art denoising algorithm. There exists several other networks that perform on par with the UNet, such as DnCNN [32] and RED30 [22], but the UNet is orders of magnitude faster, since it is not very deep, and has a multi-resolution structure. The UNet is therefore the standard choice in all recent denoising papers [12, 15, 17, 23]." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.731, + 0.482, + 0.896 + ], + "angle": 0, + "content": "For the self-supervised methods, we compare to Neighbour2Neighbour (NB2NB) [12] and Noise2Void (N2V) [15]. We exclude the methods that require an explicit noise model, such as [4, 16, 23, 34], since these methods work well on synthetic denoising tasks for the given noise distribution, but fail to generalize to unknown noise distributions or real-world noise [12, 30]. This is due to the fact that the synthetic noise is insufficient for simulating real camera noise, which is signal-dependent and substantially altered by the camera's imaging system." + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.898, + 0.482, + 0.912 + ], + "angle": 0, + "content": "Regarding the zero-shot methods, which are most" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.884, + 0.168 + ], + "angle": 0, + "content": "similar to ours, we compare to the deep learning based algorithms: DIP [28] and Self2Self (S2S) [24], and also to the classical algorithm: BM3D [7]. Note that apart of our method (and BM3D), all baselines use a U-Net or a variation of it as a denoising backbone." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.168, + 0.884, + 0.304 + ], + "angle": 0, + "content": "The performance of DIP is very sensitive to the number of gradient descent steps. We used the ground truth images to determine the best early stopping iteration. The DIP results can therefore be seen as an over optimistic performance of the method. For a fair comparison, we report the results of the best performing model for the other baselines. A comparison of the sensitivity of the methods to the number of optimization steps can be found in the supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.305, + 0.882, + 0.41 + ], + "angle": 0, + "content": "The original implementation of S2S uses an ensemble of multiple networks, i.e., averaging the outputs of several networks. All other baselines do not utilize ensembling or averaging. For a fair comparison, we additionally report the results of S2S without any ensembling, which we denote by S2S*. S2S denotes the original implementation with an ensemble of 50 networks." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.422, + 0.669, + 0.437 + ], + "angle": 0, + "content": "4.2. Synthetic Noise" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.447, + 0.884, + 0.521 + ], + "angle": 0, + "content": "The dataset based methods (N2C, NB2NB, N2V) are trained on 500 colour images from ImageNet [10]. All methods are tested on the Kodak24 \\( {}^{1} \\) and McMaster18 [13] datasets. All training and test images are centercropped to patches of size \\( {256} \\times {256} \\) ." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.522, + 0.884, + 0.613 + ], + "angle": 0, + "content": "We examine Gaussian and Poisson noise with noise levels \\(\\sigma\\) and \\(\\lambda\\) respectively. 
We consider the fixed noise levels \\(\\sigma, \\lambda = 10, 25, 50\\). The \\(\\sigma\\) values for Gaussian noise correspond to pixel values in the interval [0,255], while the \\(\\lambda\\) values for Poisson noise correspond to values in the interval [0,1]." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.614, + 0.882, + 0.749 + ], + "angle": 0, + "content": "For the dataset based methods, we also consider blind denoising during training with the range of noise levels \\(\\sigma, \\lambda \\in [10, 50]\\). During training, a \\(\\sigma, \\lambda\\) value is sampled uniformly from the given range for each image in each training epoch, unlike the fixed noise levels, where all training images are contaminated with the same noise level. Blind denoising is what is used in practice, since an exact noise level is typically not given, but rather a range of noise levels." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.751, + 0.884, + 0.886 + ], + "angle": 0, + "content": "In table 1, we present the denoising performance of the different methods. For the dataset based methods, \\(\\sigma, \\lambda\\) is known, denotes that the network trained on that exact noise level is used for testing, while unknown denotes the blind denoising, where the network trained on the range of noise levels [10,50] is used for testing. BM3D requires as input the value of the noise level. For Gaussian denoising the known \\(\\sigma\\) value was used, while for Possion denoising the noise level was estimated us" + }, + { + "type": "page_footnote", + "bbox": [ + 0.531, + 0.899, + 0.768, + 0.912 + ], + "angle": 0, + "content": "http://r0k.us/graphics/kodak/" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.952, + 0.519, + 0.963 + ], + "angle": 0, + "content": "14022" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.091, + 0.482, + 0.135 + ], + "angle": 0, + "content": "ing the method in [5]. Note that ZS-N2N, DIP, and S2S do not utilize any prior information on the noise distribution or level." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.138, + 0.482, + 0.318 + ], + "angle": 0, + "content": "As seen from the results, the dataset based methods often fall slightly short of the dataset free methods. This is due to the fact that they were only trained on 500 images, whereas they reach good performance when trained on larger datasets. In the supplementary material, we show that when N2C is trained on 4000 images, it outperforms all other baselines and its performance can keep improving with more training data. Another drawback of dataset based methods is that they are sensitive to the data they are trained on. They experience a performance drop when trained on a range of noise levels as opposed to a specific noise level as the test set." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.319, + 0.482, + 0.408 + ], + "angle": 0, + "content": "Regarding the zero-shot methods, DIP exhibited worse scores in all simulations. BM3D is tailored to work well for Gaussian denoising, where the exact noise variance is known and required as input. However, its performance dropped for Poisson noise, where the noise level was estimated." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.41, + 0.482, + 0.575 + ], + "angle": 0, + "content": "ZS-N2N and S2S do not rely on a specific noise model and therefore work consistently well for both Gaussian and Poisson noise. However, S2S suffers from at least two drawbacks. 
The first is it heavily relies on ensembling to achieve good scores as seen by comparing the results of S2S with \\(\\mathrm{S2S^{*}}\\). Despite improving the scores, ensembling oversmoothens the image causing a loss in some visual features [8]. Note that all other baselines are ensemble free. The second drawback is that it performs worse than all other baselines on low noise levels, as seen in the Gaussian noise with \\(\\sigma = 10\\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.576, + 0.482, + 0.666 + ], + "angle": 0, + "content": "Considering that DIP performs poorly, that BM3D only works well for Gaussian noise, and that S2S's performance without ensembling and on low noise levels is unsatisfactory, our method, ZS-N2N is the only dataset free denoising algorithm that performs well on different noise distributions and levels." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.677, + 0.285, + 0.691 + ], + "angle": 0, + "content": "4.3. Real-World Noise" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.701, + 0.482, + 0.806 + ], + "angle": 0, + "content": "Camera noise: Following [24], we evaluate on the PolyU dataset [29] which consists of high-resolution images from various scenes captured by 5 cameras from the 3 leading brands of cameras: Canon, Nikon, and Sony. We also consider the SIDD [1], which consists of images captured by several smartphone cameras under different lighting conditions and noise patterns." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.807, + 0.482, + 0.911 + ], + "angle": 0, + "content": "Since the computational cost for running S2S is high, we randomly choose 20 images from both datasets to test on. The SIDD validation set has images of size \\(256 \\times 256\\). For consistency, we center-crop the PolyU images to patches of size \\(256 \\times 256\\). The results are shown in table 2. All methods perform similarly except for BM3D and the ensemble free version of S2S, which" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.092, + 0.75, + 0.107 + ], + "angle": 0, + "content": "exhibit a notable performance drop." + }, + { + "type": "table", + "bbox": [ + 0.515, + 0.12, + 0.895, + 0.17 + ], + "angle": 0, + "content": "
Dataset | ZS-N2N | DIP | S2S | S2S* | BM3D
PolyU | 36.92 | 37.07 | 37.01 | 33.12 | 36.11
SIDD | 34.07 | 34.31 | 33.98 | 30.77 | 28.19
" + }, + { + "type": "table_caption", + "bbox": [ + 0.516, + 0.181, + 0.878, + 0.194 + ], + "angle": 0, + "content": "Table 2. Denoising PSNR in dB on real world camera noise." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.217, + 0.882, + 0.367 + ], + "angle": 0, + "content": "Microscope noise: We additionally evaluate on the Fluorescence Microscopy dataset [33], which contains real grayscale fluorescence images obtained with commercial confocal, two-photon, and wide-field microscopes and representative biological samples such as cells, zebrafish, and mouse brain tissues. We pick random images from the test set to test on. We also compare to Noise2Fast (N2F) [18], for which code for denoising grayscale is available. The results are depicted in table 3." + }, + { + "type": "table", + "bbox": [ + 0.522, + 0.379, + 0.875, + 0.503 + ], + "angle": 0, + "content": "
Image | Photon BPAE | Photon Mice | Confocal BPAE | Average
ZS-N2N | 30.73 | 31.42 | 35.85 | 32.67
DIP | 29.22 | 30.01 | 35.51 | 31.58
S2S | 30.90 | 31.51 | 31.01 | 31.14
S2S* | 29.49 | 29.99 | 29.54 | 29.67
BM3D | 27.19 | 29.48 | 33.23 | 29.97
N2F | 30.93 | 31.07 | 36.01 | 32.67
" + }, + { + "type": "table_caption", + "bbox": [ + 0.537, + 0.515, + 0.859, + 0.529 + ], + "angle": 0, + "content": "Table 3. PSNR in dB on real world microscope noise." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.548, + 0.882, + 0.773 + ], + "angle": 0, + "content": "Our method and Noise2Fast achieve similar scores and slightly outperform the other baselines. Despite the similarity in scores, when inspecting the denoised images visually, we see differences: Our method produces visually sharper images and preserves slightly more details, while the Noise2Fast images are relatively smooth. This is most noticeable on images with fine details, such as MRI images, see Figure 3 for a knee image from the fastMRI dataset [31]. The blurriness in the Noise2Fast images is likely due to the downsampling scheme used, which drops some pixel values, and the ensembling performed to obtain the final image estimate, which oversmoothens the image [8]. Our method, on the other hand, preserves all pixel values during downsampling, and is ensemble free." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.784, + 0.745, + 0.8 + ], + "angle": 0, + "content": "4.4. Computational Efficiency" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.807, + 0.882, + 0.911 + ], + "angle": 0, + "content": "In this section we focus on the computational efficiency. We consider the denoising time and the memory requirements represented by the number of network parameters. Since in some applications a GPU is not available [9], we additionally consider the denoising time on a CPU. The GPU tested is Quadro RTX 6000 and the CPU is Intel Core i9-9940X 3.30GHz." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.952, + 0.518, + 0.963 + ], + "angle": 0, + "content": "14023" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.139, + 0.089, + 0.86, + 0.476 + ], + "angle": 0, + "content": "
Noise | Method | Kodak24 | McMaster18
Gaussian | dataset-based | N2C | σ known? | σ = 10 | σ = 25 | σ = 50 | σ = 10 | σ = 25
yes | 33.45 | 28.27 | 25.47 | 33.03 | 28.46
no | 32.16 | 28.18 | 24.45 | 31.97 | 28.26
NB2NB | yes | 33.01 | 27.90 | 25.02 | 32.63 | 28.01
no | 31.79 | 27.80 | 24.15 | 31.19 | 27.85
N2V | yes | 30.19 | 26.21 | 24.07 | 30.95 | 26.50
no | 28.95 | 26.03 | 23.19 | 29.64 | 26.31
dataset-free | ZS-N2N (ours) | - | 33.69 | 29.07 | 24.81 | 34.21 | 28.80
DIP | - | 32.28 | 27.38 | 23.95 | 33.07 | 27.61
S2S | - | 29.54 | 28.39 | 26.22 | 30.78 | 28.71
S2S* | - | 26.93 | 26.29 | 24.83 | 27.64 | 26.48
BM3D | yes | 33.74 | 29.02 | 25.51 | 34.51 | 29.21
Poisson | dataset-based | N2C | λ known? | λ = 50 | λ = 25 | λ = 10 | λ = 50 | λ = 25
yes | 29.42 | 27.49 | 26.25 | 29.89 | 28.20
no | 28.92 | 27.14 | 23.13 | 28.62 | 27.51
NB2NB | yes | 29.19 | 27.01 | 25.71 | 29.41 | 27.79
no | 28.53 | 26.88 | 23.60 | 28.03 | 27.66
N2V | yes | 27.73 | 25.55 | 23.77 | 27.86 | 25.65
no | 27.04 | 25.28 | 21.93 | 26.34 | 25.52
dataset-free | ZS-N2N (ours) | - | 29.45 | 27.52 | 24.92 | 30.36 | 28.41
DIP | - | 27.51 | 25.84 | 23.81 | 28.73 | 27.37
S2S | - | 28.89 | 28.31 | 27.29 | 30.11 | 29.40
S2S* | - | 26.75 | 26.40 | 25.63 | 27.55 | 27.24
BM3D | no | 28.36 | 26.58 | 24.20 | 27.33 | 24.77
" + }, + { + "type": "table_caption", + "bbox": [ + 0.117, + 0.488, + 0.882, + 0.543 + ], + "angle": 0, + "content": "Table 1. PSNR scores in dB for Gaussian and Poisson denoising. Best result is in bold, second best result is underlined. The dataset based methods are italicized. Note DIP's mediocre scores and BM3D's performance drop between Gaussian and Poisson noise. S2S has significantly lower scores in low noise as seen with \\(\\sigma = 10\\) and its ensemble free version S2S* has inadequate performance. Denoised samples can be found in the supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.57, + 0.482, + 0.72 + ], + "angle": 0, + "content": "In table 4 we display the time required to denoise one colour image of size \\(256 \\times 256\\) at inference, as well as the total number of trainable parameters of a model. The dataset based methods are trained for long durations, but after training, the network parameters are fixed, and inference is almost instantaneous, since it is just a forward pass through the model. The time taken for denoising is therefore negligible compared to the zero-shot methods, whose parameters are optimized for each test image separately." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.727, + 0.482, + 0.816 + ], + "angle": 0, + "content": "In the original implementation of S2S, the authors report a denoising time of 1.2 hours for a \\(256 \\times 256\\) colour image on GPU. However, we noticed that only half of the gradient update iterations are needed for convergence. We therefore report only half of their GPU time." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.822, + 0.482, + 0.911 + ], + "angle": 0, + "content": "Concerning the denoising time, dataset based methods are the fastest, since a forward pass through a fixed network requires only milli seconds. Regarding the deep learning based zero-shot methods, ZS-N2N is significantly more computationally efficient. Specifically, on CPU it is 200 times and 35 times faster than S2S and" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.57, + 0.882, + 0.615 + ], + "angle": 0, + "content": "DIP respectively and has only \\(2\\%\\) and \\(1\\%\\) of their memory requirements. Only the classical BM3D is computationally more efficient than ZS-N2N." + }, + { + "type": "title", + "bbox": [ + 0.517, + 0.63, + 0.63, + 0.644 + ], + "angle": 0, + "content": "4.5. Discussion" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.655, + 0.882, + 0.759 + ], + "angle": 0, + "content": "Dataset based methods typically achieve state-of-the-art results but our experiments manifested two of their shortcomings: They don't perform well when trained on small datasets, and the performance drops when the test data differs from the training data, as seen by varying the noise levels. This highlights the importance of dataset free denoising algorithms." + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.762, + 0.882, + 0.911 + ], + "angle": 0, + "content": "Methods that rely on an explicit model of the noise distribution such as Noisier2Noise [23] and Anscombe [20] or those tailored to work well for specific distributions such as BM3D, do not generalize well to other distributions. Their performance therefore degrades when the noise distribution is unknown, or the noise level must be estimated. This has been manifested by BM3D's competitive performance on Gaussian noise, but its failure to keep up with the other baselines on Poisson and real world noise. 
These findings highlight the advantage" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.953, + 0.518, + 0.963 + ], + "angle": 0, + "content": "14024" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.115, + 0.088, + 0.271, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.318, + 0.088, + 0.477, + 0.222 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.524, + 0.088, + 0.682, + 0.222 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.728, + 0.089, + 0.886, + 0.222 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.115, + 0.233, + 0.273, + 0.352 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.319, + 0.233, + 0.474, + 0.352 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.525, + 0.233, + 0.68, + 0.352 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.729, + 0.233, + 0.884, + 0.352 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.378, + 0.884, + 0.42 + ], + "angle": 0, + "content": "Figure 3. Visual comparison between our method and Noise2Fast for denoising Gaussian noise on a knee MRI. Both methods achieve similar PSNR, but notice how the center and left edge are blurry and oversmooth in Noise2Fast. Our method produces a sharper image with less loss of details." + }, + { + "type": "table", + "bbox": [ + 0.211, + 0.434, + 0.789, + 0.5 + ], + "angle": 0, + "content": "
Method | N2C | NB2NB | N2V | ZS-N2N | DIP | S2S | BM3D
GPU time | - | - | - | 20 sec. | 3 min. | 35 min. | 4 sec.
CPU time | - | - | - | 80 sec. | 45 min. | 4.5 hr. | 4 sec.
Network size | 3.3M | 1.3M | 2.2M | 22k | 2.2M | 1M | -
" + }, + { + "type": "table_caption", + "bbox": [ + 0.111, + 0.509, + 0.884, + 0.553 + ], + "angle": 0, + "content": "Table 4. Computational Resources. First and Second Rows: Time taken to denoise one image on average on GPU and CPU. The time for the dataset based methods is discarded, since it is negligible. BM3D does not benefit from the GPU, as there is no optimization involved. Bottom Row: Number of parameters of a network." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.579, + 0.321, + 0.594 + ], + "angle": 0, + "content": "of noise model free techniques." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.595, + 0.483, + 0.715 + ], + "angle": 0, + "content": "Regarding the three dataset free and noise model free methods considered, DIP was often lagging behind S2S and ZS-N2N, despite using the ground truths to find the best possible early stopping iteration. S2S's performance without ensembling is inadequate, and even with ensembling, it does not work well on low noise levels. Moreover, it requires more than 0.5 hours to denoise an image on a GPU and 4.5 hours on a CPU." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.717, + 0.483, + 0.822 + ], + "angle": 0, + "content": "Except for ZS-N2N, all deep learning based baselines have millions of parameters, making them computationally expensive. Considering ZS-N2N's ability to generalize to various denoising conditions with relatively fast denoising time, very few parameters, and CPU compatibility, we can conclude that it offers a good trade-off between denoising quality and computational resources." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.84, + 0.233, + 0.856 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.867, + 0.483, + 0.913 + ], + "angle": 0, + "content": "We proposed a novel zero-shot image denoising algorithm that does not require any training examples or knowledge of the noise model or level. Our work uses a" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.579, + 0.885, + 0.744 + ], + "angle": 0, + "content": "simple 2-layer network, and allows denoising in a relatively short period of time even when executed without a GPU. The method can perform well on simulated noise as well as real-world camera and microscope noise, and achieves a good trade-off between generalization, denoising quality and computational resources compared to existing dataset free methods. A demo of our implementation including our code and hyperparameters can be found in the following colab notebook: https://colab.research.google.com/drive/1i82nyizTdszyHkaHBuKPbWnTzao8HF9b" + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.764, + 0.682, + 0.781 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.792, + 0.884, + 0.911 + ], + "angle": 0, + "content": "The authors are supported by the Institute of Advanced Studies at the Technical University of Munich, the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) - 456465471, 464123524, the German Federal Ministry of Education and Research, and the Bavarian State Ministry for Science and the Arts. The authors of this work take full responsibility for its content." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.952, + 0.518, + 0.963 + ], + "angle": 0, + "content": "14025" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.115, + 0.09, + 0.21, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.123, + 0.115, + 0.484, + 0.17 + ], + "angle": 0, + "content": "[1] Abdelrahman Abdelhamed, Stephen Lin, and Michael S. Brown. A high-quality denoising dataset for smartphone cameras. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.123, + 0.172, + 0.484, + 0.254 + ], + "angle": 0, + "content": "[2] Joshua Batson and Loic Royer. Noise2Self: Blind denoising by self-supervision. In Kamalika Chaudhuri and Ruslan Salakhutdinov, editors, Proceedings of the 36th International Conference on Machine Learning, volume 97 of Proceedings of Machine Learning Research, pages 524-533. PMLR, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.123, + 0.256, + 0.482, + 0.323 + ], + "angle": 0, + "content": "[3] Tim Brooks, Ben Mildenhall, Tianfan Xue, Jiawen Chen, Dillon Sharlet, and Jonathan T. Barron. Unprocessing Images for Learned Raw Denoising. In IEEE Conference on Computer Vision and Pattern Recognition, pages 11036-11045, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.123, + 0.326, + 0.482, + 0.379 + ], + "angle": 0, + "content": "[4] Sungmin Cha and Taesup Moon. Fully convolutional pixel adaptive image denoiser. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2019. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.123, + 0.381, + 0.482, + 0.436 + ], + "angle": 0, + "content": "[5] Guangyong Chen, Fengyuan Zhu, and Pheng Ann Heng. An efficient statistical method for image noise level estimation. In 2015 IEEE International Conference on Computer Vision (ICCV), pages 477-485, 2015. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.123, + 0.438, + 0.482, + 0.492 + ], + "angle": 0, + "content": "[6] Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15750-15758, 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.123, + 0.494, + 0.482, + 0.548 + ], + "angle": 0, + "content": "[7] K. Dabov, A. Foi, V. Katkovnik, and K. Egiazarian. Image Denoising by Sparse 3-D Transform-Domain Collaborative Filtering. IEEE Transactions on Image Processing, 16(8):2080-2095, 2007. 1, 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.123, + 0.55, + 0.482, + 0.604 + ], + "angle": 0, + "content": "[8] Mohammad Zalbagi Darestani and Reinhard Heckel. Accelerated mri with un-trained neural networks. IEEE Transactions on Computational Imaging, 7:724-733, 2021. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.123, + 0.606, + 0.482, + 0.673 + ], + "angle": 0, + "content": "[9] Mauricio Delbracio, Ignacio Garcia-Dorado, SungJoon Choi, Damien Kelly, and Peyman Milanfar. Polyblur: Removing mild blur by polynomial reblurring. IEEE Transactions on Computational Imaging, 7:837-848, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.676, + 0.482, + 0.731 + ], + "angle": 0, + "content": "[10] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248-255, 2009. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.732, + 0.482, + 0.785 + ], + "angle": 0, + "content": "[11] Reinhard Heckel and Paul Hand. Deep decoder: Concise image representations from untrained non-convolutional networks. International Conference on Learning Representations, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.788, + 0.482, + 0.856 + ], + "angle": 0, + "content": "[12] Tao Huang, Songjiang Li, Xu Jia, Huchuan Lu, and Jianzhuang Liu. Neighbor2neighbor: Self-supervised denoising from single noisy images. In 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14776-14785, 2021. 1, 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.858, + 0.482, + 0.912 + ], + "angle": 0, + "content": "[13] Sandip M. Kasar and Sachin D. Ruikar. Image demosaicking by nonlocal adaptive thresholding. In 2013 International Conference on Signal Processing, Image Processing Pattern Recognition, pages 34-38, 2013. 5" + }, + { + "type": "list", + "bbox": [ + 0.115, + 0.115, + 0.484, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.093, + 0.884, + 0.133 + ], + "angle": 0, + "content": "[14] Chaewon Kim, Jaeho Lee, and Jinwoo Shin. Zero-shot blind image denoising via implicit neural representations, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.136, + 0.884, + 0.204 + ], + "angle": 0, + "content": "[15] Alexander Krull, Tim-Oliver Buchholz, and Florian Jug. Noise2void - learning denoising from single noisy images. In 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2124-2132, 2019. 1, 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.207, + 0.884, + 0.289 + ], + "angle": 0, + "content": "[16] Samuli Laine, Tero Karras, Jaakko Lehtinen, and Timo Aila. High-quality self-supervised deep image denoising. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.291, + 0.884, + 0.401 + ], + "angle": 0, + "content": "[17] Jaakko Lehtinen, Jacob Munkberg, Jon Hasselgren, Samuli Laine, Tero Karras, Miika Aittala, and Timo Aila. Noise2Noise: Learning image restoration without clean data. In Jennifer Dy and Andreas Krause, editors, Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pages 2965-2974. PMLR, 2018. 1, 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.404, + 0.883, + 0.445 + ], + "angle": 0, + "content": "[18] Jason Lequyer, Reuben Philip, Amit Sharma, Wen-Hsin Hsu, and Laurence Pelletier. A fast blind zero-shot denoiser. Nature Machine Intelligence, oct 2022. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.447, + 0.884, + 0.529 + ], + "angle": 0, + "content": "[19] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 10012-10022, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.531, + 0.883, + 0.585 + ], + "angle": 0, + "content": "[20] Markku Makitalo and Alessandro Foi. Optimal inversion of the anscombe transformation in low-countoisson image denoising. IEEE Transactions on Image Processing, 20(1):99-109, 2011. 
1, 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.588, + 0.883, + 0.629 + ], + "angle": 0, + "content": "[21] Youssef Mansour, Kang Lin, and Reinhard Heckel. Image-to-image mlp-mixer for image reconstruction. CoRR, abs/2202.02018, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.631, + 0.884, + 0.713 + ], + "angle": 0, + "content": "[22] Xiaojiao Mao, Chunhua Shen, and Yu-Bin Yang. Image restoration using very deep convolutional encoder-decoder networks with symmetric skip connections. In D. Lee, M. Sugiyama, U. Luxburg, I. Guyon, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 29. Curran Associates, Inc., 2016. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.716, + 0.883, + 0.783 + ], + "angle": 0, + "content": "[23] Nick Moran, Dan Schmidt, Yu Zhong, and Patrick Coady. Noisier2noise: Learning to denoise from unpaired noisy data. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 12061-12069, 2020. 2, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.786, + 0.883, + 0.855 + ], + "angle": 0, + "content": "[24] Yuhui Quan, Mingqin Chen, Tongyao Pang, and Hui Ji. Self2self with dropout: Learning self-supervised denoising from single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 1, 2, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.857, + 0.883, + 0.912 + ], + "angle": 0, + "content": "[25] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In Nassir Navab, Joachim Hornegger, William M. Wells, and Alejandro F. Frangi, editors," + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.884, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.952, + 0.52, + 0.964 + ], + "angle": 0, + "content": "14026" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.145, + 0.093, + 0.482, + 0.135 + ], + "angle": 0, + "content": "Medical Image Computing and Computer-Assisted Intervention - MICCAI 2015, pages 234-241, Cham, 2015. Springer International Publishing. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.136, + 0.483, + 0.218 + ], + "angle": 0, + "content": "[26] Shakarim Soltanayev and Se Young Chun. Training deep learning based denoisers without ground truth data. In S. Bengio, H. Wallach, H. Larochelle, K. Grauman, N. Cesa-Bianchi, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 31. Curran Associates, Inc., 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.22, + 0.483, + 0.301 + ], + "angle": 0, + "content": "[27] Zhengzhong Tu, Hossein Talebi, Han Zhang, Feng Yang, Peyman Milanfar, Alan Bovik, and Yinxiao Li. Maxim: Multi-axis mlp for image processing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5769-5780, June 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.305, + 0.483, + 0.358 + ], + "angle": 0, + "content": "[28] Dmitry Ulyanov, Andrea Vedaldi, and Victor Lempitsky. Deep image prior. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 1, 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.361, + 0.482, + 0.401 + ], + "angle": 0, + "content": "[29] Jun Xu, Hui Li, Zhetong Liang, David Zhang, and Lei Zhang. Real-world noisy image denoising: A new benchmark, 2018. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.404, + 0.482, + 0.485 + ], + "angle": 0, + "content": "[30] Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, Ming-Hsuan Yang, and Ling Shao. Cycleisp: Real image restoration via improved data synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.488, + 0.482, + 0.625 + ], + "angle": 0, + "content": "[31] Jure Zbontar, Florian Knoll, Anuroop Sriram, Tullie Murrell, Zhengnan Huang, Matthew J. Muckley, Aaron Defazio, Ruben Stern, Patricia Johnson, Mary Bruno, Marc Parente, Krzysztof J. Geras, Joe Katsnelson, Hersh Chandarana, Zizhao Zhang, Michal Drozdzal, Adriana Romero, Michael Rabbat, Pascal Vincent, Nafissa Yakubova, James Pinkerton, Duo Wang, Erich Owens, C. Lawrence Zitnick, Michael P. Recht, Daniel K. Sodickson, and Yvonne W. Lui. fastMRI: An open dataset and benchmarks for accelerated MRI. 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.628, + 0.482, + 0.682 + ], + "angle": 0, + "content": "[32] K. Zhang, W. Zuo, Y. Chen, D. Meng, and L. Zhang. Beyond a Gaussian Denoiser: Residual Learning of Deep CNN for Image Denoising. IEEE Transactions on Image Processing, 26(7):3142-3155, 2017. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.684, + 0.482, + 0.739 + ], + "angle": 0, + "content": "[33] Yide Zhang, Yinhao Zhu, Evan Nichols, Qingfei Wang, Siyuan Zhang, Cody Smith, and Scott Howard. Aoisson-gaussian denoising dataset with real fluorescence microscopy images. In CVPR, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.741, + 0.482, + 0.837 + ], + "angle": 0, + "content": "[34] Magaiya Zhussip, Shakarim Soltanayev, and Se Young Chun. Extending stein's unbiased risk estimator to train deep denoisers with correlated pairs of noisy images. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. 
2, 5" + }, + { + "type": "list", + "bbox": [ + 0.116, + 0.093, + 0.483, + 0.837 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.952, + 0.519, + 0.963 + ], + "angle": 0, + "content": "14027" + } + ] +] \ No newline at end of file diff --git a/2023/Zero-Shot Noise2Noise_ Efficient Image Denoising Without Any Data/2d536e4a-ee00-4291-84f0-1f5cbbcd1b0f_origin.pdf b/2023/Zero-Shot Noise2Noise_ Efficient Image Denoising Without Any Data/2d536e4a-ee00-4291-84f0-1f5cbbcd1b0f_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..adeff1229d910b7242da7f474313157290a89e08 --- /dev/null +++ b/2023/Zero-Shot Noise2Noise_ Efficient Image Denoising Without Any Data/2d536e4a-ee00-4291-84f0-1f5cbbcd1b0f_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22714e447fe6b118c10e07bbb0dfec478148ce738e49d0303dfdf5d0d21075f5 +size 757803 diff --git a/2023/Zero-Shot Noise2Noise_ Efficient Image Denoising Without Any Data/full.md b/2023/Zero-Shot Noise2Noise_ Efficient Image Denoising Without Any Data/full.md new file mode 100644 index 0000000000000000000000000000000000000000..783a4fb2a249207c403983edbf4699502aa8a684 --- /dev/null +++ b/2023/Zero-Shot Noise2Noise_ Efficient Image Denoising Without Any Data/full.md @@ -0,0 +1,304 @@ +# Zero-Shot Noise2Noise: Efficient Image Denoising without any Data + +Youssef Mansour and Reinhard Heckel +Technical University of Munich and Munich Center for Machine Learning +Munich, Germany + +y.mansour@tum.de, reinhard.heckel@tum.de + +# Abstract + +Recently, self-supervised neural networks have shown excellent image denoising performance. However, current dataset free methods are either computationally expensive, require a noise model, or have inadequate image quality. In this work we show that a simple 2-layer network, without any training data or knowledge of the noise distribution, can enable high-quality image denoising at low computational cost. Our approach is motivated by Noise2Noise and Neighbor2Neighbor and works well for denoising pixel-wise independent noise. Our experiments on artificial, real-world camera, and microscope noise show that our method termed ZS-N2N (Zero Shot Noise2Noise) often outperforms existing dataset-free methods at a reduced cost, making it suitable for use cases with scarce data availability and limited compute. + +# 1. Introduction + +Image denoising is the process of removing distortions from images, to enhance them visually and to reconstruct fine details. The latter is especially important for medical images, where fine details are necessary for an accurate diagnosis. + +Current state-of-the-art image denoising techniques rely on large data sets of clean-noisy image pairs and often consist of a neural network trained to map the noisy to the clean image. The drawbacks of dataset-based methods are that data collection, even without ground truths, is expensive and time-consuming, and second, a network trained on dataset suffers from a performance drop if the test images come from a different distribution of images. These drawbacks motivate research in dataset-free methods. + +All current zero-shot models are either suitable only for specific noise distributions and need previous knowledge of the noise level [7, 20], require a lot of compute (time, memory, GPU) to denoise an image [24], have + +poor denoising quality [28], or do not generalise to different noise distributions or levels [15, 24]. 
We propose a method that builds on the recent Noise2Noise [17] and Neighbour2Neighbour [12] papers and aims to circumvent these issues to reach a good trade-off between denoising quality and computational resources. We make only minimal assumptions on the noise statistics (pixel-wise independence), and do not require training data. Our method does not require an explicit noise model, and is therefore suitable for various noise types and can be employed when the noise distribution or level are unknown. The only assumption we make about the noise is that it is unstructured and has zero mean. + +In a nutshell, we convolve the noisy test image with two fixed filters, which yields two downsampled images. We next train a lightweight network with regularization to map one downsampled image to the other. Our strategy builds on the recent Noise2Noise [17] and Neighbour2Neighbour [12] papers, however we take those methods one step further by enabling denoising without any training data. Even with an extremely small network and without any training data, our method achieves good denoising quality and often even outperforms large networks trained on datasets. + +The key attributes of our work are as follows: + +- Compute. Dataset free neural network based algorithms [24, 28] require solving an optimization problem involving millions of parameters to denoise an image. The huge parameter count requires large memory storage, advanced GPUs, and long denoising times. In this work we show that our method, that utilizes a simple 2 layer network, with only $20\mathrm{k}$ parameters, can often outperform networks with millions of parameters while reducing the computational cost significantly and being easily executable on a CPU. + +- Generalisation. Existing zero-shot methods often to do not generalise well. For example, BM3D [7], a classical denoising algorithm does not generalize well to non-Gaussian noise, and blind spot net + +![](images/2e40134f5190ea5ff9d19f16d882ce0c33535e6e560d24636f8dec3bd8054069.jpg) +Figure 1. Left and middle plots: PSNR scores for Gaussian and Poisson denoising for different noise levels. Note BM3D's poor performance on Poisson compared to Gaussian noise. Right plot: Time required in seconds to denoise one $256 \times 256$ colour image on CPU and GPU, tested on Poisson noise with $\lambda = 50$ . Except for BM3D, all methods have shorter times on GPU. Only S2S in some cases outperforms our method, however it is about 100 times slower. S2S* denotes the ensemble free version of S2S. + +works [15] [24] (discussed later in detail) fail to denoise well in the regime of low noise level. Extensive experiments on different noise distributions and noise levels show that our proposed approach can generalise better to different conditions better than existing methods. + +In summary, our proposed method is dataset and noise model-free, and achieves a better trade-off between generalization, denoising quality, and computational resources compared to existing zero-shot methods, as displayed in Figure 1. We compare to the standard zero shot baselines, including BM3D, and the recent neural network-based algorithms DIP [28] and S2S [24]. Only BM3D is faster than our method but achieves poor results on non-Gaussian noise. Only S2S sometimes outperforms our method, but is orders of magnitude slower, often fails on low noise levels [14], and requires assembling to achieve acceptable performance. + +# 2. 
Related Work + +Supervised methods achieve state-of-the-art performance by training a network end-to-end to map a noisy image to a clean one. Networks that work well are CNNs [3, 32], vision transformers [19], or MLP based architectures [21, 27]. + +Noise2Noise [17] yields excellent performance from training on two noisy images of the same static scene, without any ground truth images. Given that the noise has zero mean, training a network to map one noisy image to another noisy image of the same scene performs as well as mapping to the ground truth. While having access to a pair of noisy images of the same scene is in practice hard to achieve, the Noise2Noise method + +has motivated further research in self-supervised methods [12] that require only single noisy images. + +Self-supervised methods are trained on datasets consisting of only noisy images. Noise2Void [15] and Noise2Self [2] are two blind spot prediction approaches for image denoising. Given a set of noisy images $\{\mathbf{y}^i\}_{1}^n$ , The idea is to minimize the loss $\frac{1}{n}\sum_{i=1}^{n}\mathcal{L}(f_{\theta}(M^i(\mathbf{y}^i)),\mathbf{y}^i)$ , where $\mathcal{L}$ is a loss function, $f_{\theta}$ is a network, and $M^i$ is an operator that masks some pixels, hence the name blind spot. Assuming that the neighbouring pixels of a clean image are highly correlated, and that the noise pixels are independent, a network trained to reconstruct a masked pixel, can only predict the signal value from the neighbouring visible pixels, but not the noise. + +Recently, several works [4, 26, 34] attempted to use Stein's unbiased risk estimator for Gaussian denoising. Such methods work well only for Gaussian noise and require the noise level to be known in advance. A more general framework is Noisier2Noise [23] which works for any noise distribution, but the distribution must be known in advance. + +The newly proposed Neighbour2Neighbour [12] builds on the Noise2Noise [17] method, where the assumptions are that the noise has zero mean and is pixel-wise independent. Neighbour2Neighbour extends Noise2Noise by enabling training without noisy image pairs. It does so by sub-sampling single noisy images to create pairs of noisy images, where Noise2Noise can be applied. + +Zero-Shot/ Dataset free Methods. Most similar to our work is Noise2Fast [18], which also builds on Noise2Noise and Neighbour2Neighbour to achieve dataset-free denoising. However, the method is only evaluated on grayscale images, uses a relatively large network, and requires an early stopping criterion. Our work improves on Noise2Fast by easily denoising grayscale or RGB images, and designing a consistency loss that alleviates the need to early stop. Moreover, we use a much smaller network which saves compute. Specifically, our network is twelve times smaller and a forward pass through it is seven times faster. To the best of our knowledge, our work is the first to utilize a small 2-layer network and achieve competitive quality for image restoration. We show that on grayscale images, our method despite achieving similar scores to Noise2Fast [18], produces better quality images. This is likely due to Noise2Fast dropping pixel values when downsampling, whereas our method always keeps all information retained. + +Besides this work, classical non-learning-based methods, such as BM3D [7] and Anscombe [20], work + +well for Gaussian and Poisson noise, respectively, and require the noise level as an input. 
+ +DIP (Deep Image Prior) [28] and its variants such as the Deep Decoder [11] build on the fact that CNNs have an inductive bias towards natural images, in that they can fit natural images much faster than noise. Therefore, a network trained, with early stopping, to map a random input to the noisy image will denoise the image. The denoising performance of DIP is often poor, and is dependent on the number of training epochs, which is hard to determine in advance. + +Self2Self [24] utilizes the idea of the blind spot networks (reconstructing masked pixels) on a single image, but with dropout ensembling. However, this method is not computationally efficient, in that it requires long durations to denoise an image. According to the authors, it takes 1.2 hours to denoise one $256 \times 256$ image on a GPU. Compared to other blind spot networks, Self2Self achieves significantly better denoising scores, since it relies on ensembling, i.e., averaging the output of several networks. However, ensemble learning over smoothens the image, causing a loss of some details, despite the improvement in PSNR scores [8]. + +Similar to almost all supervised and self-supervised methods, both Self2Self and DIP use a UNet [25] or a variant of it as the backbone network in their architectures. A UNet typically has millions of parameters, making it unsuitable for compute limited applications. Our work departs from this scheme, by designing a shallow and simple network with few parameters. + +# 3. Method + +Our method builds on the Noise2Noise [17], for training a network on pairs of noisy images, and the Neighbour2Neighbour (NB2NB) [12], which generates such pairs from a single noisy image. Our main idea is to generate a pair of noisy images from a single noisy image and train a small network only on this pair. We start with a brief summary of Noise2Noise and then introduce our method. + +# 3.1. Background: Noise2Noise and Neighbour2Neighbour + +Supervised denoising methods are typically neural networks $f_{\theta}$ that map a noisy image $\mathbf{y}$ to an estimate $f_{\theta}(\mathbf{y})$ of the clean image $\mathbf{x}$ . Supervised denoising methods are typically trained on pairs of clean images $\mathbf{x}$ and noisy measurements $\mathbf{y} = \mathbf{x} + \mathbf{e}$ , where $\mathbf{e}$ is noise. We refer to supervised denoising as Noise2Clean (N2C). + +Neural networks can also be trained on different noisy observations of the same clean image. Noise2Noise (N2N) [17] assumes access to a set of pairs of noisy images $\mathbf{y}_1 = \mathbf{x} + \mathbf{e}_1, \mathbf{y}_2 = \mathbf{x} + \mathbf{e}_2$ + +where $\mathbf{e}_1, \mathbf{e}_2$ are independent noise vectors. A network $f_{\theta}$ is then trained to minimize the empirical risk $\frac{1}{n} \sum_{i=1}^{n} \left\| f_{\theta}(\mathbf{y}_1^i) - \mathbf{y}_2^i \right\|_2^2$ . This makes sense, since in expectation over such noisy instances, and assuming zero mean noise, training a network in a supervised manner to map a noisy image to another noisy image is equivalent to mapping it to a clean image i.e., + +$$ +\underset {\theta} {\arg \min } \mathbb {E} \left[ \| f _ {\theta} (\mathbf {y} _ {1}) - \mathbf {x} \| _ {2} ^ {2} \right] = \underset {\theta} {\arg \min } \mathbb {E} \left[ \| f _ {\theta} (\mathbf {y} _ {1}) - \mathbf {y} _ {2} \| _ {2} ^ {2} \right]. \tag {1} +$$ + +The proof is given in the supplementary material. + +In theory N2N training reaches the same performance as N2C training if the dataset is infinitely large. 
In practice, since the training set is limited in size, N2N falls slightly short of N2C. For example, N2N training with a UNet on 50k images gives a performance drop of only about $0.02\mathrm{dB}$ compared to N2C with a UNet. + +Despite the great performance of N2N, its usability is often limited, since it is difficult to obtain a pair of noisy images of the same static scene. For instance, the object being captured might be non-static, or the lighting conditions change rapidly. + +Neighbour2Neighbour (NB2NB) [12] extends N2N and allows training only on a set of single noisy images, by sub-sampling a noisy image to create a pair of noisy images. Similar to N2N, NB2NB exhibits strong denoising performance when trained on many images. + +# 3.2. Zero-Shot Noise2Noise + +Our work extends Noise2Noise [17] and Neighbour2Neighbour [12] by enabling training on only one single noisy image. To avoid overfitting to the single image, we use a very shallow network and an explicit regularization term. + +Almost all self- or un-supervised denoising methods, including ours, rely on the premise that a clean natural image has different attributes than random noise. As shown in [12], a noisy image can be decomposed into a pair of downsampled images. Based on the premise that nearby pixels of a clean image are highly correlated and often have similar values, while the noise pixels are unstructured and independent, the downsampled pair of noisy images has similar signal but independent noise. This pair can therefore serve as an approximation of two noisy observations of the same scene, where one observation is used as the input, and the other as the target, as in N2N. + +Our approach is to first decompose the image into a pair of downsampled images and second train a lightweight network with regularization to map one downsampled image to the other. Applying the so-trained network to a noisy image yields the denoised image. We first explain how we generate the downsampled images, and then how we fit the network. + +Image Pair Down sampler The pair down sampler takes as input an image $\mathbf{y}$ of size $H\times W\times C$ and generates two images $D_{1}(\mathbf{y})$ and $D_{2}(\mathbf{y})$ , each of size $H / 2\times W / 2\times C$ . The down sampler generates those images by dividing the image into non-overlapping patches of size $2\times 2$ , taking an average of the diagonal pixels of each patch and assigning it to the first low-resolution image, then the average of the anti-diagonal pixels and assigning it to the second low-resolution image. See Figure 2 for an illustration of the pair down sampler. + +The downsample is implemented with convolutions as follows. The first low-resolution image is obtained by applying a 2D convolution with stride two and fixed kernel $\mathbf{k}_1 = \begin{bmatrix} 0 & 0.5 \\ 0.5 & 0 \end{bmatrix}$ to the original image as $D_1(\mathbf{y}) = \mathbf{y} * \mathbf{k}_1$ , and the second image is obtained by applying a 2D convolution with stride two and fixed kernel $\mathbf{k}_2 = \begin{bmatrix} 0.5 & 0 \\ 0 & 0.5 \end{bmatrix}$ to the original image as $D_2(\mathbf{y}) = \mathbf{y} * \mathbf{k}_2$ . The convolutions are implemented channel-wise and therefore the downsampling scheme is applicable to any arbitrary number of input channels. + +![](images/072dc66dcb427118e22a6c61690194a433223b48e11010108bfa7e63e6b1f977.jpg) +Figure 2. 
The Image Pair Downsampler decomposes an image into two images of half the spatial resolution by averaging diagonal pixels of $2 \times 2$ non-overlapping patches. In the above example the input is a $4 \times 4$ image, and the output is two $2 \times 2$ images. + +Zero-shot-image denoising method. Given a test image $\mathbf{y}$ to denoise, our method is conceptually similar to first fitting a small image-to-image neural network $f_{\theta}$ to map the first downsampled image $D_{1}(\mathbf{y})$ to the second one, $D_{2}(\mathbf{y})$ by minimizing the loss + +$$ +\mathcal {L} (\boldsymbol {\theta}) = \left\| f _ {\boldsymbol {\theta}} \left(D _ {1} (\mathbf {y})\right) - D _ {2} (\mathbf {y}) \right\| _ {2} ^ {2}. \tag {2} +$$ + +Once we fitted the network, we can apply it to the original noisy observation to estimate the denoised image as $\hat{\mathbf{x}} = f_{\hat{\theta}}(\mathbf{y})$ + +However, our experiments showed that residual learning, a symmetric loss, and an additional consistency-enforcing term are critical for good performance. We next explain the elements of our loss function. In residual learning, the network is optimized to fit + +the noise instead of the image. The loss then becomes + +$$ +\mathcal {L} (\boldsymbol {\theta}) = \left\| D _ {1} (\mathbf {y}) - f _ {\boldsymbol {\theta}} \left(D _ {1} (\mathbf {y})\right) - D _ {2} (\mathbf {y}) \right\| _ {2} ^ {2}. \tag {3} +$$ + +Following [6], where a symmetric loss was used in the context of self-supervised pretraining of a siamese network, we additionally adopt a symmetric loss, which yields the residual loss: + +$$ +\begin{array}{l} \mathcal {L} _ {\text {r e s .}} (\boldsymbol {\theta}) = \frac {1}{2} \left(\left\| D _ {1} (\mathbf {y}) - f _ {\boldsymbol {\theta}} \left(D _ {1} (\mathbf {y})\right) - D _ {2} (\mathbf {y}) \right\| _ {2} ^ {2} + \right. \\ \left. \left\| D _ {2} (\mathbf {y}) - f _ {\boldsymbol {\theta}} \left(D _ {2} (\mathbf {y})\right) - D _ {1} (\mathbf {y}) \right\| _ {2} ^ {2}\right). \tag {4} \\ \end{array} +$$ + +In addition, we enforce consistency by ensuring that first denoising the image $\mathbf{y}$ and then downsampling it, is similar to what we get when first downsampling $\mathbf{y}$ and then denoising it, i.e., we consider a loss of the form: + +$$ +\mathcal {L} (\boldsymbol {\theta}) = \left\| D (\mathbf {y}) - f _ {\boldsymbol {\theta}} (D (\mathbf {y})) - D (\mathbf {y} - f _ {\boldsymbol {\theta}} (\mathbf {y})) \right\| _ {2} ^ {2}. \tag {5} +$$ + +Again adopting a symmetric loss, the consistency loss becomes: + +$$ +\begin{array}{l} \mathcal {L} _ {\mathrm {c o n s .}} (\pmb {\theta}) = \frac {1}{2} \Big (\| D _ {1} (\mathbf {y}) - f _ {\pmb {\theta}} (D _ {1} (\mathbf {y})) - D _ {1} (\mathbf {y} - f _ {\pmb {\theta}} (\mathbf {y})) \| _ {2} ^ {2} \\ \left. + \left\| D _ {2} (\mathbf {y}) - f _ {\boldsymbol {\theta}} \left(D _ {2} (\mathbf {y})\right) - D _ {2} (\mathbf {y} - f _ {\boldsymbol {\theta}} (\mathbf {y})) \right\| _ {2} ^ {2}\right). \tag {6} \\ \end{array} +$$ + +Note that for the residual loss, the network only has the downsampled images as input. Only in the consistency loss, the network gets to see the image in full spatial resolution. Including the consistency loss enables better denoising performance and helps to avoid overfitting. It can therefore be seen as a regularizing term. 
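The full procedure, summarized in the next paragraph, can be sketched in a few lines of PyTorch. This is a minimal illustration rather than the authors' reference implementation (linked in the conclusion): the hidden channel width, optimizer, learning rate, and iteration count below are assumptions chosen only to roughly match the reported network size and training budget, and mean-squared error is used in place of the squared norms, which merely rescales the loss.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def pair_downsampler(y):
    """Split y (N, C, H, W) into two half-resolution images by averaging the
    diagonal / anti-diagonal pixels of each non-overlapping 2x2 patch."""
    c = y.shape[1]
    k1 = torch.tensor([[[[0.0, 0.5], [0.5, 0.0]]]], device=y.device).repeat(c, 1, 1, 1)
    k2 = torch.tensor([[[[0.5, 0.0], [0.0, 0.5]]]], device=y.device).repeat(c, 1, 1, 1)
    d1 = F.conv2d(y, k1, stride=2, groups=c)  # channel-wise convolution with fixed kernel k1
    d2 = F.conv2d(y, k2, stride=2, groups=c)  # channel-wise convolution with fixed kernel k2
    return d1, d2

class LightweightNet(nn.Module):
    """Two 3x3 convolutions followed by a 1x1 convolution (roughly 20k parameters);
    the hidden width of 48 channels is an assumption, not stated in the text."""
    def __init__(self, channels=3, hidden=48):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(channels, hidden, 3, padding=1), nn.ReLU(),
            nn.Conv2d(hidden, hidden, 3, padding=1), nn.ReLU(),
            nn.Conv2d(hidden, channels, 1),
        )

    def forward(self, x):
        return self.net(x)

def zs_n2n_loss(f, y):
    d1, d2 = pair_downsampler(y)
    # Residual loss, following Eq. (4): the network predicts the noise of each downsampled image.
    loss_res = 0.5 * (F.mse_loss(d1 - f(d1), d2) + F.mse_loss(d2 - f(d2), d1))
    # Consistency loss, following Eq. (6): denoise-then-downsample should match downsample-then-denoise.
    dn1, dn2 = pair_downsampler(y - f(y))
    loss_cons = 0.5 * (F.mse_loss(d1 - f(d1), dn1) + F.mse_loss(d2 - f(d2), dn2))
    return loss_res + loss_cons

def denoise(y, iters=2000, lr=1e-3):
    """Fit the small network to the single noisy image y (shape (1, C, H, W),
    values in [0, 1]) and return the denoised estimate y - f(y)."""
    f = LightweightNet(channels=y.shape[1]).to(y.device)
    opt = torch.optim.Adam(f.parameters(), lr=lr)  # optimizer and lr are illustrative choices
    for _ in range(iters):
        opt.zero_grad()
        zs_n2n_loss(f, y).backward()
        opt.step()
    with torch.no_grad():
        return y - f(y)  # residual learning: subtract the predicted noise
```

Only the network parameters are updated; the two fixed-kernel, stride-two convolutions in `pair_downsampler` stay constant throughout, mirroring the description above.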
# 4. Experiments

We compare our denoising algorithm (ZS-N2N) to several baselines. The baselines include dataset based methods, as well as other zero-shot methods. For the dataset based methods, we include both supervised (with clean images) and self-supervised (only noisy images) methods. We test all methods on artificial and real-world noise. We provide ablation studies in the supplementary material.

The results highlight the dependency of dataset based methods on the dataset they are trained on and suggest that, given a small training set, they are outperformed by dataset free ones. Furthermore, the experiments show that methods based on noise models achieve good performance for the specific noise model, but do not generalise to other distributions.

Concerning the dataset and noise model free methods, our proposed method is either on par with or better than other baselines on Gaussian, Poisson, and real world camera and microscope noise. Our method only falls short of Self2Self [24] on high noise levels; however, it requires only $\frac{1}{200}$ of the denoising time of Self2Self and $2\%$ of its memory. Moreover, Self2Self's performance on low noise levels is insufficient. Therefore, considering denoising quality, generalisation, and computational resources, our method achieves a better trade-off compared to existing methods, as shown in Figure 1.

# 4.1. Baselines

We compare to Noise2Clean (N2C) with a UNet, which is the current state-of-the-art denoising algorithm. There exist several other networks that perform on par with the UNet, such as DnCNN [32] and RED30 [22], but the UNet is orders of magnitude faster, since it is not very deep and has a multi-resolution structure. The UNet is therefore the standard choice in all recent denoising papers [12, 15, 17, 23].

For the self-supervised methods, we compare to Neighbour2Neighbour (NB2NB) [12] and Noise2Void (N2V) [15].
We exclude the methods that require an explicit noise model, such as [4, 16, 23, 34], since these methods work well on synthetic denoising tasks for the given noise distribution, but fail to generalize to unknown noise distributions or real-world noise [12, 30]. This is due to the fact that synthetic noise is insufficient for simulating real camera noise, which is signal-dependent and substantially altered by the camera's imaging system.

Regarding the zero-shot methods, which are most similar to ours, we compare to the deep learning based algorithms DIP [28] and Self2Self (S2S) [24], and also to the classical algorithm BM3D [7]. Note that apart from our method (and BM3D), all baselines use a UNet or a variation of it as a denoising backbone.

The performance of DIP is very sensitive to the number of gradient descent steps. We used the ground truth images to determine the best early stopping iteration. The DIP results can therefore be seen as an over-optimistic estimate of the method's performance. For a fair comparison, we report the results of the best performing model for the other baselines. A comparison of the sensitivity of the methods to the number of optimization steps can be found in the supplementary material.

The original implementation of S2S uses an ensemble of multiple networks, i.e., averaging the outputs of several networks. All other baselines do not utilize ensembling or averaging. For a fair comparison, we additionally report the results of S2S without any ensembling, which we denote by S2S*. S2S denotes the original implementation with an ensemble of 50 networks.

# 4.2. Synthetic Noise

The dataset based methods (N2C, NB2NB, N2V) are trained on 500 colour images from ImageNet [10]. All methods are tested on the Kodak24 ${}^{1}$ and McMaster18 [13] datasets. All training and test images are center-cropped to patches of size ${256} \times {256}$.

We examine Gaussian and Poisson noise with noise levels $\sigma$ and $\lambda$, respectively. We consider the fixed noise levels $\sigma, \lambda = 10, 25, 50$. The $\sigma$ values for Gaussian noise correspond to pixel values in the interval [0,255], while the $\lambda$ values for Poisson noise correspond to values in the interval [0,1].

For the dataset based methods, we also consider blind denoising during training with the range of noise levels $\sigma, \lambda \in [10, 50]$. During training, a $\sigma$ or $\lambda$ value is sampled uniformly from the given range for each image in each training epoch, unlike the fixed noise levels, where all training images are contaminated with the same noise level. Blind denoising is what is used in practice, since an exact noise level is typically not given, but rather a range of noise levels.
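As an illustration of these conventions, the following sketch shows one common way to synthesize such noisy test images and to score a denoised estimate; the exact simulation code is not given in this text, so the helpers below are assumptions consistent with the stated scales ($\sigma$ on [0,255] pixel values, $\lambda$ on [0,1] intensities, PSNR in dB):

```python
import torch

def add_gaussian_noise(x, sigma):
    """x: clean image with values in [0, 1]; sigma is specified on the [0, 255] scale."""
    return x + (sigma / 255.0) * torch.randn_like(x)

def add_poisson_noise(x, lam):
    """x: clean image with values in [0, 1]; a smaller lam means stronger noise."""
    return torch.poisson(lam * x) / lam

def psnr(estimate, reference, max_val=1.0):
    """Peak signal-to-noise ratio in dB of a denoised estimate against the ground truth."""
    mse = torch.mean((estimate - reference) ** 2)
    return 10.0 * torch.log10(max_val ** 2 / mse)
```

For the blind-denoising setting, $\sigma$ or $\lambda$ would be drawn uniformly from [10, 50] for each training image in each epoch, as described above.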
In table 1, we present the denoising performance of the different methods. For the dataset based methods, $\sigma, \lambda$ known denotes that the network trained on that exact noise level is used for testing, while unknown denotes blind denoising, where the network trained on the range of noise levels [10,50] is used for testing. BM3D requires as input the value of the noise level. For Gaussian denoising the known $\sigma$ value was used, while for Poisson denoising the noise level was estimated using the method in [5]. Note that ZS-N2N, DIP, and S2S do not utilize any prior information on the noise distribution or level.

As seen from the results, the dataset based methods often fall slightly short of the dataset free methods. This is due to the fact that they were only trained on 500 images, whereas they reach good performance when trained on larger datasets. In the supplementary material, we show that when N2C is trained on 4000 images, it outperforms all other baselines, and its performance can keep improving with more training data. Another drawback of dataset based methods is that they are sensitive to the data they are trained on. They experience a performance drop when trained on a range of noise levels as opposed to the specific noise level of the test set.

Regarding the zero-shot methods, DIP exhibited inferior scores in all simulations. BM3D is tailored to work well for Gaussian denoising, where the exact noise variance is known and required as input. However, its performance dropped for Poisson noise, where the noise level was estimated.

ZS-N2N and S2S do not rely on a specific noise model and therefore work consistently well for both Gaussian and Poisson noise. However, S2S suffers from at least two drawbacks. The first is that it relies heavily on ensembling to achieve good scores, as seen by comparing the results of S2S with $\mathrm{S2S^{*}}$. Despite improving the scores, ensembling oversmooths the image, causing a loss of some visual features [8]. Note that all other baselines are ensemble free. The second drawback is that it performs worse than all other baselines on low noise levels, as seen in the Gaussian noise with $\sigma = 10$.

Considering that DIP performs poorly, that BM3D only works well for Gaussian noise, and that S2S's performance without ensembling and on low noise levels is unsatisfactory, our method, ZS-N2N, is the only dataset free denoising algorithm that performs well on different noise distributions and levels.

# 4.3. Real-World Noise

Camera noise: Following [24], we evaluate on the PolyU dataset [29], which consists of high-resolution images from various scenes captured by 5 cameras from the 3 leading camera brands: Canon, Nikon, and Sony. We also consider the SIDD [1], which consists of images captured by several smartphone cameras under different lighting conditions and noise patterns.

Since the computational cost for running S2S is high, we randomly choose 20 images from both datasets to test on. The SIDD validation set has images of size $256 \times 256$. For consistency, we center-crop the PolyU images to patches of size $256 \times 256$. The results are shown in table 2. All methods perform similarly except for BM3D and the ensemble free version of S2S, which exhibit a notable performance drop.
| Dataset | ZS-N2N | DIP | S2S | S2S* | BM3D |
|---|---|---|---|---|---|
| PolyU | 36.92 | 37.07 | 37.01 | 33.12 | 36.11 |
| SIDD | 34.07 | 34.31 | 33.98 | 30.77 | 28.19 |

Table 2. Denoising PSNR in dB on real world camera noise.

Microscope noise: We additionally evaluate on the Fluorescence Microscopy dataset [33], which contains real grayscale fluorescence images obtained with commercial confocal, two-photon, and wide-field microscopes and representative biological samples such as cells, zebrafish, and mouse brain tissues. We pick random images from the test set to test on. We also compare to Noise2Fast (N2F) [18], for which code for denoising grayscale is available. The results are depicted in table 3.
| Image | Photon BPAE | Photon Mice | Confocal BPAE | Average |
|---|---|---|---|---|
| ZS-N2N | 30.73 | 31.42 | 35.85 | 32.67 |
| DIP | 29.22 | 30.01 | 35.51 | 31.58 |
| S2S | 30.90 | 31.51 | 31.01 | 31.14 |
| S2S* | 29.49 | 29.99 | 29.54 | 29.67 |
| BM3D | 27.19 | 29.48 | 33.23 | 29.97 |
| N2F | 30.93 | 31.07 | 36.01 | 32.67 |

Table 3. PSNR in dB on real world microscope noise.

Our method and Noise2Fast achieve similar scores and slightly outperform the other baselines. Despite the similarity in scores, when inspecting the denoised images visually, we see differences: Our method produces visually sharper images and preserves slightly more details, while the Noise2Fast images are relatively smooth. This is most noticeable on images with fine details, such as MRI images; see Figure 3 for a knee image from the fastMRI dataset [31]. The blurriness in the Noise2Fast images is likely due to the downsampling scheme used, which drops some pixel values, and the ensembling performed to obtain the final image estimate, which oversmooths the image [8]. Our method, on the other hand, preserves all pixel values during downsampling, and is ensemble free.

# 4.4. Computational Efficiency

In this section we focus on computational efficiency. We consider the denoising time and the memory requirements, represented by the number of network parameters. Since in some applications a GPU is not available [9], we additionally consider the denoising time on a CPU. The GPU tested is a Quadro RTX 6000 and the CPU an Intel Core i9-9940X at 3.30 GHz.
Gaussian noise:

| | Method | σ known? | Kodak24 (σ=10) | Kodak24 (σ=25) | Kodak24 (σ=50) | McMaster18 (σ=10) | McMaster18 (σ=25) |
|---|---|---|---|---|---|---|---|
| dataset-based | N2C | yes | 33.45 | 28.27 | 25.47 | 33.03 | 28.46 |
| | N2C | no | 32.16 | 28.18 | 24.45 | 31.97 | 28.26 |
| | NB2NB | yes | 33.01 | 27.90 | 25.02 | 32.63 | 28.01 |
| | NB2NB | no | 31.79 | 27.80 | 24.15 | 31.19 | 27.85 |
| | N2V | yes | 30.19 | 26.21 | 24.07 | 30.95 | 26.50 |
| | N2V | no | 28.95 | 26.03 | 23.19 | 29.64 | 26.31 |
| dataset-free | ZS-N2N (ours) | - | 33.69 | 29.07 | 24.81 | 34.21 | 28.80 |
| | DIP | - | 32.28 | 27.38 | 23.95 | 33.07 | 27.61 |
| | S2S | - | 29.54 | 28.39 | 26.22 | 30.78 | 28.71 |
| | S2S* | - | 26.93 | 26.29 | 24.83 | 27.64 | 26.48 |
| | BM3D | yes | 33.74 | 29.02 | 25.51 | 34.51 | 29.21 |

Poisson noise:

| | Method | λ known? | Kodak24 (λ=50) | Kodak24 (λ=25) | Kodak24 (λ=10) | McMaster18 (λ=50) | McMaster18 (λ=25) |
|---|---|---|---|---|---|---|---|
| dataset-based | N2C | yes | 29.42 | 27.49 | 26.25 | 29.89 | 28.20 |
| | N2C | no | 28.92 | 27.14 | 23.13 | 28.62 | 27.51 |
| | NB2NB | yes | 29.19 | 27.01 | 25.71 | 29.41 | 27.79 |
| | NB2NB | no | 28.53 | 26.88 | 23.60 | 28.03 | 27.66 |
| | N2V | yes | 27.73 | 25.55 | 23.77 | 27.86 | 25.65 |
| | N2V | no | 27.04 | 25.28 | 21.93 | 26.34 | 25.52 |
| dataset-free | ZS-N2N (ours) | - | 29.45 | 27.52 | 24.92 | 30.36 | 28.41 |
| | DIP | - | 27.51 | 25.84 | 23.81 | 28.73 | 27.37 |
| | S2S | - | 28.89 | 28.31 | 27.29 | 30.11 | 29.40 |
| | S2S* | - | 26.75 | 26.40 | 25.63 | 27.55 | 27.24 |
| | BM3D | no | 28.36 | 26.58 | 24.20 | 27.33 | 24.77 |

Table 1. PSNR scores in dB for Gaussian and Poisson denoising. Best result is in bold, second best result is underlined. The dataset based methods are italicized. Note DIP's mediocre scores and BM3D's performance drop between Gaussian and Poisson noise. S2S has significantly lower scores in low noise, as seen with $\sigma = 10$, and its ensemble free version S2S* has inadequate performance. Denoised samples can be found in the supplementary material.

In table 4 we display the time required to denoise one colour image of size $256 \times 256$ at inference, as well as the total number of trainable parameters of a model. The dataset based methods are trained for long durations, but after training, the network parameters are fixed, and inference is almost instantaneous, since it is just a forward pass through the model. The time taken for denoising is therefore negligible compared to the zero-shot methods, whose parameters are optimized for each test image separately.

In the original implementation of S2S, the authors report a denoising time of 1.2 hours for a $256 \times 256$ colour image on GPU. However, we noticed that only half of the gradient update iterations are needed for convergence. We therefore report only half of their GPU time.

Concerning the denoising time, dataset based methods are the fastest, since a forward pass through a fixed network requires only milliseconds. Regarding the deep learning based zero-shot methods, ZS-N2N is significantly more computationally efficient. Specifically, on CPU it is 200 times and 35 times faster than S2S and DIP, respectively, and has only $2\%$ and $1\%$ of their memory requirements. Only the classical BM3D is computationally more efficient than ZS-N2N.

# 4.5. Discussion

Dataset based methods typically achieve state-of-the-art results, but our experiments exposed two of their shortcomings: they do not perform well when trained on small datasets, and their performance drops when the test data differs from the training data, as seen by varying the noise levels. This highlights the importance of dataset free denoising algorithms.

Methods that rely on an explicit model of the noise distribution, such as Noisier2Noise [23] and Anscombe [20], or those tailored to work well for specific distributions, such as BM3D, do not generalize well to other distributions. Their performance therefore degrades when the noise distribution is unknown or the noise level must be estimated. This is demonstrated by BM3D's competitive performance on Gaussian noise, but its failure to keep up with the other baselines on Poisson and real-world noise. These findings highlight the advantage of noise model free techniques.

![](images/4a90a39e1e5aeffe1bf4ef2585b249e2f75711e91028c17d4c61cba64f880f8f.jpg)

![](images/57e4001787a4600cc5a18ca0d13e3c647553819bbdcfed802a4b23c56bad574f.jpg)

![](images/fbf1a105137312093bd3be1650c690ce8c329005a5b4e06f253480600247281b.jpg)

![](images/8fca2ab14f3f86eebb388395846f2c72490c0887387c6152e6f80ca46cd37363.jpg)

![](images/6af9e111c367a366653d7ded1785a29f770a7bcd717b48de0b280d3906ca8995.jpg)
Figure 3. Visual comparison between our method and Noise2Fast for denoising Gaussian noise on a knee MRI. Both methods achieve similar PSNR, but notice how the center and left edge are blurry and oversmooth in Noise2Fast. Our method produces a sharper image with less loss of details.
+ +![](images/032512022491e34fb7120f464f3eb60cc120ffd18fa3528ffcf08ecfe9e618f1.jpg) + +![](images/a1998f7d1439aa188a577ca18ed8ea70f6dad3c491d2fb0ded4fa0f08d2cebd6.jpg) + +![](images/a171e5ab676a203e8e42fff539343f738e4e985ae75b8fad3afa4d505e479059.jpg) + +
| Method | N2C | NB2NB | N2V | ZS-N2N | DIP | S2S | BM3D |
|---|---|---|---|---|---|---|---|
| GPU time | - | - | - | 20 sec. | 3 min. | 35 min. | 4 sec. |
| CPU time | - | - | - | 80 sec. | 45 min. | 4.5 hr. | 4 sec. |
| Network size | 3.3M | 1.3M | 2.2M | 22k | 2.2M | 1M | - |

Table 4. Computational Resources. First and second rows: time taken to denoise one image on average on GPU and CPU. The time for the dataset based methods is omitted, since it is negligible. BM3D does not benefit from the GPU, as there is no optimization involved. Bottom row: number of parameters of a network.

Regarding the three dataset free and noise model free methods considered, DIP was often lagging behind S2S and ZS-N2N, despite using the ground truths to find the best possible early stopping iteration. S2S's performance without ensembling is inadequate, and even with ensembling, it does not work well on low noise levels. Moreover, it requires more than half an hour to denoise an image on a GPU and 4.5 hours on a CPU.

Except for ZS-N2N, all deep learning based baselines have millions of parameters, making them computationally expensive. Considering ZS-N2N's ability to generalize to various denoising conditions with relatively fast denoising time, very few parameters, and CPU compatibility, we can conclude that it offers a good trade-off between denoising quality and computational resources.

# 5. Conclusion

We proposed a novel zero-shot image denoising algorithm that does not require any training examples or knowledge of the noise model or level. Our work uses a simple 2-layer network and allows denoising in a relatively short period of time, even when executed without a GPU. The method performs well on simulated noise as well as real-world camera and microscope noise, and achieves a good trade-off between generalization, denoising quality and computational resources compared to existing dataset free methods. A demo of our implementation, including our code and hyperparameters, can be found in the following colab notebook: https://colab.research.google.com/drive/1i82nyizTdszyHkaHBuKPbWnTzao8HF9b

# Acknowledgements

The authors are supported by the Institute of Advanced Studies at the Technical University of Munich, the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) - 456465471, 464123524, the German Federal Ministry of Education and Research, and the Bavarian State Ministry for Science and the Arts. The authors of this work take full responsibility for its content.

# References

[1] Abdelrahman Abdelhamed, Stephen Lin, and Michael S. Brown. A high-quality denoising dataset for smartphone cameras. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 6
[2] Joshua Batson and Loic Royer. Noise2Self: Blind denoising by self-supervision. In Kamalika Chaudhuri and Ruslan Salakhutdinov, editors, Proceedings of the 36th International Conference on Machine Learning, volume 97 of Proceedings of Machine Learning Research, pages 524-533. PMLR, 2019. 2
[3] Tim Brooks, Ben Mildenhall, Tianfan Xue, Jiawen Chen, Dillon Sharlet, and Jonathan T. Barron. Unprocessing Images for Learned Raw Denoising. In IEEE Conference on Computer Vision and Pattern Recognition, pages 11036-11045, 2019. 2
[4] Sungmin Cha and Taesup Moon. Fully convolutional pixel adaptive image denoiser. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2019. 2, 5
[5] Guangyong Chen, Fengyuan Zhu, and Pheng Ann Heng. An efficient statistical method for image noise level estimation. In 2015 IEEE International Conference on Computer Vision (ICCV), pages 477-485, 2015. 6
[6] Xinlei Chen and Kaiming He. Exploring simple siamese representation learning.
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15750-15758, 2021. 4
[7] K. Dabov, A. Foi, V. Katkovnik, and K. Egiazarian. Image Denoising by Sparse 3-D Transform-Domain Collaborative Filtering. IEEE Transactions on Image Processing, 16(8):2080-2095, 2007. 1, 2, 5
[8] Mohammad Zalbagi Darestani and Reinhard Heckel. Accelerated MRI with un-trained neural networks. IEEE Transactions on Computational Imaging, 7:724-733, 2021. 3, 6
[9] Mauricio Delbracio, Ignacio Garcia-Dorado, SungJoon Choi, Damien Kelly, and Peyman Milanfar. Polyblur: Removing mild blur by polynomial reblurring. IEEE Transactions on Computational Imaging, 7:837-848, 2021. 6
[10] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248-255, 2009. 5
[11] Reinhard Heckel and Paul Hand. Deep decoder: Concise image representations from untrained non-convolutional networks. International Conference on Learning Representations, 2019. 3
[12] Tao Huang, Songjiang Li, Xu Jia, Huchuan Lu, and Jianzhuang Liu. Neighbor2neighbor: Self-supervised denoising from single noisy images. In 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14776-14785, 2021. 1, 2, 3, 5
[13] Sandip M. Kasar and Sachin D. Ruikar. Image demosaicking by nonlocal adaptive thresholding. In 2013 International Conference on Signal Processing, Image Processing Pattern Recognition, pages 34-38, 2013. 5
[14] Chaewon Kim, Jaeho Lee, and Jinwoo Shin. Zero-shot blind image denoising via implicit neural representations, 2022. 2
[15] Alexander Krull, Tim-Oliver Buchholz, and Florian Jug. Noise2void - learning denoising from single noisy images. In 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2124-2132, 2019. 1, 2, 5
[16] Samuli Laine, Tero Karras, Jaakko Lehtinen, and Timo Aila. High-quality self-supervised deep image denoising. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. 5
[17] Jaakko Lehtinen, Jacob Munkberg, Jon Hasselgren, Samuli Laine, Tero Karras, Miika Aittala, and Timo Aila. Noise2Noise: Learning image restoration without clean data. In Jennifer Dy and Andreas Krause, editors, Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pages 2965-2974. PMLR, 2018. 1, 2, 3, 5
[18] Jason Lequyer, Reuben Philip, Amit Sharma, Wen-Hsin Hsu, and Laurence Pelletier. A fast blind zero-shot denoiser. Nature Machine Intelligence, Oct 2022. 2, 6
[19] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 10012-10022, 2021. 2
[20] Markku Makitalo and Alessandro Foi. Optimal inversion of the Anscombe transformation in low-count Poisson image denoising. IEEE Transactions on Image Processing, 20(1):99-109, 2011. 1, 2, 7
[21] Youssef Mansour, Kang Lin, and Reinhard Heckel. Image-to-image mlp-mixer for image reconstruction. CoRR, abs/2202.02018, 2022. 2
[22] Xiaojiao Mao, Chunhua Shen, and Yu-Bin Yang. Image restoration using very deep convolutional encoder-decoder networks with symmetric skip connections.
In D. Lee, M. Sugiyama, U. Luxburg, I. Guyon, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 29. Curran Associates, Inc., 2016. 5
[23] Nick Moran, Dan Schmidt, Yu Zhong, and Patrick Coady. Noisier2noise: Learning to denoise from unpaired noisy data. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 12061-12069, 2020. 2, 5, 7
[24] Yuhui Quan, Mingqin Chen, Tongyao Pang, and Hui Ji. Self2self with dropout: Learning self-supervised denoising from single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 1, 2, 3, 5, 6
[25] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In Nassir Navab, Joachim Hornegger, William M. Wells, and Alejandro F. Frangi, editors, Medical Image Computing and Computer-Assisted Intervention - MICCAI 2015, pages 234-241, Cham, 2015. Springer International Publishing. 3, 4
[26] Shakarim Soltanayev and Se Young Chun. Training deep learning based denoisers without ground truth data. In S. Bengio, H. Wallach, H. Larochelle, K. Grauman, N. Cesa-Bianchi, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 31. Curran Associates, Inc., 2018. 2
[27] Zhengzhong Tu, Hossein Talebi, Han Zhang, Feng Yang, Peyman Milanfar, Alan Bovik, and Yinxiao Li. Maxim: Multi-axis mlp for image processing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5769-5780, June 2022. 2
[28] Dmitry Ulyanov, Andrea Vedaldi, and Victor Lempitsky. Deep image prior. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 1, 2, 3, 5
[29] Jun Xu, Hui Li, Zhetong Liang, David Zhang, and Lei Zhang. Real-world noisy image denoising: A new benchmark, 2018. 6
[30] Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, Ming-Hsuan Yang, and Ling Shao. Cycleisp: Real image restoration via improved data synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 5
[31] Jure Zbontar, Florian Knoll, Anuroop Sriram, Tullie Murrell, Zhengnan Huang, Matthew J. Muckley, Aaron Defazio, Ruben Stern, Patricia Johnson, Mary Bruno, Marc Parente, Krzysztof J. Geras, Joe Katsnelson, Hersh Chandarana, Zizhao Zhang, Michal Drozdzal, Adriana Romero, Michael Rabbat, Pascal Vincent, Nafissa Yakubova, James Pinkerton, Duo Wang, Erich Owens, C. Lawrence Zitnick, Michael P. Recht, Daniel K. Sodickson, and Yvonne W. Lui. fastMRI: An open dataset and benchmarks for accelerated MRI. 2018. 6
[32] K. Zhang, W. Zuo, Y. Chen, D. Meng, and L. Zhang. Beyond a Gaussian Denoiser: Residual Learning of Deep CNN for Image Denoising. IEEE Transactions on Image Processing, 26(7):3142-3155, 2017. 2, 5
[33] Yide Zhang, Yinhao Zhu, Evan Nichols, Qingfei Wang, Siyuan Zhang, Cody Smith, and Scott Howard. A Poisson-Gaussian denoising dataset with real fluorescence microscopy images. In CVPR, 2019. 6
[34] Magaiya Zhussip, Shakarim Soltanayev, and Se Young Chun. Extending stein's unbiased risk estimator to train deep denoisers with correlated pairs of noisy images. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019.
2, 5 \ No newline at end of file diff --git a/2023/Zero-Shot Noise2Noise_ Efficient Image Denoising Without Any Data/images.zip b/2023/Zero-Shot Noise2Noise_ Efficient Image Denoising Without Any Data/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..86ff745024a6dde075f5dc03e907d92c3792373a --- /dev/null +++ b/2023/Zero-Shot Noise2Noise_ Efficient Image Denoising Without Any Data/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44d6a8257f97e0eb63faffdb2a4b7403d7427deddbce3d2d97f807bd8d054ae0 +size 420682 diff --git a/2023/Zero-Shot Noise2Noise_ Efficient Image Denoising Without Any Data/layout.json b/2023/Zero-Shot Noise2Noise_ Efficient Image Denoising Without Any Data/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..4bd88100832816990adad7204b4e510ce8471ae9 --- /dev/null +++ b/2023/Zero-Shot Noise2Noise_ Efficient Image Denoising Without Any Data/layout.json @@ -0,0 +1,7564 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 95, + 103, + 515, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 103, + 515, + 121 + ], + "spans": [ + { + "bbox": [ + 95, + 103, + 515, + 121 + ], + "type": "text", + "content": "Zero-Shot Noise2Noise: Efficient Image Denoising without any Data" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 126, + 144, + 484, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 144, + 484, + 185 + ], + "spans": [ + { + "bbox": [ + 126, + 144, + 484, + 185 + ], + "type": "text", + "content": "Youssef Mansour and Reinhard Heckel \nTechnical University of Munich and Munich Center for Machine Learning \nMunich, Germany" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 194, + 188, + 412, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 188, + 412, + 198 + ], + "spans": [ + { + "bbox": [ + 194, + 188, + 412, + 198 + ], + "type": "text", + "content": "y.mansour@tum.de, reinhard.heckel@tum.de" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 159, + 227, + 206, + 239 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 227, + 206, + 239 + ], + "spans": [ + { + "bbox": [ + 159, + 227, + 206, + 239 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 253, + 297, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 253, + 297, + 445 + ], + "spans": [ + { + "bbox": [ + 68, + 253, + 297, + 445 + ], + "type": "text", + "content": "Recently, self-supervised neural networks have shown excellent image denoising performance. However, current dataset free methods are either computationally expensive, require a noise model, or have inadequate image quality. In this work we show that a simple 2-layer network, without any training data or knowledge of the noise distribution, can enable high-quality image denoising at low computational cost. Our approach is motivated by Noise2Noise and Neighbor2Neighbor and works well for denoising pixel-wise independent noise. Our experiments on artificial, real-world camera, and microscope noise show that our method termed ZS-N2N (Zero Shot Noise2Noise) often outperforms existing dataset-free methods at a reduced cost, making it suitable for use cases with scarce data availability and limited compute." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 471, + 149, + 483 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 471, + 149, + 483 + ], + "spans": [ + { + "bbox": [ + 69, + 471, + 149, + 483 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 492, + 296, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 492, + 296, + 552 + ], + "spans": [ + { + "bbox": [ + 68, + 492, + 296, + 552 + ], + "type": "text", + "content": "Image denoising is the process of removing distortions from images, to enhance them visually and to reconstruct fine details. The latter is especially important for medical images, where fine details are necessary for an accurate diagnosis." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 554, + 295, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 554, + 295, + 673 + ], + "spans": [ + { + "bbox": [ + 68, + 554, + 295, + 673 + ], + "type": "text", + "content": "Current state-of-the-art image denoising techniques rely on large data sets of clean-noisy image pairs and often consist of a neural network trained to map the noisy to the clean image. The drawbacks of dataset-based methods are that data collection, even without ground truths, is expensive and time-consuming, and second, a network trained on dataset suffers from a performance drop if the test images come from a different distribution of images. These drawbacks motivate research in dataset-free methods." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 674, + 296, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 674, + 296, + 723 + ], + "spans": [ + { + "bbox": [ + 68, + 674, + 296, + 723 + ], + "type": "text", + "content": "All current zero-shot models are either suitable only for specific noise distributions and need previous knowledge of the noise level [7, 20], require a lot of compute (time, memory, GPU) to denoise an image [24], have" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 228, + 541, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 228, + 541, + 382 + ], + "spans": [ + { + "bbox": [ + 313, + 228, + 541, + 382 + ], + "type": "text", + "content": "poor denoising quality [28], or do not generalise to different noise distributions or levels [15, 24]. We propose a method that builds on the recent Noise2Noise [17] and Neighbour2Neighbour [12] papers and aims to circumvent these issues to reach a good trade-off between denoising quality and computational resources. We make only minimal assumptions on the noise statistics (pixel-wise independence), and do not require training data. Our method does not require an explicit noise model, and is therefore suitable for various noise types and can be employed when the noise distribution or level are unknown. The only assumption we make about the noise is that it is unstructured and has zero mean." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 384, + 541, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 384, + 541, + 514 + ], + "spans": [ + { + "bbox": [ + 313, + 384, + 541, + 514 + ], + "type": "text", + "content": "In a nutshell, we convolve the noisy test image with two fixed filters, which yields two downsampled images. We next train a lightweight network with regularization to map one downsampled image to the other. 
Our strategy builds on the recent Noise2Noise [17] and Neighbour2Neighbour [12] papers, however we take those methods one step further by enabling denoising without any training data. Even with an extremely small network and without any training data, our method achieves good denoising quality and often even outperforms large networks trained on datasets." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 326, + 515, + 511, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 515, + 511, + 526 + ], + "spans": [ + { + "bbox": [ + 326, + 515, + 511, + 526 + ], + "type": "text", + "content": "The key attributes of our work are as follows:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 326, + 535, + 541, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 535, + 541, + 668 + ], + "spans": [ + { + "bbox": [ + 326, + 535, + 541, + 668 + ], + "type": "text", + "content": "- Compute. Dataset free neural network based algorithms [24, 28] require solving an optimization problem involving millions of parameters to denoise an image. The huge parameter count requires large memory storage, advanced GPUs, and long denoising times. In this work we show that our method, that utilizes a simple 2 layer network, with only " + }, + { + "bbox": [ + 326, + 535, + 541, + 668 + ], + "type": "inline_equation", + "content": "20\\mathrm{k}" + }, + { + "bbox": [ + 326, + 535, + 541, + 668 + ], + "type": "text", + "content": " parameters, can often outperform networks with millions of parameters while reducing the computational cost significantly and being easily executable on a CPU." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 326, + 674, + 542, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 674, + 542, + 723 + ], + "spans": [ + { + "bbox": [ + 326, + 674, + 542, + 723 + ], + "type": "text", + "content": "- Generalisation. Existing zero-shot methods often to do not generalise well. For example, BM3D [7], a classical denoising algorithm does not generalize well to non-Gaussian noise, and blind spot net" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 294, + 753, + 318, + 763 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 753, + 318, + 763 + ], + "spans": [ + { + "bbox": [ + 294, + 753, + 318, + 763 + ], + "type": "text", + "content": "14018" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 72, + 306, + 159 + ], + "blocks": [ + { + "bbox": [ + 72, + 72, + 306, + 159 + ], + "lines": [ + { + "bbox": [ + 72, + 72, + 306, + 159 + ], + "spans": [ + { + "bbox": [ + 72, + 72, + 306, + 159 + ], + "type": "image", + "image_path": "2e40134f5190ea5ff9d19f16d882ce0c33535e6e560d24636f8dec3bd8054069.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 180, + 296, + 278 + ], + "lines": [ + { + "bbox": [ + 68, + 180, + 296, + 278 + ], + "spans": [ + { + "bbox": [ + 68, + 180, + 296, + 278 + ], + "type": "text", + "content": "Figure 1. Left and middle plots: PSNR scores for Gaussian and Poisson denoising for different noise levels. Note BM3D's poor performance on Poisson compared to Gaussian noise. Right plot: Time required in seconds to denoise one " + }, + { + "bbox": [ + 68, + 180, + 296, + 278 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 68, + 180, + 296, + 278 + ], + "type": "text", + "content": " colour image on CPU and GPU, tested on Poisson noise with " + }, + { + "bbox": [ + 68, + 180, + 296, + 278 + ], + "type": "inline_equation", + "content": "\\lambda = 50" + }, + { + "bbox": [ + 68, + 180, + 296, + 278 + ], + "type": "text", + "content": ". Except for BM3D, all methods have shorter times on GPU. Only S2S in some cases outperforms our method, however it is about 100 times slower. S2S* denotes the ensemble free version of S2S." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 88, + 302, + 296, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 302, + 296, + 374 + ], + "spans": [ + { + "bbox": [ + 88, + 302, + 296, + 374 + ], + "type": "text", + "content": "works [15] [24] (discussed later in detail) fail to denoise well in the regime of low noise level. Extensive experiments on different noise distributions and noise levels show that our proposed approach can generalise better to different conditions better than existing methods." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 388, + 296, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 388, + 296, + 533 + ], + "spans": [ + { + "bbox": [ + 68, + 388, + 296, + 533 + ], + "type": "text", + "content": "In summary, our proposed method is dataset and noise model-free, and achieves a better trade-off between generalization, denoising quality, and computational resources compared to existing zero-shot methods, as displayed in Figure 1. We compare to the standard zero shot baselines, including BM3D, and the recent neural network-based algorithms DIP [28] and S2S [24]. Only BM3D is faster than our method but achieves poor results on non-Gaussian noise. Only S2S sometimes outperforms our method, but is orders of magnitude slower, often fails on low noise levels [14], and requires assembling to achieve acceptable performance." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 544, + 155, + 558 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 544, + 155, + 558 + ], + "spans": [ + { + "bbox": [ + 69, + 544, + 155, + 558 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 566, + 295, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 566, + 295, + 625 + ], + "spans": [ + { + "bbox": [ + 68, + 566, + 295, + 625 + ], + "type": "text", + "content": "Supervised methods achieve state-of-the-art performance by training a network end-to-end to map a noisy image to a clean one. Networks that work well are CNNs [3, 32], vision transformers [19], or MLP based architectures [21, 27]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 627, + 295, + 722 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 627, + 295, + 722 + ], + "spans": [ + { + "bbox": [ + 68, + 627, + 295, + 722 + ], + "type": "text", + "content": "Noise2Noise [17] yields excellent performance from training on two noisy images of the same static scene, without any ground truth images. Given that the noise has zero mean, training a network to map one noisy image to another noisy image of the same scene performs as well as mapping to the ground truth. While having access to a pair of noisy images of the same scene is in practice hard to achieve, the Noise2Noise method" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 72, + 540, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 540, + 96 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 540, + 96 + ], + "type": "text", + "content": "has motivated further research in self-supervised methods [12] that require only single noisy images." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 110, + 541, + 265 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 110, + 541, + 265 + ], + "spans": [ + { + "bbox": [ + 313, + 110, + 541, + 265 + ], + "type": "text", + "content": "Self-supervised methods are trained on datasets consisting of only noisy images. Noise2Void [15] and Noise2Self [2] are two blind spot prediction approaches for image denoising. Given a set of noisy images " + }, + { + "bbox": [ + 313, + 110, + 541, + 265 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{y}^i\\}_{1}^n" + }, + { + "bbox": [ + 313, + 110, + 541, + 265 + ], + "type": "text", + "content": ", The idea is to minimize the loss " + }, + { + "bbox": [ + 313, + 110, + 541, + 265 + ], + "type": "inline_equation", + "content": "\\frac{1}{n}\\sum_{i=1}^{n}\\mathcal{L}(f_{\\theta}(M^i(\\mathbf{y}^i)),\\mathbf{y}^i)" + }, + { + "bbox": [ + 313, + 110, + 541, + 265 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 313, + 110, + 541, + 265 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 313, + 110, + 541, + 265 + ], + "type": "text", + "content": " is a loss function, " + }, + { + "bbox": [ + 313, + 110, + 541, + 265 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 313, + 110, + 541, + 265 + ], + "type": "text", + "content": " is a network, and " + }, + { + "bbox": [ + 313, + 110, + 541, + 265 + ], + "type": "inline_equation", + "content": "M^i" + }, + { + "bbox": [ + 313, + 110, + 541, + 265 + ], + "type": "text", + "content": " is an operator that masks some pixels, hence the name blind spot. 
Assuming that the neighbouring pixels of a clean image are highly correlated, and that the noise pixels are independent, a network trained to reconstruct a masked pixel, can only predict the signal value from the neighbouring visible pixels, but not the noise." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 266, + 541, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 266, + 541, + 348 + ], + "spans": [ + { + "bbox": [ + 313, + 266, + 541, + 348 + ], + "type": "text", + "content": "Recently, several works [4, 26, 34] attempted to use Stein's unbiased risk estimator for Gaussian denoising. Such methods work well only for Gaussian noise and require the noise level to be known in advance. A more general framework is Noisier2Noise [23] which works for any noise distribution, but the distribution must be known in advance." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 350, + 541, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 350, + 541, + 445 + ], + "spans": [ + { + "bbox": [ + 313, + 350, + 541, + 445 + ], + "type": "text", + "content": "The newly proposed Neighbour2Neighbour [12] builds on the Noise2Noise [17] method, where the assumptions are that the noise has zero mean and is pixel-wise independent. Neighbour2Neighbour extends Noise2Noise by enabling training without noisy image pairs. It does so by sub-sampling single noisy images to create pairs of noisy images, where Noise2Noise can be applied." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 459, + 541, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 459, + 541, + 696 + ], + "spans": [ + { + "bbox": [ + 313, + 459, + 541, + 696 + ], + "type": "text", + "content": "Zero-Shot/ Dataset free Methods. Most similar to our work is Noise2Fast [18], which also builds on Noise2Noise and Neighbour2Neighbour to achieve dataset-free denoising. However, the method is only evaluated on grayscale images, uses a relatively large network, and requires an early stopping criterion. Our work improves on Noise2Fast by easily denoising grayscale or RGB images, and designing a consistency loss that alleviates the need to early stop. Moreover, we use a much smaller network which saves compute. Specifically, our network is twelve times smaller and a forward pass through it is seven times faster. To the best of our knowledge, our work is the first to utilize a small 2-layer network and achieve competitive quality for image restoration. We show that on grayscale images, our method despite achieving similar scores to Noise2Fast [18], produces better quality images. This is likely due to Noise2Fast dropping pixel values when downsampling, whereas our method always keeps all information retained." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 698, + 541, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 698, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 313, + 698, + 541, + 723 + ], + "type": "text", + "content": "Besides this work, classical non-learning-based methods, such as BM3D [7] and Anscombe [20], work" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 753, + 317, + 762 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 753, + 317, + 762 + ], + "spans": [ + { + "bbox": [ + 294, + 753, + 317, + 762 + ], + "type": "text", + "content": "14019" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 72, + 294, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 72, + 294, + 95 + ], + "spans": [ + { + "bbox": [ + 68, + 72, + 294, + 95 + ], + "type": "text", + "content": "well for Gaussian and Poisson noise, respectively, and require the noise level as an input." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 97, + 294, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 97, + 294, + 203 + ], + "spans": [ + { + "bbox": [ + 68, + 97, + 294, + 203 + ], + "type": "text", + "content": "DIP (Deep Image Prior) [28] and its variants such as the Deep Decoder [11] build on the fact that CNNs have an inductive bias towards natural images, in that they can fit natural images much faster than noise. Therefore, a network trained, with early stopping, to map a random input to the noisy image will denoise the image. The denoising performance of DIP is often poor, and is dependent on the number of training epochs, which is hard to determine in advance." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 205, + 294, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 205, + 294, + 349 + ], + "spans": [ + { + "bbox": [ + 68, + 205, + 294, + 349 + ], + "type": "text", + "content": "Self2Self [24] utilizes the idea of the blind spot networks (reconstructing masked pixels) on a single image, but with dropout ensembling. However, this method is not computationally efficient, in that it requires long durations to denoise an image. According to the authors, it takes 1.2 hours to denoise one " + }, + { + "bbox": [ + 68, + 205, + 294, + 349 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 68, + 205, + 294, + 349 + ], + "type": "text", + "content": " image on a GPU. Compared to other blind spot networks, Self2Self achieves significantly better denoising scores, since it relies on ensembling, i.e., averaging the output of several networks. However, ensemble learning over smoothens the image, causing a loss of some details, despite the improvement in PSNR scores [8]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 350, + 294, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 350, + 294, + 434 + ], + "spans": [ + { + "bbox": [ + 68, + 350, + 294, + 434 + ], + "type": "text", + "content": "Similar to almost all supervised and self-supervised methods, both Self2Self and DIP use a UNet [25] or a variant of it as the backbone network in their architectures. A UNet typically has millions of parameters, making it unsuitable for compute limited applications. 
Our work departs from this scheme, by designing a shallow and simple network with few parameters." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 447, + 124, + 459 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 447, + 124, + 459 + ], + "spans": [ + { + "bbox": [ + 69, + 447, + 124, + 459 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 468, + 294, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 468, + 294, + 563 + ], + "spans": [ + { + "bbox": [ + 68, + 468, + 294, + 563 + ], + "type": "text", + "content": "Our method builds on the Noise2Noise [17], for training a network on pairs of noisy images, and the Neighbour2Neighbour (NB2NB) [12], which generates such pairs from a single noisy image. Our main idea is to generate a pair of noisy images from a single noisy image and train a small network only on this pair. We start with a brief summary of Noise2Noise and then introduce our method." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 574, + 294, + 599 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 574, + 294, + 599 + ], + "spans": [ + { + "bbox": [ + 69, + 574, + 294, + 599 + ], + "type": "text", + "content": "3.1. Background: Noise2Noise and Neighbour2Neighbour" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 606, + 294, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 606, + 294, + 677 + ], + "spans": [ + { + "bbox": [ + 68, + 606, + 294, + 677 + ], + "type": "text", + "content": "Supervised denoising methods are typically neural networks " + }, + { + "bbox": [ + 68, + 606, + 294, + 677 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 68, + 606, + 294, + 677 + ], + "type": "text", + "content": " that map a noisy image " + }, + { + "bbox": [ + 68, + 606, + 294, + 677 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 68, + 606, + 294, + 677 + ], + "type": "text", + "content": " to an estimate " + }, + { + "bbox": [ + 68, + 606, + 294, + 677 + ], + "type": "inline_equation", + "content": "f_{\\theta}(\\mathbf{y})" + }, + { + "bbox": [ + 68, + 606, + 294, + 677 + ], + "type": "text", + "content": " of the clean image " + }, + { + "bbox": [ + 68, + 606, + 294, + 677 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 68, + 606, + 294, + 677 + ], + "type": "text", + "content": ". Supervised denoising methods are typically trained on pairs of clean images " + }, + { + "bbox": [ + 68, + 606, + 294, + 677 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 68, + 606, + 294, + 677 + ], + "type": "text", + "content": " and noisy measurements " + }, + { + "bbox": [ + 68, + 606, + 294, + 677 + ], + "type": "inline_equation", + "content": "\\mathbf{y} = \\mathbf{x} + \\mathbf{e}" + }, + { + "bbox": [ + 68, + 606, + 294, + 677 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 68, + 606, + 294, + 677 + ], + "type": "inline_equation", + "content": "\\mathbf{e}" + }, + { + "bbox": [ + 68, + 606, + 294, + 677 + ], + "type": "text", + "content": " is noise. We refer to supervised denoising as Noise2Clean (N2C)." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 678, + 294, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 678, + 294, + 723 + ], + "spans": [ + { + "bbox": [ + 68, + 678, + 294, + 723 + ], + "type": "text", + "content": "Neural networks can also be trained on different noisy observations of the same clean image. Noise2Noise (N2N) [17] assumes access to a set of pairs of noisy images " + }, + { + "bbox": [ + 68, + 678, + 294, + 723 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_1 = \\mathbf{x} + \\mathbf{e}_1, \\mathbf{y}_2 = \\mathbf{x} + \\mathbf{e}_2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 72, + 540, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 540, + 153 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 540, + 153 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 72, + 540, + 153 + ], + "type": "inline_equation", + "content": "\\mathbf{e}_1, \\mathbf{e}_2" + }, + { + "bbox": [ + 313, + 72, + 540, + 153 + ], + "type": "text", + "content": " are independent noise vectors. A network " + }, + { + "bbox": [ + 313, + 72, + 540, + 153 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 313, + 72, + 540, + 153 + ], + "type": "text", + "content": " is then trained to minimize the empirical risk " + }, + { + "bbox": [ + 313, + 72, + 540, + 153 + ], + "type": "inline_equation", + "content": "\\frac{1}{n} \\sum_{i=1}^{n} \\left\\| f_{\\theta}(\\mathbf{y}_1^i) - \\mathbf{y}_2^i \\right\\|_2^2" + }, + { + "bbox": [ + 313, + 72, + 540, + 153 + ], + "type": "text", + "content": ". This makes sense, since in expectation over such noisy instances, and assuming zero mean noise, training a network in a supervised manner to map a noisy image to another noisy image is equivalent to mapping it to a clean image i.e.," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 160, + 542, + 189 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 160, + 542, + 189 + ], + "spans": [ + { + "bbox": [ + 314, + 160, + 542, + 189 + ], + "type": "interline_equation", + "content": "\\underset {\\theta} {\\arg \\min } \\mathbb {E} \\left[ \\| f _ {\\theta} (\\mathbf {y} _ {1}) - \\mathbf {x} \\| _ {2} ^ {2} \\right] = \\underset {\\theta} {\\arg \\min } \\mathbb {E} \\left[ \\| f _ {\\theta} (\\mathbf {y} _ {1}) - \\mathbf {y} _ {2} \\| _ {2} ^ {2} \\right]. \\tag {1}", + "image_path": "c22e52fde3d362fefc1a06e37d331984c3d5a7dbf9b791b5c517780e164bb748.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 326, + 190, + 525, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 190, + 525, + 201 + ], + "spans": [ + { + "bbox": [ + 326, + 190, + 525, + 201 + ], + "type": "text", + "content": "The proof is given in the supplementary material." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 202, + 540, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 202, + 540, + 273 + ], + "spans": [ + { + "bbox": [ + 313, + 202, + 540, + 273 + ], + "type": "text", + "content": "In theory N2N training reaches the same performance as N2C training if the dataset is infinitely large. In practice, since the training set is limited in size, N2N falls slightly short of N2C. 
For example, N2N training with a UNet on 50k images gives a performance drop of only about " + }, + { + "bbox": [ + 313, + 202, + 540, + 273 + ], + "type": "inline_equation", + "content": "0.02\\mathrm{dB}" + }, + { + "bbox": [ + 313, + 202, + 540, + 273 + ], + "type": "text", + "content": " compared to N2C with a UNet." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 274, + 540, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 274, + 540, + 334 + ], + "spans": [ + { + "bbox": [ + 313, + 274, + 540, + 334 + ], + "type": "text", + "content": "Despite the great performance of N2N, its usability is often limited, since it is difficult to obtain a pair of noisy images of the same static scene. For instance, the object being captured might be non-static, or the lighting conditions change rapidly." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 335, + 540, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 335, + 540, + 395 + ], + "spans": [ + { + "bbox": [ + 313, + 335, + 540, + 395 + ], + "type": "text", + "content": "Neighbour2Neighbour (NB2NB) [12] extends N2N and allows training only on a set of single noisy images, by sub-sampling a noisy image to create a pair of noisy images. Similar to N2N, NB2NB exhibits strong denoising performance when trained on many images." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 403, + 443, + 415 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 403, + 443, + 415 + ], + "spans": [ + { + "bbox": [ + 314, + 403, + 443, + 415 + ], + "type": "text", + "content": "3.2. Zero-Shot Noise2Noise" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 422, + 540, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 422, + 540, + 481 + ], + "spans": [ + { + "bbox": [ + 313, + 422, + 540, + 481 + ], + "type": "text", + "content": "Our work extends Noise2Noise [17] and Neighbour2Neighbour [12] by enabling training on only one single noisy image. To avoid overfitting to the single image, we use a very shallow network and an explicit regularization term." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 483, + 540, + 636 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 483, + 540, + 636 + ], + "spans": [ + { + "bbox": [ + 313, + 483, + 540, + 636 + ], + "type": "text", + "content": "Almost all self- or un-supervised denoising methods, including ours, rely on the premise that a clean natural image has different attributes than random noise. As shown in [12], a noisy image can be decomposed into a pair of downsampled images. Based on the premise that nearby pixels of a clean image are highly correlated and often have similar values, while the noise pixels are unstructured and independent, the downsampled pair of noisy images has similar signal but independent noise. This pair can therefore serve as an approximation of two noisy observations of the same scene, where one observation is used as the input, and the other as the target, as in N2N." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 639, + 540, + 722 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 639, + 540, + 722 + ], + "spans": [ + { + "bbox": [ + 313, + 639, + 540, + 722 + ], + "type": "text", + "content": "Our approach is to first decompose the image into a pair of downsampled images and second train a lightweight network with regularization to map one downsampled image to the other. Applying the so-trained network to a noisy image yields the denoised image. We first explain how we generate the downsampled images, and then how we fit the network." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 753, + 317, + 762 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 753, + 317, + 762 + ], + "spans": [ + { + "bbox": [ + 294, + 753, + 317, + 762 + ], + "type": "text", + "content": "14020" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 72, + 294, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 72, + 294, + 191 + ], + "spans": [ + { + "bbox": [ + 68, + 72, + 294, + 191 + ], + "type": "text", + "content": "Image Pair Down sampler The pair down sampler takes as input an image " + }, + { + "bbox": [ + 68, + 72, + 294, + 191 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 68, + 72, + 294, + 191 + ], + "type": "text", + "content": " of size " + }, + { + "bbox": [ + 68, + 72, + 294, + 191 + ], + "type": "inline_equation", + "content": "H\\times W\\times C" + }, + { + "bbox": [ + 68, + 72, + 294, + 191 + ], + "type": "text", + "content": " and generates two images " + }, + { + "bbox": [ + 68, + 72, + 294, + 191 + ], + "type": "inline_equation", + "content": "D_{1}(\\mathbf{y})" + }, + { + "bbox": [ + 68, + 72, + 294, + 191 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 72, + 294, + 191 + ], + "type": "inline_equation", + "content": "D_{2}(\\mathbf{y})" + }, + { + "bbox": [ + 68, + 72, + 294, + 191 + ], + "type": "text", + "content": ", each of size " + }, + { + "bbox": [ + 68, + 72, + 294, + 191 + ], + "type": "inline_equation", + "content": "H / 2\\times W / 2\\times C" + }, + { + "bbox": [ + 68, + 72, + 294, + 191 + ], + "type": "text", + "content": ". The down sampler generates those images by dividing the image into non-overlapping patches of size " + }, + { + "bbox": [ + 68, + 72, + 294, + 191 + ], + "type": "inline_equation", + "content": "2\\times 2" + }, + { + "bbox": [ + 68, + 72, + 294, + 191 + ], + "type": "text", + "content": ", taking an average of the diagonal pixels of each patch and assigning it to the first low-resolution image, then the average of the anti-diagonal pixels and assigning it to the second low-resolution image. See Figure 2 for an illustration of the pair down sampler." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 192, + 296, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 192, + 296, + 334 + ], + "spans": [ + { + "bbox": [ + 68, + 192, + 296, + 334 + ], + "type": "text", + "content": "The downsample is implemented with convolutions as follows. 
The first low-resolution image is obtained by applying a 2D convolution with stride two and fixed kernel " + }, + { + "bbox": [ + 68, + 192, + 296, + 334 + ], + "type": "inline_equation", + "content": "\\mathbf{k}_1 = \\begin{bmatrix} 0 & 0.5 \\\\ 0.5 & 0 \\end{bmatrix}" + }, + { + "bbox": [ + 68, + 192, + 296, + 334 + ], + "type": "text", + "content": " to the original image as " + }, + { + "bbox": [ + 68, + 192, + 296, + 334 + ], + "type": "inline_equation", + "content": "D_1(\\mathbf{y}) = \\mathbf{y} * \\mathbf{k}_1" + }, + { + "bbox": [ + 68, + 192, + 296, + 334 + ], + "type": "text", + "content": ", and the second image is obtained by applying a 2D convolution with stride two and fixed kernel " + }, + { + "bbox": [ + 68, + 192, + 296, + 334 + ], + "type": "inline_equation", + "content": "\\mathbf{k}_2 = \\begin{bmatrix} 0.5 & 0 \\\\ 0 & 0.5 \\end{bmatrix}" + }, + { + "bbox": [ + 68, + 192, + 296, + 334 + ], + "type": "text", + "content": " to the original image as " + }, + { + "bbox": [ + 68, + 192, + 296, + 334 + ], + "type": "inline_equation", + "content": "D_2(\\mathbf{y}) = \\mathbf{y} * \\mathbf{k}_2" + }, + { + "bbox": [ + 68, + 192, + 296, + 334 + ], + "type": "text", + "content": ". The convolutions are implemented channel-wise and therefore the downsampling scheme is applicable to any arbitrary number of input channels." + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 76, + 342, + 287, + 450 + ], + "blocks": [ + { + "bbox": [ + 76, + 342, + 287, + 450 + ], + "lines": [ + { + "bbox": [ + 76, + 342, + 287, + 450 + ], + "spans": [ + { + "bbox": [ + 76, + 342, + 287, + 450 + ], + "type": "image", + "image_path": "072dc66dcb427118e22a6c61690194a433223b48e11010108bfa7e63e6b1f977.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 457, + 295, + 513 + ], + "lines": [ + { + "bbox": [ + 68, + 457, + 295, + 513 + ], + "spans": [ + { + "bbox": [ + 68, + 457, + 295, + 513 + ], + "type": "text", + "content": "Figure 2. The Image Pair Downsampler decomposes an image into two images of half the spatial resolution by averaging diagonal pixels of " + }, + { + "bbox": [ + 68, + 457, + 295, + 513 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 68, + 457, + 295, + 513 + ], + "type": "text", + "content": " non-overlapping patches. In the above example the input is a " + }, + { + "bbox": [ + 68, + 457, + 295, + 513 + ], + "type": "inline_equation", + "content": "4 \\times 4" + }, + { + "bbox": [ + 68, + 457, + 295, + 513 + ], + "type": "text", + "content": " image, and the output is two " + }, + { + "bbox": [ + 68, + 457, + 295, + 513 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 68, + 457, + 295, + 513 + ], + "type": "text", + "content": " images." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 539, + 295, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 539, + 295, + 599 + ], + "spans": [ + { + "bbox": [ + 68, + 539, + 295, + 599 + ], + "type": "text", + "content": "Zero-shot-image denoising method. 
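A minimal PyTorch sketch of the pair downsampler described above: the fixed kernels k1 and k2 are applied channel-wise with stride two, so each non-overlapping 2x2 patch contributes one diagonal average to D1(y) and one anti-diagonal average to D2(y). The function name and the (N, C, H, W) tensor layout are our own conventions.

```python
import torch
import torch.nn.functional as F

def pair_downsampler(y):
    """Decompose y of shape (N, C, H, W) into two images of shape (N, C, H/2, W/2)."""
    c = y.shape[1]
    k1 = torch.tensor([[[[0.0, 0.5], [0.5, 0.0]]]], dtype=y.dtype, device=y.device)
    k2 = torch.tensor([[[[0.5, 0.0], [0.0, 0.5]]]], dtype=y.dtype, device=y.device)
    # Channel-wise (grouped) stride-2 convolutions with the fixed kernels k1, k2.
    d1 = F.conv2d(y, k1.repeat(c, 1, 1, 1), stride=2, groups=c)
    d2 = F.conv2d(y, k2.repeat(c, 1, 1, 1), stride=2, groups=c)
    return d1, d2

# As in Figure 2: a 4x4 input yields two 2x2 outputs.
y = torch.rand(1, 3, 4, 4)
d1, d2 = pair_downsampler(y)
print(d1.shape, d2.shape)  # torch.Size([1, 3, 2, 2]) twice
```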
Given a test image " + }, + { + "bbox": [ + 68, + 539, + 295, + 599 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 68, + 539, + 295, + 599 + ], + "type": "text", + "content": " to denoise, our method is conceptually similar to first fitting a small image-to-image neural network " + }, + { + "bbox": [ + 68, + 539, + 295, + 599 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 68, + 539, + 295, + 599 + ], + "type": "text", + "content": " to map the first downsampled image " + }, + { + "bbox": [ + 68, + 539, + 295, + 599 + ], + "type": "inline_equation", + "content": "D_{1}(\\mathbf{y})" + }, + { + "bbox": [ + 68, + 539, + 295, + 599 + ], + "type": "text", + "content": " to the second one, " + }, + { + "bbox": [ + 68, + 539, + 295, + 599 + ], + "type": "inline_equation", + "content": "D_{2}(\\mathbf{y})" + }, + { + "bbox": [ + 68, + 539, + 295, + 599 + ], + "type": "text", + "content": " by minimizing the loss" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 113, + 605, + 294, + 620 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 605, + 294, + 620 + ], + "spans": [ + { + "bbox": [ + 113, + 605, + 294, + 620 + ], + "type": "interline_equation", + "content": "\\mathcal {L} (\\boldsymbol {\\theta}) = \\left\\| f _ {\\boldsymbol {\\theta}} \\left(D _ {1} (\\mathbf {y})\\right) - D _ {2} (\\mathbf {y}) \\right\\| _ {2} ^ {2}. \\tag {2}", + "image_path": "aa55eabf2a2874edb0f0895f235dedd84401cb56bbde18f0312a204d5d9d0a86.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 627, + 295, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 627, + 295, + 662 + ], + "spans": [ + { + "bbox": [ + 68, + 627, + 295, + 662 + ], + "type": "text", + "content": "Once we fitted the network, we can apply it to the original noisy observation to estimate the denoised image as " + }, + { + "bbox": [ + 68, + 627, + 295, + 662 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{x}} = f_{\\hat{\\theta}}(\\mathbf{y})" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 663, + 296, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 663, + 296, + 723 + ], + "spans": [ + { + "bbox": [ + 68, + 663, + 296, + 723 + ], + "type": "text", + "content": "However, our experiments showed that residual learning, a symmetric loss, and an additional consistency-enforcing term are critical for good performance. We next explain the elements of our loss function. In residual learning, the network is optimized to fit" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 72, + 533, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 533, + 84 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 533, + 84 + ], + "type": "text", + "content": "the noise instead of the image. The loss then becomes" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 339, + 92, + 540, + 107 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 339, + 92, + 540, + 107 + ], + "spans": [ + { + "bbox": [ + 339, + 92, + 540, + 107 + ], + "type": "interline_equation", + "content": "\\mathcal {L} (\\boldsymbol {\\theta}) = \\left\\| D _ {1} (\\mathbf {y}) - f _ {\\boldsymbol {\\theta}} \\left(D _ {1} (\\mathbf {y})\\right) - D _ {2} (\\mathbf {y}) \\right\\| _ {2} ^ {2}. 
\\tag {3}", + "image_path": "1139b6aa2bdc3731325c86a2444142d332c1304591b4a82c3a8980490e8762ff.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 116, + 541, + 164 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 116, + 541, + 164 + ], + "spans": [ + { + "bbox": [ + 313, + 116, + 541, + 164 + ], + "type": "text", + "content": "Following [6], where a symmetric loss was used in the context of self-supervised pretraining of a siamese network, we additionally adopt a symmetric loss, which yields the residual loss:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 323, + 172, + 539, + 226 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 172, + 539, + 226 + ], + "spans": [ + { + "bbox": [ + 323, + 172, + 539, + 226 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {\\text {r e s .}} (\\boldsymbol {\\theta}) = \\frac {1}{2} \\left(\\left\\| D _ {1} (\\mathbf {y}) - f _ {\\boldsymbol {\\theta}} \\left(D _ {1} (\\mathbf {y})\\right) - D _ {2} (\\mathbf {y}) \\right\\| _ {2} ^ {2} + \\right. \\\\ \\left. \\left\\| D _ {2} (\\mathbf {y}) - f _ {\\boldsymbol {\\theta}} \\left(D _ {2} (\\mathbf {y})\\right) - D _ {1} (\\mathbf {y}) \\right\\| _ {2} ^ {2}\\right). \\tag {4} \\\\ \\end{array}", + "image_path": "4d2f4ec5431dfdcc73779f62b46d81488bb5914171234238fcfcc2b6d1eca5d4.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 228, + 541, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 228, + 541, + 275 + ], + "spans": [ + { + "bbox": [ + 313, + 228, + 541, + 275 + ], + "type": "text", + "content": "In addition, we enforce consistency by ensuring that first denoising the image " + }, + { + "bbox": [ + 313, + 228, + 541, + 275 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 313, + 228, + 541, + 275 + ], + "type": "text", + "content": " and then downsampling it, is similar to what we get when first downsampling " + }, + { + "bbox": [ + 313, + 228, + 541, + 275 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 313, + 228, + 541, + 275 + ], + "type": "text", + "content": " and then denoising it, i.e., we consider a loss of the form:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 321, + 283, + 540, + 299 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 283, + 540, + 299 + ], + "spans": [ + { + "bbox": [ + 321, + 283, + 540, + 299 + ], + "type": "interline_equation", + "content": "\\mathcal {L} (\\boldsymbol {\\theta}) = \\left\\| D (\\mathbf {y}) - f _ {\\boldsymbol {\\theta}} (D (\\mathbf {y})) - D (\\mathbf {y} - f _ {\\boldsymbol {\\theta}} (\\mathbf {y})) \\right\\| _ {2} ^ {2}. 
\\tag {5}", + "image_path": "836c4969ea29a9622640e70d6117da092899a51d13a537a98d5433c6658f686c.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 306, + 541, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 306, + 541, + 327 + ], + "spans": [ + { + "bbox": [ + 313, + 306, + 541, + 327 + ], + "type": "text", + "content": "Again adopting a symmetric loss, the consistency loss becomes:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 334, + 544, + 383 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 334, + 544, + 383 + ], + "spans": [ + { + "bbox": [ + 315, + 334, + 544, + 383 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {\\mathrm {c o n s .}} (\\pmb {\\theta}) = \\frac {1}{2} \\Big (\\| D _ {1} (\\mathbf {y}) - f _ {\\pmb {\\theta}} (D _ {1} (\\mathbf {y})) - D _ {1} (\\mathbf {y} - f _ {\\pmb {\\theta}} (\\mathbf {y})) \\| _ {2} ^ {2} \\\\ \\left. + \\left\\| D _ {2} (\\mathbf {y}) - f _ {\\boldsymbol {\\theta}} \\left(D _ {2} (\\mathbf {y})\\right) - D _ {2} (\\mathbf {y} - f _ {\\boldsymbol {\\theta}} (\\mathbf {y})) \\right\\| _ {2} ^ {2}\\right). \\tag {6} \\\\ \\end{array}", + "image_path": "bf1f2b5b6f19aab496c7b72ff5cd53336a5439a5d3de9b6aa1eba52640743147.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 384, + 541, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 384, + 541, + 454 + ], + "spans": [ + { + "bbox": [ + 313, + 384, + 541, + 454 + ], + "type": "text", + "content": "Note that for the residual loss, the network only has the downsampled images as input. Only in the consistency loss, the network gets to see the image in full spatial resolution. Including the consistency loss enables better denoising performance and helps to avoid overfitting. It can therefore be seen as a regularizing term." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 456, + 541, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 456, + 541, + 576 + ], + "spans": [ + { + "bbox": [ + 313, + 456, + 541, + 576 + ], + "type": "text", + "content": "In summary, we minimize the loss " + }, + { + "bbox": [ + 313, + 456, + 541, + 576 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(\\pmb{\\theta}) = \\mathcal{L}_{\\mathrm{res.}}(\\pmb{\\theta}) + \\mathcal{L}_{\\mathrm{cons.}}(\\pmb{\\theta})" + }, + { + "bbox": [ + 313, + 456, + 541, + 576 + ], + "type": "text", + "content": " using gradient descent, which yields the network parameters " + }, + { + "bbox": [ + 313, + 456, + 541, + 576 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{\\theta}}" + }, + { + "bbox": [ + 313, + 456, + 541, + 576 + ], + "type": "text", + "content": ". With those, we estimate the denoised image as " + }, + { + "bbox": [ + 313, + 456, + 541, + 576 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{x}} = \\mathbf{y} - f_{\\hat{\\pmb{\\theta}}}(\\mathbf{y})" + }, + { + "bbox": [ + 313, + 456, + 541, + 576 + ], + "type": "text", + "content": ". 
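Putting Eqs. (2) to (6) together, a compact sketch of the fitting procedure on a single noisy image: the network predicts the noise (residual learning), the loss is the sum of the symmetric residual term (Eq. 4) and the symmetric consistency term (Eq. 6), and the denoised estimate is y - f(y). It reuses pair_downsampler from the sketch above; the optimizer, learning rate, channel width, and iteration count are assumptions for illustration rather than the authors' exact settings.

```python
import torch
import torch.nn as nn

# Stand-in for the lightweight network described in the text
# (two 3x3 convolutions followed by a 1x1 convolution; the width of 48 is assumed).
f = nn.Sequential(nn.Conv2d(3, 48, 3, padding=1), nn.ReLU(),
                  nn.Conv2d(48, 48, 3, padding=1), nn.ReLU(),
                  nn.Conv2d(48, 3, 1))
opt = torch.optim.Adam(f.parameters(), lr=1e-3)
mse = nn.MSELoss()

def zs_n2n_loss(y):
    d1, d2 = pair_downsampler(y)
    # Symmetric residual loss, Eq. (4): each downsampled image, minus the
    # predicted noise, should match the other downsampled image.
    l_res = 0.5 * (mse(d1 - f(d1), d2) + mse(d2 - f(d2), d1))
    # Symmetric consistency loss, Eq. (6): denoise-then-downsample should
    # agree with downsample-then-denoise.
    e1, e2 = pair_downsampler(y - f(y))
    l_cons = 0.5 * (mse(d1 - f(d1), e1) + mse(d2 - f(d2), e2))
    return l_res + l_cons

y = torch.rand(1, 3, 256, 256)      # the single noisy image to be denoised
for _ in range(2000):               # the text reports convergence in 1k to 2k iterations
    opt.zero_grad()
    zs_n2n_loss(y).backward()
    opt.step()

with torch.no_grad():
    x_hat = y - f(y)                # denoised estimate
```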
Note that only the network parameters " + }, + { + "bbox": [ + 313, + 456, + 541, + 576 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}" + }, + { + "bbox": [ + 313, + 456, + 541, + 576 + ], + "type": "text", + "content": " are optimized during the gradient descent updates, since the downsampling operations " + }, + { + "bbox": [ + 313, + 456, + 541, + 576 + ], + "type": "inline_equation", + "content": "D_{1}" + }, + { + "bbox": [ + 313, + 456, + 541, + 576 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 456, + 541, + 576 + ], + "type": "inline_equation", + "content": "D_{2}" + }, + { + "bbox": [ + 313, + 456, + 541, + 576 + ], + "type": "text", + "content": " are fixed. Convergence typically requires 1k to 2k iterations, which thanks to using a lightweight network takes less than half a minute on a GPU and around one minute on a CPU." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 590, + 541, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 590, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 313, + 590, + 541, + 723 + ], + "type": "text", + "content": "Network Many supervised and self-supervised methods use a relatively large network, often a UNet [25]. Instead, we use a very simple two-layer image-to-image network. It consists of only two convolutional operators with kernel size " + }, + { + "bbox": [ + 313, + 590, + 541, + 723 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 313, + 590, + 541, + 723 + ], + "type": "text", + "content": " followed by one operator of " + }, + { + "bbox": [ + 313, + 590, + 541, + 723 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 313, + 590, + 541, + 723 + ], + "type": "text", + "content": " convolutions. This network has about " + }, + { + "bbox": [ + 313, + 590, + 541, + 723 + ], + "type": "inline_equation", + "content": "20k" + }, + { + "bbox": [ + 313, + 590, + 541, + 723 + ], + "type": "text", + "content": " parameters, which is small compared to typical denoising networks. An exact comparison of the network sizes can be found in section 4.4. There are no normalization or pooling layers. The low parameter count and simple structure enables fast denoising even when deployed on a CPU." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 753, + 317, + 762 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 753, + 317, + 762 + ], + "spans": [ + { + "bbox": [ + 294, + 753, + 317, + 762 + ], + "type": "text", + "content": "14021" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 294, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 294, + 109 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 294, + 109 + ], + "type": "text", + "content": "In the ablation studies we show that using a UNet instead of a lightweight network leads to overfitting and much worse denoising performance." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 121, + 149, + 134 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 121, + 149, + 134 + ], + "spans": [ + { + "bbox": [ + 69, + 121, + 149, + 134 + ], + "type": "text", + "content": "4. 
Experiments" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 141, + 294, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 141, + 294, + 236 + ], + "spans": [ + { + "bbox": [ + 68, + 141, + 294, + 236 + ], + "type": "text", + "content": "We compare our denoising algorithm (ZS-N2N) to several baselines. The baselines include dataset based methods, as well as other zero-shot methods. For the dataset based methods, we include both supervised (with clean images) and self-supervised (only noisy images) methods. We test all methods on artificial and real-world noise. We provide ablation studies in the supplementary material." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 238, + 294, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 238, + 294, + 320 + ], + "spans": [ + { + "bbox": [ + 68, + 238, + 294, + 320 + ], + "type": "text", + "content": "The results highlight the dependency of dataset based methods on the dataset they are trained on and suggest that given a small training set, they are outperformed by dataset free ones. Furthermore, the experiments show that methods based on noise models achieve good performance for the specific noise model, but do not generalise to other distributions." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 323, + 294, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 323, + 294, + 453 + ], + "spans": [ + { + "bbox": [ + 68, + 323, + 294, + 453 + ], + "type": "text", + "content": "Concerning the dataset and noise model free methods, our proposed method is either on par or better than other baselines on Gaussian, Poisson, and real world camera and microscope noise. Our method only falls short of Self2Self [24] on high noise levels, however, it requires only " + }, + { + "bbox": [ + 68, + 323, + 294, + 453 + ], + "type": "inline_equation", + "content": "\\frac{1}{200}" + }, + { + "bbox": [ + 68, + 323, + 294, + 453 + ], + "type": "text", + "content": " of the denoising time of Self2Self and " + }, + { + "bbox": [ + 68, + 323, + 294, + 453 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 68, + 323, + 294, + 453 + ], + "type": "text", + "content": " of its memory. Moreover, Self2self's performance on low noise levels is insufficient. Therefore, considering denoising quality, generalisition, and computational resources, our method achieves a better trade-off compared to existing methods as shown in Figure 1." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 463, + 135, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 463, + 135, + 475 + ], + "spans": [ + { + "bbox": [ + 69, + 463, + 135, + 475 + ], + "type": "text", + "content": "4.1. Baselines" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 482, + 294, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 482, + 294, + 577 + ], + "spans": [ + { + "bbox": [ + 68, + 482, + 294, + 577 + ], + "type": "text", + "content": "We compare to Noise2Clean (N2C) with a UNet, which is the current state-of-the-art denoising algorithm. There exists several other networks that perform on par with the UNet, such as DnCNN [32] and RED30 [22], but the UNet is orders of magnitude faster, since it is not very deep, and has a multi-resolution structure. The UNet is therefore the standard choice in all recent denoising papers [12, 15, 17, 23]." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 578, + 294, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 578, + 294, + 709 + ], + "spans": [ + { + "bbox": [ + 68, + 578, + 294, + 709 + ], + "type": "text", + "content": "For the self-supervised methods, we compare to Neighbour2Neighbour (NB2NB) [12] and Noise2Void (N2V) [15]. We exclude the methods that require an explicit noise model, such as [4, 16, 23, 34], since these methods work well on synthetic denoising tasks for the given noise distribution, but fail to generalize to unknown noise distributions or real-world noise [12, 30]. This is due to the fact that the synthetic noise is insufficient for simulating real camera noise, which is signal-dependent and substantially altered by the camera's imaging system." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 80, + 711, + 294, + 722 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 711, + 294, + 722 + ], + "spans": [ + { + "bbox": [ + 80, + 711, + 294, + 722 + ], + "type": "text", + "content": "Regarding the zero-shot methods, which are most" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 72, + 541, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 541, + 133 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 541, + 133 + ], + "type": "text", + "content": "similar to ours, we compare to the deep learning based algorithms: DIP [28] and Self2Self (S2S) [24], and also to the classical algorithm: BM3D [7]. Note that apart of our method (and BM3D), all baselines use a U-Net or a variation of it as a denoising backbone." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 133, + 541, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 133, + 541, + 240 + ], + "spans": [ + { + "bbox": [ + 313, + 133, + 541, + 240 + ], + "type": "text", + "content": "The performance of DIP is very sensitive to the number of gradient descent steps. We used the ground truth images to determine the best early stopping iteration. The DIP results can therefore be seen as an over optimistic performance of the method. For a fair comparison, we report the results of the best performing model for the other baselines. A comparison of the sensitivity of the methods to the number of optimization steps can be found in the supplementary material." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 241, + 539, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 241, + 539, + 324 + ], + "spans": [ + { + "bbox": [ + 313, + 241, + 539, + 324 + ], + "type": "text", + "content": "The original implementation of S2S uses an ensemble of multiple networks, i.e., averaging the outputs of several networks. All other baselines do not utilize ensembling or averaging. For a fair comparison, we additionally report the results of S2S without any ensembling, which we denote by S2S*. S2S denotes the original implementation with an ensemble of 50 networks." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 334, + 409, + 346 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 334, + 409, + 346 + ], + "spans": [ + { + "bbox": [ + 313, + 334, + 409, + 346 + ], + "type": "text", + "content": "4.2. 
Synthetic Noise" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 354, + 541, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 354, + 541, + 412 + ], + "spans": [ + { + "bbox": [ + 313, + 354, + 541, + 412 + ], + "type": "text", + "content": "The dataset based methods (N2C, NB2NB, N2V) are trained on 500 colour images from ImageNet [10]. All methods are tested on the Kodak24 " + }, + { + "bbox": [ + 313, + 354, + 541, + 412 + ], + "type": "inline_equation", + "content": "{}^{1}" + }, + { + "bbox": [ + 313, + 354, + 541, + 412 + ], + "type": "text", + "content": " and McMaster18 [13] datasets. All training and test images are centercropped to patches of size " + }, + { + "bbox": [ + 313, + 354, + 541, + 412 + ], + "type": "inline_equation", + "content": "{256} \\times {256}" + }, + { + "bbox": [ + 313, + 354, + 541, + 412 + ], + "type": "text", + "content": " ." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 413, + 541, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 413, + 541, + 485 + ], + "spans": [ + { + "bbox": [ + 313, + 413, + 541, + 485 + ], + "type": "text", + "content": "We examine Gaussian and Poisson noise with noise levels " + }, + { + "bbox": [ + 313, + 413, + 541, + 485 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 313, + 413, + 541, + 485 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 413, + 541, + 485 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 313, + 413, + 541, + 485 + ], + "type": "text", + "content": " respectively. We consider the fixed noise levels " + }, + { + "bbox": [ + 313, + 413, + 541, + 485 + ], + "type": "inline_equation", + "content": "\\sigma, \\lambda = 10, 25, 50" + }, + { + "bbox": [ + 313, + 413, + 541, + 485 + ], + "type": "text", + "content": ". The " + }, + { + "bbox": [ + 313, + 413, + 541, + 485 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 313, + 413, + 541, + 485 + ], + "type": "text", + "content": " values for Gaussian noise correspond to pixel values in the interval [0,255], while the " + }, + { + "bbox": [ + 313, + 413, + 541, + 485 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 313, + 413, + 541, + 485 + ], + "type": "text", + "content": " values for Poisson noise correspond to values in the interval [0,1]." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 486, + 539, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 486, + 539, + 593 + ], + "spans": [ + { + "bbox": [ + 313, + 486, + 539, + 593 + ], + "type": "text", + "content": "For the dataset based methods, we also consider blind denoising during training with the range of noise levels " + }, + { + "bbox": [ + 313, + 486, + 539, + 593 + ], + "type": "inline_equation", + "content": "\\sigma, \\lambda \\in [10, 50]" + }, + { + "bbox": [ + 313, + 486, + 539, + 593 + ], + "type": "text", + "content": ". During training, a " + }, + { + "bbox": [ + 313, + 486, + 539, + 593 + ], + "type": "inline_equation", + "content": "\\sigma, \\lambda" + }, + { + "bbox": [ + 313, + 486, + 539, + 593 + ], + "type": "text", + "content": " value is sampled uniformly from the given range for each image in each training epoch, unlike the fixed noise levels, where all training images are contaminated with the same noise level. 
Blind denoising is what is used in practice, since an exact noise level is typically not given, but rather a range of noise levels." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 594, + 541, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 594, + 541, + 701 + ], + "spans": [ + { + "bbox": [ + 313, + 594, + 541, + 701 + ], + "type": "text", + "content": "In table 1, we present the denoising performance of the different methods. For the dataset based methods, " + }, + { + "bbox": [ + 313, + 594, + 541, + 701 + ], + "type": "inline_equation", + "content": "\\sigma, \\lambda" + }, + { + "bbox": [ + 313, + 594, + 541, + 701 + ], + "type": "text", + "content": " is known, denotes that the network trained on that exact noise level is used for testing, while unknown denotes the blind denoising, where the network trained on the range of noise levels [10,50] is used for testing. BM3D requires as input the value of the noise level. For Gaussian denoising the known " + }, + { + "bbox": [ + 313, + 594, + 541, + 701 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 313, + 594, + 541, + 701 + ], + "type": "text", + "content": " value was used, while for Possion denoising the noise level was estimated us" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 324, + 712, + 470, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 712, + 470, + 722 + ], + "spans": [ + { + "bbox": [ + 324, + 712, + 470, + 722 + ], + "type": "text", + "content": "http://r0k.us/graphics/kodak/" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 294, + 753, + 317, + 762 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 753, + 317, + 762 + ], + "spans": [ + { + "bbox": [ + 294, + 753, + 317, + 762 + ], + "type": "text", + "content": "14022" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 294, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 294, + 106 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 294, + 106 + ], + "type": "text", + "content": "ing the method in [5]. Note that ZS-N2N, DIP, and S2S do not utilize any prior information on the noise distribution or level." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 109, + 294, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 109, + 294, + 251 + ], + "spans": [ + { + "bbox": [ + 67, + 109, + 294, + 251 + ], + "type": "text", + "content": "As seen from the results, the dataset based methods often fall slightly short of the dataset free methods. This is due to the fact that they were only trained on 500 images, whereas they reach good performance when trained on larger datasets. In the supplementary material, we show that when N2C is trained on 4000 images, it outperforms all other baselines and its performance can keep improving with more training data. Another drawback of dataset based methods is that they are sensitive to the data they are trained on. They experience a performance drop when trained on a range of noise levels as opposed to a specific noise level as the test set." 
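For reference, a small sketch of how the synthetic noise and the reported PSNR values can be generated and computed for images scaled to [0, 1]. The Gaussian sigma is given on the [0, 255] pixel scale as stated above; for Poisson noise we assume the common Poisson(lambda * x) / lambda parameterization (smaller lambda means stronger noise), since the exact convention is not spelled out in this excerpt.

```python
import torch

def add_gaussian(x, sigma):
    """x in [0, 1]; sigma is specified on the [0, 255] pixel scale."""
    return x + (sigma / 255.0) * torch.randn_like(x)

def add_poisson(x, lam):
    """x in [0, 1]; assumed convention: y = Poisson(lam * x) / lam."""
    return torch.poisson(lam * x) / lam

def psnr(x_hat, x, max_val=1.0):
    """Peak signal-to-noise ratio in dB, as reported in the tables."""
    mse = torch.mean((x_hat - x) ** 2)
    return 10.0 * torch.log10(max_val ** 2 / mse)

x = torch.rand(1, 3, 256, 256)
print(psnr(add_gaussian(x, sigma=25.0), x))
print(psnr(add_poisson(x, lam=25.0), x))
```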
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 252, + 294, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 252, + 294, + 323 + ], + "spans": [ + { + "bbox": [ + 67, + 252, + 294, + 323 + ], + "type": "text", + "content": "Regarding the zero-shot methods, DIP exhibited worse scores in all simulations. BM3D is tailored to work well for Gaussian denoising, where the exact noise variance is known and required as input. However, its performance dropped for Poisson noise, where the noise level was estimated." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 324, + 294, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 324, + 294, + 455 + ], + "spans": [ + { + "bbox": [ + 67, + 324, + 294, + 455 + ], + "type": "text", + "content": "ZS-N2N and S2S do not rely on a specific noise model and therefore work consistently well for both Gaussian and Poisson noise. However, S2S suffers from at least two drawbacks. The first is it heavily relies on ensembling to achieve good scores as seen by comparing the results of S2S with " + }, + { + "bbox": [ + 67, + 324, + 294, + 455 + ], + "type": "inline_equation", + "content": "\\mathrm{S2S^{*}}" + }, + { + "bbox": [ + 67, + 324, + 294, + 455 + ], + "type": "text", + "content": ". Despite improving the scores, ensembling oversmoothens the image causing a loss in some visual features [8]. Note that all other baselines are ensemble free. The second drawback is that it performs worse than all other baselines on low noise levels, as seen in the Gaussian noise with " + }, + { + "bbox": [ + 67, + 324, + 294, + 455 + ], + "type": "inline_equation", + "content": "\\sigma = 10" + }, + { + "bbox": [ + 67, + 324, + 294, + 455 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 456, + 294, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 456, + 294, + 527 + ], + "spans": [ + { + "bbox": [ + 67, + 456, + 294, + 527 + ], + "type": "text", + "content": "Considering that DIP performs poorly, that BM3D only works well for Gaussian noise, and that S2S's performance without ensembling and on low noise levels is unsatisfactory, our method, ZS-N2N is the only dataset free denoising algorithm that performs well on different noise distributions and levels." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 536, + 174, + 547 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 536, + 174, + 547 + ], + "spans": [ + { + "bbox": [ + 69, + 536, + 174, + 547 + ], + "type": "text", + "content": "4.3. Real-World Noise" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 555, + 294, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 555, + 294, + 638 + ], + "spans": [ + { + "bbox": [ + 67, + 555, + 294, + 638 + ], + "type": "text", + "content": "Camera noise: Following [24], we evaluate on the PolyU dataset [29] which consists of high-resolution images from various scenes captured by 5 cameras from the 3 leading brands of cameras: Canon, Nikon, and Sony. We also consider the SIDD [1], which consists of images captured by several smartphone cameras under different lighting conditions and noise patterns." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 639, + 294, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 639, + 294, + 721 + ], + "spans": [ + { + "bbox": [ + 67, + 639, + 294, + 721 + ], + "type": "text", + "content": "Since the computational cost for running S2S is high, we randomly choose 20 images from both datasets to test on. The SIDD validation set has images of size " + }, + { + "bbox": [ + 67, + 639, + 294, + 721 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 67, + 639, + 294, + 721 + ], + "type": "text", + "content": ". For consistency, we center-crop the PolyU images to patches of size " + }, + { + "bbox": [ + 67, + 639, + 294, + 721 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 67, + 639, + 294, + 721 + ], + "type": "text", + "content": ". The results are shown in table 2. All methods perform similarly except for BM3D and the ensemble free version of S2S, which" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 314, + 72, + 459, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 459, + 84 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 459, + 84 + ], + "type": "text", + "content": "exhibit a notable performance drop." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 315, + 95, + 547, + 134 + ], + "blocks": [ + { + "bbox": [ + 315, + 95, + 547, + 134 + ], + "lines": [ + { + "bbox": [ + 315, + 95, + 547, + 134 + ], + "spans": [ + { + "bbox": [ + 315, + 95, + 547, + 134 + ], + "type": "table", + "html": "
DatasetZS-N2NDIPS2SS2S*BM3D
PolyU36.9237.0737.0133.1236.11
SIDD34.0734.3133.9830.7728.19
", + "image_path": "f798b3ecd447631810d171f9a3b4d6c1a19f4ba843f1beabd42eac8175e5f65f.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 171, + 539, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 171, + 539, + 290 + ], + "spans": [ + { + "bbox": [ + 313, + 171, + 539, + 290 + ], + "type": "text", + "content": "Microscope noise: We additionally evaluate on the Fluorescence Microscopy dataset [33], which contains real grayscale fluorescence images obtained with commercial confocal, two-photon, and wide-field microscopes and representative biological samples such as cells, zebrafish, and mouse brain tissues. We pick random images from the test set to test on. We also compare to Noise2Fast (N2F) [18], for which code for denoising grayscale is available. The results are depicted in table 3." + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 319, + 300, + 535, + 398 + ], + "blocks": [ + { + "bbox": [ + 315, + 143, + 537, + 153 + ], + "lines": [ + { + "bbox": [ + 315, + 143, + 537, + 153 + ], + "spans": [ + { + "bbox": [ + 315, + 143, + 537, + 153 + ], + "type": "text", + "content": "Table 2. Denoising PSNR in dB on real world camera noise." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 319, + 300, + 535, + 398 + ], + "lines": [ + { + "bbox": [ + 319, + 300, + 535, + 398 + ], + "spans": [ + { + "bbox": [ + 319, + 300, + 535, + 398 + ], + "type": "table", + "html": "
ImagePhoton BPAEPhoton MiceConfocal BPAEAverage
ZS-N2N30.7331.4235.8532.67
DIP29.2230.0135.5131.58
S2S30.9031.5131.0131.14
S2S*29.4929.9929.5429.67
BM3D27.1929.4833.2329.97
N2F30.9331.0736.0132.67
", + "image_path": "d52d06630cbb8303c97dab9b3a1657560e8a49ea2aecb308b3f8d7323837fa26.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 328, + 407, + 525, + 418 + ], + "lines": [ + { + "bbox": [ + 328, + 407, + 525, + 418 + ], + "spans": [ + { + "bbox": [ + 328, + 407, + 525, + 418 + ], + "type": "text", + "content": "Table 3. PSNR in dB on real world microscope noise." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 434, + 539, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 434, + 539, + 612 + ], + "spans": [ + { + "bbox": [ + 313, + 434, + 539, + 612 + ], + "type": "text", + "content": "Our method and Noise2Fast achieve similar scores and slightly outperform the other baselines. Despite the similarity in scores, when inspecting the denoised images visually, we see differences: Our method produces visually sharper images and preserves slightly more details, while the Noise2Fast images are relatively smooth. This is most noticeable on images with fine details, such as MRI images, see Figure 3 for a knee image from the fastMRI dataset [31]. The blurriness in the Noise2Fast images is likely due to the downsampling scheme used, which drops some pixel values, and the ensembling performed to obtain the final image estimate, which oversmoothens the image [8]. Our method, on the other hand, preserves all pixel values during downsampling, and is ensemble free." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 620, + 455, + 633 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 620, + 455, + 633 + ], + "spans": [ + { + "bbox": [ + 313, + 620, + 455, + 633 + ], + "type": "text", + "content": "4.4. Computational Efficiency" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 639, + 539, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 639, + 539, + 721 + ], + "spans": [ + { + "bbox": [ + 313, + 639, + 539, + 721 + ], + "type": "text", + "content": "In this section we focus on the computational efficiency. We consider the denoising time and the memory requirements represented by the number of network parameters. Since in some applications a GPU is not available [9], we additionally consider the denoising time on a CPU. The GPU tested is Quadro RTX 6000 and the CPU is Intel Core i9-9940X 3.30GHz." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 753, + 317, + 762 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 753, + 317, + 762 + ], + "spans": [ + { + "bbox": [ + 294, + 753, + 317, + 762 + ], + "type": "text", + "content": "14023" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 85, + 70, + 526, + 376 + ], + "blocks": [ + { + "bbox": [ + 85, + 70, + 526, + 376 + ], + "lines": [ + { + "bbox": [ + 85, + 70, + 526, + 376 + ], + "spans": [ + { + "bbox": [ + 85, + 70, + 526, + 376 + ], + "type": "table", + "html": "
NoiseMethodKodak24McMaster18
Gaussiandataset-basedN2Cσ known?σ = 10σ = 25σ = 50σ = 10σ = 25
yes33.4528.2725.4733.0328.46
no32.1628.1824.4531.9728.26
NB2NByes33.0127.9025.0232.6328.01
no31.7927.8024.1531.1927.85
N2Vyes30.1926.2124.0730.9526.50
no28.9526.0323.1929.6426.31
dataset-freeZS-N2N (ours)-33.6929.0724.8134.2128.80
DIP-32.2827.3823.9533.0727.61
S2S-29.5428.3926.2230.7828.71
S2S*-26.9326.2924.8327.6426.48
BM3Dyes33.7429.0225.5134.5129.21
Poissondataset-basedN2Cλ known?λ = 50λ = 25λ = 10λ = 50λ = 25
yes29.4227.4926.2529.8928.20
no28.9227.1423.1328.6227.51
NB2NByes29.1927.0125.7129.4127.79
no28.5326.8823.6028.0327.66
N2Vyes27.7325.5523.7727.8625.65
no27.0425.2821.9326.3425.52
dataset-freeZS-N2N (ours)-29.4527.5224.9230.3628.41
DIP-27.5125.8423.8128.7327.37
S2S-28.8928.3127.2930.1129.40
S2S*-26.7526.4025.6327.5527.24
BM3Dno28.3626.5824.2027.3324.77
", + "image_path": "fcee7752d5dbd5a40edbe27f76aa63bbda08283f3781384aeaed670d00f8c964.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 71, + 386, + 539, + 430 + ], + "lines": [ + { + "bbox": [ + 71, + 386, + 539, + 430 + ], + "spans": [ + { + "bbox": [ + 71, + 386, + 539, + 430 + ], + "type": "text", + "content": "Table 1. PSNR scores in dB for Gaussian and Poisson denoising. Best result is in bold, second best result is underlined. The dataset based methods are italicized. Note DIP's mediocre scores and BM3D's performance drop between Gaussian and Poisson noise. S2S has significantly lower scores in low noise as seen with " + }, + { + "bbox": [ + 71, + 386, + 539, + 430 + ], + "type": "inline_equation", + "content": "\\sigma = 10" + }, + { + "bbox": [ + 71, + 386, + 539, + 430 + ], + "type": "text", + "content": " and its ensemble free version S2S* has inadequate performance. Denoised samples can be found in the supplementary material." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 71, + 451, + 294, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 451, + 294, + 570 + ], + "spans": [ + { + "bbox": [ + 71, + 451, + 294, + 570 + ], + "type": "text", + "content": "In table 4 we display the time required to denoise one colour image of size " + }, + { + "bbox": [ + 71, + 451, + 294, + 570 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 71, + 451, + 294, + 570 + ], + "type": "text", + "content": " at inference, as well as the total number of trainable parameters of a model. The dataset based methods are trained for long durations, but after training, the network parameters are fixed, and inference is almost instantaneous, since it is just a forward pass through the model. The time taken for denoising is therefore negligible compared to the zero-shot methods, whose parameters are optimized for each test image separately." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 71, + 575, + 294, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 575, + 294, + 646 + ], + "spans": [ + { + "bbox": [ + 71, + 575, + 294, + 646 + ], + "type": "text", + "content": "In the original implementation of S2S, the authors report a denoising time of 1.2 hours for a " + }, + { + "bbox": [ + 71, + 575, + 294, + 646 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 71, + 575, + 294, + 646 + ], + "type": "text", + "content": " colour image on GPU. However, we noticed that only half of the gradient update iterations are needed for convergence. We therefore report only half of their GPU time." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 71, + 651, + 294, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 651, + 294, + 721 + ], + "spans": [ + { + "bbox": [ + 71, + 651, + 294, + 721 + ], + "type": "text", + "content": "Concerning the denoising time, dataset based methods are the fastest, since a forward pass through a fixed network requires only milli seconds. Regarding the deep learning based zero-shot methods, ZS-N2N is significantly more computationally efficient. 
Specifically, on CPU it is 200 times and 35 times faster than S2S and" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 316, + 451, + 539, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 451, + 539, + 487 + ], + "spans": [ + { + "bbox": [ + 316, + 451, + 539, + 487 + ], + "type": "text", + "content": "DIP respectively and has only " + }, + { + "bbox": [ + 316, + 451, + 539, + 487 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 316, + 451, + 539, + 487 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 316, + 451, + 539, + 487 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 316, + 451, + 539, + 487 + ], + "type": "text", + "content": " of their memory requirements. Only the classical BM3D is computationally more efficient than ZS-N2N." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 316, + 498, + 385, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 498, + 385, + 510 + ], + "spans": [ + { + "bbox": [ + 316, + 498, + 385, + 510 + ], + "type": "text", + "content": "4.5. Discussion" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 316, + 518, + 539, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 518, + 539, + 601 + ], + "spans": [ + { + "bbox": [ + 316, + 518, + 539, + 601 + ], + "type": "text", + "content": "Dataset based methods typically achieve state-of-the-art results but our experiments manifested two of their shortcomings: They don't perform well when trained on small datasets, and the performance drops when the test data differs from the training data, as seen by varying the noise levels. This highlights the importance of dataset free denoising algorithms." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 316, + 603, + 539, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 603, + 539, + 721 + ], + "spans": [ + { + "bbox": [ + 316, + 603, + 539, + 721 + ], + "type": "text", + "content": "Methods that rely on an explicit model of the noise distribution such as Noisier2Noise [23] and Anscombe [20] or those tailored to work well for specific distributions such as BM3D, do not generalize well to other distributions. Their performance therefore degrades when the noise distribution is unknown, or the noise level must be estimated. This has been manifested by BM3D's competitive performance on Gaussian noise, but its failure to keep up with the other baselines on Poisson and real world noise. 
These findings highlight the advantage" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 754, + 317, + 762 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 754, + 317, + 762 + ], + "spans": [ + { + "bbox": [ + 294, + 754, + 317, + 762 + ], + "type": "text", + "content": "14024" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 69, + 165, + 175 + ], + "blocks": [ + { + "bbox": [ + 70, + 69, + 165, + 175 + ], + "lines": [ + { + "bbox": [ + 70, + 69, + 165, + 175 + ], + "spans": [ + { + "bbox": [ + 70, + 69, + 165, + 175 + ], + "type": "image", + "image_path": "4a90a39e1e5aeffe1bf4ef2585b249e2f75711e91028c17d4c61cba64f880f8f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 194, + 69, + 291, + 175 + ], + "blocks": [ + { + "bbox": [ + 194, + 69, + 291, + 175 + ], + "lines": [ + { + "bbox": [ + 194, + 69, + 291, + 175 + ], + "spans": [ + { + "bbox": [ + 194, + 69, + 291, + 175 + ], + "type": "image", + "image_path": "57e4001787a4600cc5a18ca0d13e3c647553819bbdcfed802a4b23c56bad574f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 320, + 69, + 417, + 175 + ], + "blocks": [ + { + "bbox": [ + 320, + 69, + 417, + 175 + ], + "lines": [ + { + "bbox": [ + 320, + 69, + 417, + 175 + ], + "spans": [ + { + "bbox": [ + 320, + 69, + 417, + 175 + ], + "type": "image", + "image_path": "fbf1a105137312093bd3be1650c690ce8c329005a5b4e06f253480600247281b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 445, + 70, + 542, + 175 + ], + "blocks": [ + { + "bbox": [ + 445, + 70, + 542, + 175 + ], + "lines": [ + { + "bbox": [ + 445, + 70, + 542, + 175 + ], + "spans": [ + { + "bbox": [ + 445, + 70, + 542, + 175 + ], + "type": "image", + "image_path": "8fca2ab14f3f86eebb388395846f2c72490c0887387c6152e6f80ca46cd37363.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 70, + 184, + 167, + 278 + ], + "blocks": [ + { + "bbox": [ + 70, + 184, + 167, + 278 + ], + "lines": [ + { + "bbox": [ + 70, + 184, + 167, + 278 + ], + "spans": [ + { + "bbox": [ + 70, + 184, + 167, + 278 + ], + "type": "image", + "image_path": "6af9e111c367a366653d7ded1785a29f770a7bcd717b48de0b280d3906ca8995.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 299, + 541, + 332 + ], + "lines": [ + { + "bbox": [ + 67, + 299, + 541, + 332 + ], + "spans": [ + { + "bbox": [ + 67, + 299, + 541, + 332 + ], + "type": "text", + "content": "Figure 3. Visual comparison between our method and Noise2Fast for denoising Gaussian noise on a knee MRI. Both methods achieve similar PSNR, but notice how the center and left edge are blurry and oversmooth in Noise2Fast. Our method produces a sharper image with less loss of details." 
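A quick way to sanity-check the "Network size" row of Table 4 below is to count trainable parameters of a candidate ZS-N2N network. The architecture follows the two 3x3 convolutions plus one 1x1 convolution described in Section 3; the hidden width of 48 channels is our assumption, chosen because it lands at roughly the 22k parameters listed for ZS-N2N.

```python
import torch.nn as nn

def count_params(net):
    return sum(p.numel() for p in net.parameters())

zs_n2n_net = nn.Sequential(nn.Conv2d(3, 48, 3, padding=1), nn.ReLU(),
                           nn.Conv2d(48, 48, 3, padding=1), nn.ReLU(),
                           nn.Conv2d(48, 3, 1))
print(count_params(zs_n2n_net))  # 22275 trainable parameters, i.e. roughly 22k
```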
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 195, + 184, + 290, + 278 + ], + "blocks": [ + { + "bbox": [ + 195, + 184, + 290, + 278 + ], + "lines": [ + { + "bbox": [ + 195, + 184, + 290, + 278 + ], + "spans": [ + { + "bbox": [ + 195, + 184, + 290, + 278 + ], + "type": "image", + "image_path": "032512022491e34fb7120f464f3eb60cc120ffd18fa3528ffcf08ecfe9e618f1.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 321, + 184, + 416, + 278 + ], + "blocks": [ + { + "bbox": [ + 321, + 184, + 416, + 278 + ], + "lines": [ + { + "bbox": [ + 321, + 184, + 416, + 278 + ], + "spans": [ + { + "bbox": [ + 321, + 184, + 416, + 278 + ], + "type": "image", + "image_path": "a1998f7d1439aa188a577ca18ed8ea70f6dad3c491d2fb0ded4fa0f08d2cebd6.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 446, + 184, + 541, + 278 + ], + "blocks": [ + { + "bbox": [ + 446, + 184, + 541, + 278 + ], + "lines": [ + { + "bbox": [ + 446, + 184, + 541, + 278 + ], + "spans": [ + { + "bbox": [ + 446, + 184, + 541, + 278 + ], + "type": "image", + "image_path": "a171e5ab676a203e8e42fff539343f738e4e985ae75b8fad3afa4d505e479059.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 129, + 343, + 482, + 396 + ], + "blocks": [ + { + "bbox": [ + 129, + 343, + 482, + 396 + ], + "lines": [ + { + "bbox": [ + 129, + 343, + 482, + 396 + ], + "spans": [ + { + "bbox": [ + 129, + 343, + 482, + 396 + ], + "type": "table", + "html": "
MethodN2CNB2NBN2VZS-N2NDIPS2SBM3D
GPU time---20 sec.3 min.35 min.4 sec.
CPU time---80 sec.45 min.4.5 hr.4 sec.
Network size3.3M1.3M2.2M22k2.2M1M-
", + "image_path": "a12a00be32866bf89f6184853791d22ebab695665a3566592800701682f7ddaf.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 403, + 541, + 437 + ], + "lines": [ + { + "bbox": [ + 67, + 403, + 541, + 437 + ], + "spans": [ + { + "bbox": [ + 67, + 403, + 541, + 437 + ], + "type": "text", + "content": "Table 4. Computational Resources. First and Second Rows: Time taken to denoise one image on average on GPU and CPU. The time for the dataset based methods is discarded, since it is negligible. BM3D does not benefit from the GPU, as there is no optimization involved. Bottom Row: Number of parameters of a network." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 458, + 196, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 458, + 196, + 470 + ], + "spans": [ + { + "bbox": [ + 67, + 458, + 196, + 470 + ], + "type": "text", + "content": "of noise model free techniques." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 471, + 295, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 471, + 295, + 566 + ], + "spans": [ + { + "bbox": [ + 67, + 471, + 295, + 566 + ], + "type": "text", + "content": "Regarding the three dataset free and noise model free methods considered, DIP was often lagging behind S2S and ZS-N2N, despite using the ground truths to find the best possible early stopping iteration. S2S's performance without ensembling is inadequate, and even with ensembling, it does not work well on low noise levels. Moreover, it requires more than 0.5 hours to denoise an image on a GPU and 4.5 hours on a CPU." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 567, + 295, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 567, + 295, + 651 + ], + "spans": [ + { + "bbox": [ + 67, + 567, + 295, + 651 + ], + "type": "text", + "content": "Except for ZS-N2N, all deep learning based baselines have millions of parameters, making them computationally expensive. Considering ZS-N2N's ability to generalize to various denoising conditions with relatively fast denoising time, very few parameters, and CPU compatibility, we can conclude that it offers a good trade-off between denoising quality and computational resources." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 665, + 142, + 677 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 665, + 142, + 677 + ], + "spans": [ + { + "bbox": [ + 69, + 665, + 142, + 677 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 67, + 686, + 295, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 686, + 295, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 686, + 295, + 723 + ], + "type": "text", + "content": "We proposed a novel zero-shot image denoising algorithm that does not require any training examples or knowledge of the noise model or level. Our work uses a" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 458, + 541, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 458, + 541, + 589 + ], + "spans": [ + { + "bbox": [ + 313, + 458, + 541, + 589 + ], + "type": "text", + "content": "simple 2-layer network, and allows denoising in a relatively short period of time even when executed without a GPU. 
The method can perform well on simulated noise as well as real-world camera and microscope noise, and achieves a good trade-off between generalization, denoising quality and computational resources compared to existing dataset free methods. A demo of our implementation including our code and hyperparameters can be found in the following colab notebook: https://colab.research.google.com/drive/1i82nyizTdszyHkaHBuKPbWnTzao8HF9b" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 605, + 417, + 618 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 605, + 417, + 618 + ], + "spans": [ + { + "bbox": [ + 314, + 605, + 417, + 618 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 627, + 541, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 627, + 541, + 721 + ], + "spans": [ + { + "bbox": [ + 313, + 627, + 541, + 721 + ], + "type": "text", + "content": "The authors are supported by the Institute of Advanced Studies at the Technical University of Munich, the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) - 456465471, 464123524, the German Federal Ministry of Education and Research, and the Bavarian State Ministry for Science and the Arts. The authors of this work take full responsibility for its content." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 753, + 317, + 762 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 753, + 317, + 762 + ], + "spans": [ + { + "bbox": [ + 294, + 753, + 317, + 762 + ], + "type": "text", + "content": "14025" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 71, + 128, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 71, + 128, + 83 + ], + "spans": [ + { + "bbox": [ + 70, + 71, + 128, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 91, + 296, + 722 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 75, + 91, + 296, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 91, + 296, + 134 + ], + "spans": [ + { + "bbox": [ + 75, + 91, + 296, + 134 + ], + "type": "text", + "content": "[1] Abdelrahman Abdelhamed, Stephen Lin, and Michael S. Brown. A high-quality denoising dataset for smartphone cameras. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 75, + 136, + 296, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 136, + 296, + 201 + ], + "spans": [ + { + "bbox": [ + 75, + 136, + 296, + 201 + ], + "type": "text", + "content": "[2] Joshua Batson and Loic Royer. Noise2Self: Blind denoising by self-supervision. In Kamalika Chaudhuri and Ruslan Salakhutdinov, editors, Proceedings of the 36th International Conference on Machine Learning, volume 97 of Proceedings of Machine Learning Research, pages 524-533. PMLR, 2019. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 75, + 202, + 294, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 202, + 294, + 255 + ], + "spans": [ + { + "bbox": [ + 75, + 202, + 294, + 255 + ], + "type": "text", + "content": "[3] Tim Brooks, Ben Mildenhall, Tianfan Xue, Jiawen Chen, Dillon Sharlet, and Jonathan T. Barron. 
Unprocessing Images for Learned Raw Denoising. In IEEE Conference on Computer Vision and Pattern Recognition, pages 11036-11045, 2019. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 75, + 258, + 294, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 258, + 294, + 300 + ], + "spans": [ + { + "bbox": [ + 75, + 258, + 294, + 300 + ], + "type": "text", + "content": "[4] Sungmin Cha and Taesup Moon. Fully convolutional pixel adaptive image denoiser. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2019. 2, 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 75, + 301, + 294, + 345 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 301, + 294, + 345 + ], + "spans": [ + { + "bbox": [ + 75, + 301, + 294, + 345 + ], + "type": "text", + "content": "[5] Guangyong Chen, Fengyuan Zhu, and Pheng Ann Heng. An efficient statistical method for image noise level estimation. In 2015 IEEE International Conference on Computer Vision (ICCV), pages 477-485, 2015. 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 75, + 346, + 294, + 389 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 346, + 294, + 389 + ], + "spans": [ + { + "bbox": [ + 75, + 346, + 294, + 389 + ], + "type": "text", + "content": "[6] Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15750-15758, 2021. 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 75, + 391, + 294, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 391, + 294, + 434 + ], + "spans": [ + { + "bbox": [ + 75, + 391, + 294, + 434 + ], + "type": "text", + "content": "[7] K. Dabov, A. Foi, V. Katkovnik, and K. Egiazarian. Image Denoising by Sparse 3-D Transform-Domain Collaborative Filtering. IEEE Transactions on Image Processing, 16(8):2080-2095, 2007. 1, 2, 5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 75, + 435, + 294, + 478 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 435, + 294, + 478 + ], + "spans": [ + { + "bbox": [ + 75, + 435, + 294, + 478 + ], + "type": "text", + "content": "[8] Mohammad Zalbagi Darestani and Reinhard Heckel. Accelerated mri with un-trained neural networks. IEEE Transactions on Computational Imaging, 7:724-733, 2021. 3, 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 75, + 479, + 294, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 479, + 294, + 533 + ], + "spans": [ + { + "bbox": [ + 75, + 479, + 294, + 533 + ], + "type": "text", + "content": "[9] Mauricio Delbracio, Ignacio Garcia-Dorado, SungJoon Choi, Damien Kelly, and Peyman Milanfar. Polyblur: Removing mild blur by polynomial reblurring. IEEE Transactions on Computational Imaging, 7:837-848, 2021. 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 535, + 294, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 535, + 294, + 578 + ], + "spans": [ + { + "bbox": [ + 70, + 535, + 294, + 578 + ], + "type": "text", + "content": "[10] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248-255, 2009. 
5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 579, + 294, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 579, + 294, + 621 + ], + "spans": [ + { + "bbox": [ + 70, + 579, + 294, + 621 + ], + "type": "text", + "content": "[11] Reinhard Heckel and Paul Hand. Deep decoder: Concise image representations from untrained non-convolutional networks. International Conference on Learning Representations, 2019. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 624, + 294, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 624, + 294, + 677 + ], + "spans": [ + { + "bbox": [ + 70, + 624, + 294, + 677 + ], + "type": "text", + "content": "[12] Tao Huang, Songjiang Li, Xu Jia, Huchuan Lu, and Jianzhuang Liu. Neighbor2neighbor: Self-supervised denoising from single noisy images. In 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14776-14785, 2021. 1, 2, 3, 5" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 679, + 294, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 679, + 294, + 722 + ], + "spans": [ + { + "bbox": [ + 70, + 679, + 294, + 722 + ], + "type": "text", + "content": "[13] Sandip M. Kasar and Sachin D. Ruikar. Image demosaicking by nonlocal adaptive thresholding. In 2013 International Conference on Signal Processing, Image Processing Pattern Recognition, pages 34-38, 2013. 5" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 541, + 722 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 316, + 73, + 541, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 73, + 541, + 105 + ], + "spans": [ + { + "bbox": [ + 316, + 73, + 541, + 105 + ], + "type": "text", + "content": "[14] Chaewon Kim, Jaeho Lee, and Jinwoo Shin. Zero-shot blind image denoising via implicit neural representations, 2022. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 317, + 107, + 541, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 107, + 541, + 161 + ], + "spans": [ + { + "bbox": [ + 317, + 107, + 541, + 161 + ], + "type": "text", + "content": "[15] Alexander Krull, Tim-Oliver Buchholz, and Florian Jug. Noise2void - learning denoising from single noisy images. In 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2124-2132, 2019. 1, 2, 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 317, + 163, + 541, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 163, + 541, + 228 + ], + "spans": [ + { + "bbox": [ + 317, + 163, + 541, + 228 + ], + "type": "text", + "content": "[16] Samuli Laine, Tero Karras, Jaakko Lehtinen, and Timo Aila. High-quality self-supervised deep image denoising. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. 5" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 230, + 541, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 230, + 541, + 317 + ], + "spans": [ + { + "bbox": [ + 317, + 230, + 541, + 317 + ], + "type": "text", + "content": "[17] Jaakko Lehtinen, Jacob Munkberg, Jon Hasselgren, Samuli Laine, Tero Karras, Miika Aittala, and Timo Aila. Noise2Noise: Learning image restoration without clean data. 
In Jennifer Dy and Andreas Krause, editors, Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pages 2965-2974. PMLR, 2018. 1, 2, 3, 5" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 319, + 540, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 319, + 540, + 352 + ], + "spans": [ + { + "bbox": [ + 316, + 319, + 540, + 352 + ], + "type": "text", + "content": "[18] Jason Lequyer, Reuben Philip, Amit Sharma, Wen-Hsin Hsu, and Laurence Pelletier. A fast blind zero-shot denoiser. Nature Machine Intelligence, oct 2022. 2, 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 354, + 541, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 354, + 541, + 418 + ], + "spans": [ + { + "bbox": [ + 317, + 354, + 541, + 418 + ], + "type": "text", + "content": "[19] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 10012-10022, 2021. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 420, + 540, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 420, + 540, + 463 + ], + "spans": [ + { + "bbox": [ + 316, + 420, + 540, + 463 + ], + "type": "text", + "content": "[20] Markku Makitalo and Alessandro Foi. Optimal inversion of the anscombe transformation in low-countoisson image denoising. IEEE Transactions on Image Processing, 20(1):99-109, 2011. 1, 2, 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 465, + 540, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 465, + 540, + 498 + ], + "spans": [ + { + "bbox": [ + 316, + 465, + 540, + 498 + ], + "type": "text", + "content": "[21] Youssef Mansour, Kang Lin, and Reinhard Heckel. Image-to-image mlp-mixer for image reconstruction. CoRR, abs/2202.02018, 2022. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 499, + 541, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 499, + 541, + 564 + ], + "spans": [ + { + "bbox": [ + 317, + 499, + 541, + 564 + ], + "type": "text", + "content": "[22] Xiaojiao Mao, Chunhua Shen, and Yu-Bin Yang. Image restoration using very deep convolutional encoder-decoder networks with symmetric skip connections. In D. Lee, M. Sugiyama, U. Luxburg, I. Guyon, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 29. Curran Associates, Inc., 2016. 5" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 567, + 540, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 567, + 540, + 620 + ], + "spans": [ + { + "bbox": [ + 316, + 567, + 540, + 620 + ], + "type": "text", + "content": "[23] Nick Moran, Dan Schmidt, Yu Zhong, and Patrick Coady. Noisier2noise: Learning to denoise from unpaired noisy data. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 12061-12069, 2020. 2, 5, 7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 622, + 540, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 622, + 540, + 677 + ], + "spans": [ + { + "bbox": [ + 316, + 622, + 540, + 677 + ], + "type": "text", + "content": "[24] Yuhui Quan, Mingqin Chen, Tongyao Pang, and Hui Ji. 
Self2self with dropout: Learning self-supervised denoising from single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 1, 2, 3, 5, 6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 678, + 540, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 678, + 540, + 722 + ], + "spans": [ + { + "bbox": [ + 316, + 678, + 540, + 722 + ], + "type": "text", + "content": "[25] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In Nassir Navab, Joachim Hornegger, William M. Wells, and Alejandro F. Frangi, editors," + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 753, + 318, + 763 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 753, + 318, + 763 + ], + "spans": [ + { + "bbox": [ + 294, + 753, + 318, + 763 + ], + "type": "text", + "content": "14026" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 73, + 295, + 662 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 88, + 73, + 294, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 73, + 294, + 106 + ], + "spans": [ + { + "bbox": [ + 88, + 73, + 294, + 106 + ], + "type": "text", + "content": "Medical Image Computing and Computer-Assisted Intervention - MICCAI 2015, pages 234-241, Cham, 2015. Springer International Publishing. 3, 4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 71, + 107, + 295, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 107, + 295, + 172 + ], + "spans": [ + { + "bbox": [ + 71, + 107, + 295, + 172 + ], + "type": "text", + "content": "[26] Shakarim Soltanayev and Se Young Chun. Training deep learning based denoisers without ground truth data. In S. Bengio, H. Wallach, H. Larochelle, K. Grauman, N. Cesa-Bianchi, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 31. Curran Associates, Inc., 2018. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 174, + 295, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 174, + 295, + 238 + ], + "spans": [ + { + "bbox": [ + 70, + 174, + 295, + 238 + ], + "type": "text", + "content": "[27] Zhengzhong Tu, Hossein Talebi, Han Zhang, Feng Yang, Peyman Milanfar, Alan Bovik, and Yinxiao Li. Maxim: Multi-axis mlp for image processing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5769-5780, June 2022. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 241, + 295, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 241, + 295, + 283 + ], + "spans": [ + { + "bbox": [ + 70, + 241, + 295, + 283 + ], + "type": "text", + "content": "[28] Dmitry Ulyanov, Andrea Vedaldi, and Victor Lempitsky. Deep image prior. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 1, 2, 3, 5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 285, + 294, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 285, + 294, + 317 + ], + "spans": [ + { + "bbox": [ + 70, + 285, + 294, + 317 + ], + "type": "text", + "content": "[29] Jun Xu, Hui Li, Zhetong Liang, David Zhang, and Lei Zhang. 
Real-world noisy image denoising: A new benchmark, 2018. 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 319, + 294, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 319, + 294, + 384 + ], + "spans": [ + { + "bbox": [ + 70, + 319, + 294, + 384 + ], + "type": "text", + "content": "[30] Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, Ming-Hsuan Yang, and Ling Shao. Cycleisp: Real image restoration via improved data synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 386, + 294, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 386, + 294, + 495 + ], + "spans": [ + { + "bbox": [ + 70, + 386, + 294, + 495 + ], + "type": "text", + "content": "[31] Jure Zbontar, Florian Knoll, Anuroop Sriram, Tullie Murrell, Zhengnan Huang, Matthew J. Muckley, Aaron Defazio, Ruben Stern, Patricia Johnson, Mary Bruno, Marc Parente, Krzysztof J. Geras, Joe Katsnelson, Hersh Chandarana, Zizhao Zhang, Michal Drozdzal, Adriana Romero, Michael Rabbat, Pascal Vincent, Nafissa Yakubova, James Pinkerton, Duo Wang, Erich Owens, C. Lawrence Zitnick, Michael P. Recht, Daniel K. Sodickson, and Yvonne W. Lui. fastMRI: An open dataset and benchmarks for accelerated MRI. 2018. 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 497, + 294, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 497, + 294, + 540 + ], + "spans": [ + { + "bbox": [ + 70, + 497, + 294, + 540 + ], + "type": "text", + "content": "[32] K. Zhang, W. Zuo, Y. Chen, D. Meng, and L. Zhang. Beyond a Gaussian Denoiser: Residual Learning of Deep CNN for Image Denoising. IEEE Transactions on Image Processing, 26(7):3142-3155, 2017. 2, 5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 541, + 294, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 541, + 294, + 585 + ], + "spans": [ + { + "bbox": [ + 70, + 541, + 294, + 585 + ], + "type": "text", + "content": "[33] Yide Zhang, Yinhao Zhu, Evan Nichols, Qingfei Wang, Siyuan Zhang, Cody Smith, and Scott Howard. Aoisson-gaussian denoising dataset with real fluorescence microscopy images. In CVPR, 2019. 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 586, + 294, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 586, + 294, + 662 + ], + "spans": [ + { + "bbox": [ + 70, + 586, + 294, + 662 + ], + "type": "text", + "content": "[34] Magaiya Zhussip, Shakarim Soltanayev, and Se Young Chun. Extending stein's unbiased risk estimator to train deep denoisers with correlated pairs of noisy images. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. 
2, 5" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 753, + 317, + 762 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 753, + 317, + 762 + ], + "spans": [ + { + "bbox": [ + 294, + 753, + 317, + 762 + ], + "type": "text", + "content": "14027" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/Zero-Shot Object Counting/a8ad52a0-ee27-4576-981a-5efd9d6920f0_content_list.json b/2023/Zero-Shot Object Counting/a8ad52a0-ee27-4576-981a-5efd9d6920f0_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..b3d816eb6c90d6fa5d603520582cb35216201915 --- /dev/null +++ b/2023/Zero-Shot Object Counting/a8ad52a0-ee27-4576-981a-5efd9d6920f0_content_list.json @@ -0,0 +1,1692 @@ +[ + { + "type": "text", + "text": "Zero-Shot Object Counting", + "text_level": 1, + "bbox": [ + 344, + 130, + 627, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jingyi $\\mathrm{Xu}^{1}$ , Hieu $\\mathrm{Le}^{2}$ , Vu Nguyen $^{1}$ , Viresh Ranjan $^{*3}$ , and Dimitris Samaras $^{1}$", + "bbox": [ + 184, + 191, + 781, + 213 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1Stony Brook University 2EPFL 3Amazon", + "bbox": [ + 307, + 222, + 661, + 242 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 286, + 313, + 301 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Class-agnostic object counting aims to count object instances of an arbitrary class at test time. Current methods for this challenging problem require human-annotated exemplars as inputs, which are often unavailable for novel categories, especially for autonomous systems. Thus, we propose zero-shot object counting (ZSC), a new setting where only the class name is available during test time. Such a counting system does not require human annotators in the loop and can operate automatically. Starting from a class name, we propose a method that can accurately identify the optimal patches which can then be used as counting exemplars. Specifically, we first construct a class prototype to select the patches that are likely to contain the objects of interest, namely class-relevant patches. Furthermore, we introduce a model that can quantitatively measure how suitable an arbitrary patch is as a counting exemplar. By applying this model to all the candidate patches, we can select the most suitable patches as exemplars for counting. Experimental results on a recent class-agnostic counting dataset, FSC-147, validate the effectiveness of our method. Code is available at https://github.com/cvlab-stonybrook/zero-shot-counting.", + "bbox": [ + 75, + 319, + 473, + 652 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 681, + 209, + 698 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Object counting aims to infer the number of objects in an image. Most of the existing methods focus on counting objects from specialized categories such as human crowds [37], cars [29], animals [4], and cells [46]. These methods count only a single category at a time. Recently, class-agnostic counting [28, 34, 38] has been proposed to count objects of arbitrary categories. 
Several human-annotated bounding boxes of objects are required to specify the objects of interest (see Figure 1a). However, having humans in the loop is not practical for many real-world applications, such as fully automated wildlife monitoring systems or vi", + "bbox": [ + 75, + 708, + 468, + 875 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0c00134abb2666d7bd32bc16dc84060d555ab990b2ba9dc3c7810e1eea5ab5a2.jpg", + "image_caption": [ + "(a) Few-shot Counting", + "Figure 1. Our proposed task of zero-shot object counting (ZSC). Traditional few-shot counting methods require a few exemplars of the object category (a). We propose zero-shot counting where the counter only needs the class name to count the number of object instances. (b). Few-shot counting methods require human annotators at test time while zero-shot counters can be fully automatic." + ], + "image_footnote": [], + "bbox": [ + 558, + 285, + 684, + 434 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/6615debf754375cd3058cd6458bfb7a60f5640e9fee3eca317637b25be9849b5.jpg", + "image_caption": [ + "(b) Zero-Shot Counting" + ], + "image_footnote": [], + "bbox": [ + 705, + 286, + 831, + 434 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "sual anomaly detection systems.", + "bbox": [ + 500, + 551, + 715, + 566 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "A more practical setting, exemplar-free class-agnostic counting, has been proposed recently by Ranjan et al. [33]. They introduce RepRPN, which first identifies the objects that occur most frequently in the image, and then uses them as exemplars for object counting. Even though RepRPN does not require any annotated boxes at test time, the method simply counts objects from the class with the highest number of instances. Thus, it can not be used for counting a specific class of interest. The method is only suitable for counting images with a single dominant object class, which limits the potential applicability.", + "bbox": [ + 496, + 566, + 890, + 733 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Our goal is to build an exemplar-free object counter where we can specify what to count. To this end, we introduce a new counting task in which the user only needs to provide the name of the class for counting rather than the exemplars (see Figure 1b). In this way, the counting model can not only operate in an automatic manner but also allow the user to define what to count by simply providing the class name. Note that the class to count during test time can be arbitrary. For cases where the test class is completely unseen to the trained model, the counter needs to adapt to the unseen class without any annotated data. Hence, we", + "bbox": [ + 496, + 734, + 892, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Work done prior to joining Amazon", + "bbox": [ + 94, + 887, + 292, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "15548", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "name this setting zero-shot object counting (ZSC), inspired by previous zero-shot learning approaches [6, 57].", + "bbox": [ + 75, + 90, + 468, + 121 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To count without any annotated exemplars, our idea is to identify a few patches in the input image containing the target object that can be used as counting exemplars. Here the challenges are twofold: 1) how to localize patches that contain the object of interest based on the provided class name, and 2) how to select good exemplars for counting. Ideally, good object exemplars are visually representative for most instances in the image, which can benefit the object counter. In addition, we want to avoid selecting patches that contain irrelevant objects or backgrounds, which likely lead to incorrect object counts.", + "bbox": [ + 75, + 123, + 467, + 287 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To this end, we propose a two-step method that first localizes the class-relevant patches which contain the objects of interest based on the given class name, and then selects among these patches the optimal exemplars for counting. We use these selected exemplars, together with a pre-trained exemplar-based counting model, to achieve exemplar-free object counting.", + "bbox": [ + 75, + 291, + 467, + 397 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In particular, to localize the patches containing the objects of interest, we first construct a class prototype in a pretrained embedding space based on the given class name. To construct the class prototype, we train a conditional variational autoencoder (VAE) to generate features for an arbitrary class conditioned on its semantic embedding. The class prototype is computed by taking the average of the generated features. We then select the patches whose embeddings are the $k$ -nearest neighbors of the class prototype as the class-relevant patches.", + "bbox": [ + 75, + 398, + 467, + 549 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "After obtaining the class-relevant patches, we further select among them the optimal patches to be used as counting exemplars. Here we observe that the feature maps obtained using good exemplars and bad exemplars often exhibit distinguishable differences. An example of the feature maps obtained with different exemplars is shown in Figure 2. The feature map from a good exemplar typically exhibits some repetitive patterns (e.g., the dots on the feature map) that center around the object areas while the patterns from a bad exemplar are more irregular and occur randomly across the image. Based on this observation, we train a model to measure the goodness of an input patch based on its corresponding feature maps. Specifically, given an arbitrary patch and a pre-trained exemplar-based object counter, we train this model to predict the counting error of the counter when using the patch as the exemplar. Here the counting error can indicate the goodness of the exemplar. 
After this error predictor is trained, we use it to select those patches with the smallest predicted errors as the final exemplars for counting.", + "bbox": [ + 75, + 551, + 467, + 838 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Experiments on the FSC-147 dataset show that our method outperforms the previous exemplar-free counting method [33] by a large margin. We also provide analyses to show that patches selected by our method can be", + "bbox": [ + 75, + 839, + 467, + 900 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/4b39c3d1f8fd9279d469780fddec6dddadcbe6a39a2a530bde83b39226eee0e0.jpg", + "image_caption": [ + "Figure 2. Feature maps obtained using different exemplars given a pre-trained exemplar-based counting model. The feature maps obtained using good exemplars typically exhibit some repetitive patterns while the patterns from bad exemplars are more irregular." + ], + "image_footnote": [], + "bbox": [ + 514, + 92, + 883, + 224 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "used in other exemplar-based counting methods to achieve exemplar-free counting. In short, our main contributions can be summarized as follows:", + "bbox": [ + 498, + 308, + 890, + 353 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We introduce the task of zero-shot object counting that counts the number of instances of a specific class in the input image, given only the class name and without relying on any human-annotated exemplars.", + "- We propose a simple yet effective patch selection method that can accurately localize the optimal patches across the query image as exemplars for zero-shot object counting.", + "- We verify the effectiveness of our method on the FSC-147 dataset, through extensive ablation studies and visualization results." + ], + "bbox": [ + 517, + 364, + 890, + 532 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 560, + 638, + 575 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Class-specific Object Counting", + "text_level": 1, + "bbox": [ + 500, + 585, + 769, + 602 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Class-specific object counting focuses on counting predefined categories, such as humans [1, 15, 24, 26, 37, 39, 40, 42, 47, 52, 53, 55, 56], animals [4], cells [46], or cars [14, 29]. Generally, existing methods can be categorized into two groups: detection-based methods [8, 14, 18] and regression-based methods [7, 10, 11, 27, 41, 53, 56]. Detection-based methods apply an object detector on the image and count the number of objects based on the detected boxes. Regression-based methods predict a density map for each input image, and the final result is obtained by summing up the pixel values. Both types of methods require abundant training data to learn a good model. Class-specific counters can perform well on trained categories. However, they can not be used to count objects of arbitrary categories at test time.", + "bbox": [ + 496, + 609, + 890, + 821 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Class-agnostic Object Counting", + "text_level": 1, + "bbox": [ + 500, + 830, + 777, + 848 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Class-agnostic object counting aims to count arbitrary categories given only a few exemplars [3, 13, 25, 28, 31, 34, 38, 50, 51]. 
GMN [28] uses a shared embedding module to", + "bbox": [ + 498, + 854, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "15549", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/6ebfd08a8f53d7e8186002eb73ea2f3d646578d8303afba3c237121bc88582b5.jpg", + "image_caption": [ + "Figure 3. Overview of the proposed method. We first use a generative model to obtain a class prototype for the given class (e.g. grape) in a pre-trained feature space. Then given an input query image, we randomly sample a number of patches of various sizes and extract the corresponding feature embedding for each patch. We select the patches whose embeddings are the nearest neighbors of the class prototype as class-relevant patches. Then for each of the selected class-relevant patches, we use a pre-trained exemplar-based counting model to obtain the intermediate feature maps. Our proposed error predictor then takes the feature maps as input and predicts the counting error (here we use normalized counting errors). We select the patches with the smallest predicted errors as the final exemplar patches and use them for counting." + ], + "image_footnote": [], + "bbox": [ + 107, + 90, + 848, + 315 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "extract feature maps for both query images and exemplars, which are then concatenated and fed into a matching module to regress the object count. FamNet [34] adopts a similar way to do correlation matching and further applies test-time adaptation. These methods require human-annotated exemplars as inputs. Recently, Ranjan et al. have proposed RepRPN [33], which achieves exemplar-free counting by identifying exemplars from the most frequent objects via a Region Proposal Network (RPN)-based model. However, the class of interest can not be explicitly specified for the RepRPN. In comparison, our proposed method can count instances of a specific class given only the class name.", + "bbox": [ + 75, + 436, + 472, + 619 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. Zero-shot Image Classification", + "text_level": 1, + "bbox": [ + 76, + 633, + 349, + 648 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Zero-shot classification aims to classify unseen categories for which data is not available during training [5, 9, 12, 16, 19, 21, 23, 35, 36]. Semantic descriptors are mostly leveraged as a bridge to enable the knowledge transfer between seen and unseen classes. Earlier zero-shot learning (ZSL) works relate the semantic descriptors with visual features in an embedding space and recognize unseen samples by searching their nearest class-level semantic descriptor in this embedding space [17, 36, 43, 54]. Recently, generative models [20, 22, 48, 49] have been widely employed to synthesize unseen class data to facilitate ZSL [30, 44, 45]. Xian et al. [44] use a conditional Wasserstein Generative Adversarial Network (GAN) [2] to generate unseen features which can then be used to train a discriminative classifier for ZSL. In our method, we also train a generative model conditioned on class-specific semantic embedding. Instead", + "bbox": [ + 75, + 659, + 472, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "of using this generative model to hallucinate data, we use it to compute a prototype for each class. 
This class prototype is then used to select patches that contain objects of interest.", + "bbox": [ + 498, + 436, + 893, + 483 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 500, + 494, + 591, + 511 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Figure 3 summarizes our proposed method. Given an input query image and a class label, we first use a generative model to construct a class prototype for the given class in a pre-trained feature space. We then randomly sample a number of patches of various sizes and extract the feature embedding for each patch. The class-relevant patches are those patches whose embeddings are the nearest neighbors of the class prototype in the embedding space. We further use an error predictor to select the patches with the smallest predicted errors as the final exemplars for counting. We use the selected exemplars in an exemplar-based object counter to infer the object counts. For the rest of the paper, we denote this exemplar-based counter as the \"base counting model\". We will first describe how we train this base counting model and then present the details of our patch selection method.", + "bbox": [ + 496, + 520, + 893, + 748 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Training Base Counting Model", + "text_level": 1, + "bbox": [ + 500, + 756, + 774, + 772 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We train our base counting model using abundant training images with annotations. Similar to previous works [34, 38], the base counting model uses the input image and the exemplars to obtain a density map for object counting. The model consists of a feature extractor $F$ and a counter $C$ . Given a query image $I$ and an exemplar $B$ of an arbitrary class $c$ , we input $I$ and $B$ to the feature extractor to obtain the corresponding output, denoted as $F(I)$ and $F(B)$ re", + "bbox": [ + 496, + 779, + 893, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "15550", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "spectively. $F(I)$ is a feature map of size $d * h_{I} * w_{I}$ and $F(B)$ is a feature map of size $d * h_{B} * w_{B}$ . We further perform global average pooling on $F(B)$ to form a feature vector $b$ of $d$ dimensions.", + "bbox": [ + 76, + 90, + 468, + 150 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "After feature extraction, we obtain the similarity map $S$ by correlating the exemplar feature vector $b$ with the image feature map $F(I)$ . Specifically, if $w_{ij} = F_{ij}(I)$ is the channel feature at spatial position $(i,j)$ , $S$ can be computed by:", + "bbox": [ + 76, + 151, + 468, + 212 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nS _ {i j} (I, B) = w _ {i j} ^ {T} b. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 210, + 224, + 468, + 244 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In the case where $n$ exemplars are given, we use Eq. 1 to calculate $n$ similarity maps, and the final similarity map is the average of these $n$ similarity maps.", + "bbox": [ + 76, + 248, + 468, + 292 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We then concatenate the image feature map $F(I)$ with the similarity map $S$ , and input them into the counter $C$ to predict a density map $D$ . 
The final predicted count $N$ is obtained by summing over the predicted density map $D$ :", + "bbox": [ + 76, + 294, + 468, + 354 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nN = \\sum_ {i, j} D _ {(i, j)}, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 363, + 468, + 395 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $D_{(i,j)}$ denotes the density value for pixel $(i,j)$ . The supervision signal for training the counting model is the $L_{2}$ loss between the predicted density map and the ground truth density map:", + "bbox": [ + 76, + 398, + 468, + 458 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\text {c o u n t}} = \\left\\| D (I, B) - D ^ {*} (I) \\right\\| _ {2} ^ {2}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 166, + 472, + 468, + 489 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $D^{*}$ denotes the ground truth density map.", + "bbox": [ + 76, + 494, + 395, + 510 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Zero-shot Object Counting", + "text_level": 1, + "bbox": [ + 76, + 518, + 320, + 535 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this section, we describe how we count objects of any unseen category given only the class name without access to any exemplar. Our strategy is to select a few patches in the image that can be used as exemplars for the base counting model. These patches are selected such that: 1) they contain the objects that we are counting and 2) they benefit the counting model, i.e., lead to small counting errors.", + "bbox": [ + 76, + 541, + 468, + 648 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.1 Selecting Class-relevant Patches", + "text_level": 1, + "bbox": [ + 76, + 665, + 354, + 680 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To select patches that contain the objects of interest, we first generate a class prototype based on the given class name using a conditional VAE model. Then we randomly sample a number of patches across the query image and select the class-relevant patches based on the generated prototype.", + "bbox": [ + 76, + 688, + 468, + 763 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Class prototype generation. Inspired by previous zero-shot learning approaches [44, 45], we train a conditional VAE model to generate features for an arbitrary class based on the semantic embedding of the class. The semantic embedding is obtained from a pre-trained text-vision model [32] given the corresponding class name. Specifically, we train the VAE model to reconstruct features in a pre-trained ImageNet feature space. The VAE is composed of an Encoder $E$ , which maps a visual feature $x$ to a latent code $z$ ,", + "bbox": [ + 76, + 765, + 468, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "and a decoder $G$ which reconstructs $x$ from $z$ . Both $E$ and $G$ are conditioned on the semantic embedding $a$ . The loss function for training this VAE for an input feature $x$ can be defined as:", + "bbox": [ + 498, + 90, + 890, + 148 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} L _ {V} (x) = \\operatorname {K L} (q (z | x, a) | | p (z | a)) \\tag {4} \\\\ - \\mathrm {E} _ {q (z | x, a)} [ \\log p (x | z, a) ]. 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 584, + 160, + 890, + 196 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The first term is the Kullback-Leibler divergence between the VAE posterior $q(z|x,a)$ and a prior distribution $p(z|a)$ . The second term is the decoder's reconstruction error. $q(z|x,a)$ is modeled as $E(x,a)$ and $p(x|z,a)$ is equal to $G(z,a)$ . The prior distribution is assumed to be $\\mathcal{N}(0,I)$ for all classes.", + "bbox": [ + 498, + 205, + 890, + 295 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We can use the trained VAE to generate the class prototype for an arbitrary target class for counting. Specifically, given the target class name $y$ , we first generate a set of features by inputting the respective semantic vector $a^y$ and a noise vector $z$ to the decoder $G$ :", + "bbox": [ + 498, + 297, + 890, + 372 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {G} ^ {y} = \\{\\hat {x} | \\hat {x} = G (z, y), z \\sim \\mathcal {N} (0, I) \\}. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 568, + 385, + 890, + 402 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The class prototype $\\mathfrak{p}^y$ is computed by taking the mean of all the features generated by VAE:", + "bbox": [ + 498, + 412, + 890, + 443 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {p} ^ {y} = \\frac {1}{| \\mathbb {G} ^ {y} |} \\sum_ {\\hat {x} \\in \\mathbb {G} ^ {y}} \\hat {x} \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 622, + 454, + 890, + 486 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Class-relevant patch selection. The generated class prototype can be considered as a class center representing the distribution of features of the corresponding class in the embedding space. Using the class prototype, we can select the class-relevant patches across the query image. Specifically, we first randomly sample $M$ patches of various sizes $\\{b_1, b_2, \\dots, b_m\\}$ across the query image and extract their corresponding ImageNet features $\\{f_1, f_2, \\dots, f_m\\}$ . To select the class-relevant patches, we calculate the $L_2$ distance between the class prototype and the patch embedding, namely $d_i = \\| f_i - \\mathrm{p}^y\\|_2$ . Then we select the patches whose embeddings are the $k$ -nearest neighbors of the class prototype as the class-relevant patches. Since the ImageNet feature space is highly discriminative, i.e., features close to each other typically belong to the same class, the selected patches are likely to contain the objects of the target class.", + "bbox": [ + 498, + 494, + 890, + 738 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.2 Selecting Exemplars for Counting", + "text_level": 1, + "bbox": [ + 500, + 756, + 790, + 771 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Given a set of class-relevant patches and a pre-trained exemplar-based object counter, we aim to select a few exemplars from these patches that are optimal for counting. To do so, we introduce an error prediction network that predicts the counting error of an arbitrary patch when the patch is used as the exemplar. The counting error is calculated from the pre-trained counting model. 
Specifically, to train this error predictor, given a query image $\\bar{I}$ and an arbitrary patch", + "bbox": [ + 498, + 779, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "15551", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$\\bar{B}$ cropped from $\\bar{I}$ , we first use the base counting model to get the image feature map $F(\\bar{I})$ , similarity map $\\bar{S}$ , and the final predicted density map $\\bar{D}$ . The counting error of the base counting model can be written as:", + "bbox": [ + 76, + 90, + 468, + 151 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\epsilon = \\left| \\sum_ {i, j} \\bar {D} _ {(i, j)} - \\bar {N} ^ {*} \\right|, \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 192, + 162, + 468, + 196 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\bar{N}^*$ denotes the ground truth object count in image $\\bar{I}$ . $\\epsilon$ can be used to measure the goodness of $\\bar{B}$ as an exemplar for $\\bar{I}$ , i.e., a small $\\epsilon$ indicates that $\\bar{B}$ is a suitable exemplar for counting and vice versa.", + "bbox": [ + 76, + 200, + 468, + 262 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The error predictor $R$ is trained to regress the counting error produced by the base counting model. The input of $R$ is the channel-wise concatenation of the image feature map $F(\\bar{I})$ and the similarity map $\\tilde{S}$ . The training objective is the minimization of the mean squared error between the output of the predictor $R(F(\\bar{I}),\\bar{S})$ and the actual counting error produced by the base counting model $\\epsilon$ .", + "bbox": [ + 75, + 263, + 468, + 368 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "After the error predictor is trained, we can use it to select the optimal patches for counting. The candidates for selection here are the class-relevant patches selected by the class prototype in the previous step. For each candidate patch, we use the trained error predictor to infer the counting error when it is being used as the exemplar. The final selected patches for counting are the patches that yield the top- $s$ smallest counting errors.", + "bbox": [ + 75, + 369, + 468, + 489 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2.3 Using the Selected Patches as Exemplars", + "text_level": 1, + "bbox": [ + 76, + 510, + 413, + 526 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Using the error predictor, we predict the error for each candidate patch and select the patches that lead to the smallest counting errors. The selected patches can then be used as exemplars for the base counting model to get the density map and the final count. We also conduct experiments to show that these selected patches can serve as exemplars for other exemplar-based counting models to achieve exemplar-free class-agnostic counting.", + "bbox": [ + 75, + 534, + 468, + 656 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 670, + 209, + 686 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Implementation Details", + "text_level": 1, + "bbox": [ + 76, + 695, + 294, + 710 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Network architecture For the base counting model, we use ResNet-50 as the backbone of the feature extractor, initialized with the weights of a pre-trained ImageNet model. 
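The following is a hedged sketch of the error-predictor objective from Sec. 3.2.2: a small network regresses, from the channel-wise concatenation of the image feature map and the similarity map, the counting error of the frozen base counter (Eq. 7). The layer widths, depth, and channel count (257 = 256 feature channels + 1 similarity channel) are assumptions for illustration; the paper's predictor instead uses five convolutional and bilinear upsampling layers followed by a linear layer.

```python
import torch
import torch.nn as nn

class ErrorPredictor(nn.Module):
    """Simplified stand-in for the error predictor R (architecture is illustrative)."""
    def __init__(self, in_channels=257):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, 64, 3, padding=1), nn.ReLU(),
            nn.Conv2d(64, 32, 3, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten(),
        )
        self.head = nn.Linear(32, 1)                  # scalar predicted counting error

    def forward(self, feat_map, sim_map):
        x = torch.cat([feat_map, sim_map], dim=1)     # channel-wise concatenation of F(I) and S
        return self.head(self.conv(x)).squeeze(1)

def train_step(predictor, optimizer, feat_map, sim_map, density, gt_count):
    """One MSE step: regress eps = |sum(D) - N*| (Eq. 7) of the frozen base counter.

    density: predicted density map of shape (B, 1, H, W); gt_count: ground-truth counts (B,).
    """
    eps = (density.sum(dim=(1, 2, 3)) - gt_count).abs()
    loss = nn.functional.mse_loss(predictor(feat_map, sim_map), eps)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()
```

At test time the same predictor would simply be run on each class-relevant patch, and the patches with the smallest predicted errors kept as exemplars, as the text describes.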
The backbone outputs feature maps of 1024 channels. For each query image, the number of channels is reduced to 256 using an $1 \\times 1$ convolution. For each exemplar, the feature maps are first processed with global average pooling and then linearly mapped to obtain a 256-d feature vector. The counter consists of 5 convolutional and bilinear upsampling layers to regress a density map of the same size as the query image. For the feature generation model, both the encoder and the decoder are two-layer fully-connected", + "bbox": [ + 75, + 719, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "(FC) networks with 4096 hidden units. LeakyReLU and ReLU are the non-linear activation functions in the hidden and output layers, respectively. The dimensions of the latent space and the semantic embeddings are both set to be 512. For the error predictor, 5 convolutional and bilinear upsampling layers are followed by a linear layer to output the counting error.", + "bbox": [ + 496, + 90, + 890, + 196 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Dataset We use the FSC-147 dataset [34] to train the base counting model and the error predictor. FSC-147 is the first large-scale dataset for class-agnostic counting. It includes 6135 images from 147 categories varying from animals, kitchen utensils, to vehicles. The categories in the training, validation, and test sets do not overlap. The feature generator is trained on the MS-COCO detection dataset. Note that the previous exemplar-free method [33] also uses MS-COCO to pre-train their counter.", + "bbox": [ + 496, + 196, + 890, + 332 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training details Both the base counting model and the error predictor are trained using the AdamW optimizer with a fixed learning rate of $10^{-5}$ . The base counting model is trained for 300 epochs with a batch size of 8. We resize the input query image to a fixed height of 384, and the width is adjusted accordingly to preserve the aspect ratio of the original image. Exemplars are resized to $128 \\times 128$ before being input into the feature extractor. The feature generation model is trained using the Adam optimizer and the learning rate is set to be $10^{-4}$ . The semantic embeddings are extracted from CLIP [32]. To select the class-relevant patches, we randomly sample 450 boxes of various sizes across the input query image and select 10 patches whose embeddings are the 10-nearest neighbors of the class prototype. The final selected patches are those that yield the top-3 smallest counting errors predicted by the error predictor.", + "bbox": [ + 496, + 333, + 890, + 575 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. Evaluation Metrics", + "text_level": 1, + "bbox": [ + 500, + 584, + 684, + 599 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We use Mean Average Error (MAE) and Root Mean Squared Error (RMSE) to measure the performance of different object counters. Besides, we follow [31] to report the Normalized Relative Error (NAE) and Squared Relative Error (SRE). 
In particular, MAE = $\\frac{1}{n}\\sum_{i=1}^{n}|y_i - \\hat{y}_i|$ ; RMSE = $\\sqrt{\\frac{1}{n}\\sum_{i=1}^{n}(y_i - \\hat{y}_i)^2}$ ; NAE = $\\frac{1}{n}\\sum_{i=1}^{n}\\frac{|y_i - \\hat{y}_i|}{y_i}$ ; SRE = $\\sqrt{\\frac{1}{n}\\sum_{i=1}^{n}\\frac{(y_i - \\hat{y}_i)^2}{y_i}}$ where $n$ is the number of test images, and $y_i$ and $\\hat{y}_i$ are the ground truth and the predicted number of objects for image $i$ respectively.", + "bbox": [ + 496, + 607, + 890, + 762 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3. Comparing Methods", + "text_level": 1, + "bbox": [ + 500, + 771, + 694, + 787 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We compare our method with the previous works on class-agnostic counting. RepRPN-Counter [33] is the only previous class-agnostic counting method that does not require human-annotated exemplars as input. In order to make other exemplar based class-agnostic methods including GMN (General Matching Network [28]), FamNet (Few-shot adaptation and matching Network [34]) and BMNet", + "bbox": [ + 496, + 795, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "15552", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/a1d0ad93004fb7f7365ac0106411ed0f96180ca75cccb8b3964276fcb238c793.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | Exemplars | Val Set | Test Set
MAE | RMSE | NAE | SRE | MAE | RMSE | NAE | SRE
GMN [28] | GT | 29.66 | 89.81 | - | - | 26.52 | 124.57 | - | -
RPN | 40.96 | 108.47 | - | - | 39.72 | 142.81 | - | -
FamNet+ [34] | GT | 23.75 | 69.07 | 0.52 | 4.25 | 22.08 | 99.54 | 0.44 | 6.45
RPN | 42.85 | 121.59 | 0.75 | 6.94 | 42.70 | 146.08 | 0.74 | 7.14
BMNet [38] | GT | 19.06 | 67.95 | 0.26 | 4.39 | 16.71 | 103.31 | 0.26 | 3.32
RPN | 37.26 | 108.54 | 0.42 | 5.43 | 37.22 | 143.13 | 0.41 | 5.31
BMNet+ [38] | GT | 15.74 | 58.53 | 0.27 | 6.57 | 14.62 | 91.83 | 0.25 | 2.74
RPN | 35.15 | 106.07 | 0.41 | 5.28 | 34.52 | 132.64 | 0.39 | 5.26
RepRPN-Counter [33] | - | 30.40 | 98.73 | - | - | 27.45 | 129.69 | - | -
Ours (Base) | GT | 18.55 | 61.12 | 0.30 | 3.18 | 20.68 | 109.14 | 0.36 | 7.63
RPN | 32.19 | 99.21 | 0.38 | 4.80 | 29.25 | 130.65 | 0.35 | 4.35
Patch-Selection | 26.93 | 88.63 | 0.36 | 4.26 | 22.09 | 115.17 | 0.34 | 3.74
", + "bbox": [ + 191, + 88, + 781, + 295 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1. Quantitative comparisons on the FSC-147 dataset. \"GT\" denotes using human-annotated boxes as exemplars. \"RPN\" denotes using the top-3 RPN proposals with the highest objectness scores as exemplars. \"Patch-Selection\" denotes using our selected patches as exemplars.", + "bbox": [ + 75, + 299, + 893, + 340 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "(Bilinear Matching Network [38]) work in the exemplar-free setup, we replace the human-provided exemplars with the exemplars generated by a pre-trained object detector. Specifically, we use the RPN of Faster RCNN pre-trained on MS-COCO dataset and select the top-3 proposals with the highest objectness score as the exemplars. We also include the performance of these methods using human-annotated exemplars for a complete comparison.", + "bbox": [ + 75, + 357, + 472, + 478 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4. Results", + "text_level": 1, + "bbox": [ + 76, + 494, + 171, + 508 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Quantitative results. As shown in Table 1, our proposed method outperforms the previous exemplar-free counting method [33] by a large margin, resulting in a reduction of 10.10 w.r.t. the validation RMSE and 14.52 w.r.t. the test RMSE. We also notice that the performance of all exemplar-based counting methods drops significantly when replacing human-annotated exemplars with RPN generated proposals. The state-of-the-art exemplar-based method BMNet+ [38], for example, shows an 19.90 error increase w.r.t. the test MAE and a 40.81 increase w.r.t. the test RMSE. In comparison, the performance gap is much smaller when using our selected patches as exemplars, as reflected by a 1.41 increase w.r.t. the test MAE and a 6.03 increase w.r.t. the test RMSE. Noticeably, the NAE and the SRE on the test set are even reduced when using our selected patches compared with the human-annotated exemplars.", + "bbox": [ + 75, + 520, + 468, + 762 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Qualitative analysis. In Figure 4, we present a few input images, the image patches selected by our method, and the corresponding density maps. Our method effectively identifies the patches that are suitable for object counting. The density maps produced by our selected patches are meaningful and close to the density maps produced by human-annotated patches. The counting model with random image patches as exemplars, in comparison, fails to output meaningful density maps and infers incorrect object counts.", + "bbox": [ + 75, + 763, + 470, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5. Analyses", + "text_level": 1, + "bbox": [ + 500, + 354, + 599, + 372 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1. Ablation Studies", + "text_level": 1, + "bbox": [ + 500, + 381, + 663, + 397 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our proposed patch selection method consists of two steps: the selection of class-relevant patches via a generated class prototype and the selection of the optimal patches via an error predictor. We analyze the contribution of each step quantitatively and qualitatively. Quantitative results are in Table 2. We first evaluate the performance of our baseline, i.e. using 3 randomly sampled patches as exemplars without any selection step. 
As shown in Table 2, using the class prototype to select class-relevant patches reduces the error rate by 7.19 and 6.07 on the validation and test set of MAE, respectively. Applying the error predictor can improve the baseline performance by 7.22 on the validation MAE and 7.57 on the test MAE. Finally, applying the two components together further boosts performance, achieving 26.93 on the validation MAE and 22.09 on the test MAE.", + "bbox": [ + 496, + 407, + 890, + 632 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We provide further qualitative analysis by visualizing the selected patches. As shown in Figure 5, for each input query image, we show 10 class-relevant patches selected using our generated prototype, ranked by their predicted counting error (from low to high). All the 10 selected class-relevant patches exhibit some class specific features. However, not all these patches are suitable to be used as counting exemplars, i.e., some patches only contain parts of the object, and some patches contain some background. By further applying our proposed error predictor, we can identify the most suitable patches with the smallest predicted counting errors.", + "bbox": [ + 496, + 635, + 890, + 801 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2. Generalization to Exemplar-based Methods", + "text_level": 1, + "bbox": [ + 498, + 815, + 870, + 830 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our proposed method can be considered as a general patch selection method that is applicable to other visual counters to achieve exemplar-free counting. To verify that, we use our selected patches as the exemplars for three", + "bbox": [ + 496, + 840, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "15553", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/fe4a6ef528c84641fb0e5dd34b44a7a8276d88d070b0e4c619d7316609b7c106.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 90, + 285, + 167 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8ca765e47c0c7daa9642f1120b10640503f129a405c3ea0307f1142e0fc7fcdb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 315, + 90, + 457, + 166 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d1909e34066588d405bc4d78523cb7389e762ed28ac84e5b336bbde8d1ade92a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 475, + 90, + 616, + 167 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/94bdd630442b3b673505ae494ab3ffa239ebd0023791386d504855f8c24bc4f2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 633, + 90, + 774, + 167 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/eb3009146f36d56ad021f85e0b4771c8f06a5c28b738f5771aa15c3230cbd957.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 172, + 285, + 253 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f374c2dfeec04c74b949d57f39d8a02f9f0835f1b3f4d6a9028d61dca898e190.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 315, + 172, + 457, + 253 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8b309d9a73c65e4e982e77d9e88ec2e160d479f751387141841d308fcf13b8b8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 475, + 172, + 616, + 252 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": 
"images/29b47f7e9b6a9d5ea8fdf3d200a38bfe0d9b75ca54f0b71e926e53ca6fb5f1d6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 633, + 172, + 772, + 252 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/4bc34c2ca1145c11f19f0a3e9fc03a84e2f29e8d5859ac7cd3b0be5486b693f2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 262, + 285, + 325 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a44cff7de693c74ae8f4bfd3d996ad58aa1ae282811b5b9f872751ffde832b3b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 316, + 258, + 457, + 332 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/531c80e23d0421edb6b176462146e43fb76a7e403dba021f57de93950817aaf1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 475, + 258, + 616, + 332 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6b2b203afd898aae2fa9579837ebd56b1f319d0c70f9cab13d61a3f08683ae32.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 633, + 258, + 772, + 332 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/330cdba23ac7d59f52bf378aadf8438e3bcadeb2a7bd0b1e8c7b423755924c50.jpg", + "image_caption": [ + "Figure 4. Qualitative results on the FSC-147 dataset. We show the counting exemplars and the corresponding density maps of ground truth boxes, randomly selected patches, and our selected patches respectively. Predicted counting results are shown at the top-right corner. Our method accurately identifies suitable patches for counting and the predicted density maps are close to the ground truth density maps." + ], + "image_footnote": [], + "bbox": [ + 178, + 337, + 282, + 410 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/69dbf44a6ceaf9a2dcb2374ab289efdabb0dfd9300461637ca541da95e0d99b0.jpg", + "image_caption": [ + "Ground Truth" + ], + "image_footnote": [], + "bbox": [ + 316, + 335, + 457, + 409 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/9158edf2d16c61e5c3520586e1b488792e3cffbdd9fdd417458ba156bad48009.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 475, + 335, + 616, + 409 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/289d8be36828979647e6e733900eba80a45585256e711c64af9db544502c13a4.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 633, + 335, + 772, + 409 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2bf6900cbb2c147be150f90b4bc4c32a82b1862130611c1fbf3e2a15615a7e57.jpg", + "image_caption": [ + "Figure 5. Qualitative ablation analysis. All the 10 selected class-relevant patches exhibit some class-specific attributes. They are ranked by the predicted counting errors and the final selected patches with the smallest errors are framed in green." + ], + "image_footnote": [], + "bbox": [ + 168, + 481, + 802, + 667 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/c1827ed886ca7a02a6055278c94d46bb4b724acc6a035c02b093ab5e21bfc5d1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Prototype | Predictor | Val Set | Test Set
MAE | RMSE | NAE | SRE | MAE | RMSE | NAE | SRE
- | - | 35.20 | 106.70 | 0.61 | 6.68 | 31.37 | 134.98 | 0.52 | 5.92
✓ | - | 28.01 | 88.29 | 0.39 | 4.66 | 25.30 | 113.82 | 0.40 | 4.88
- | ✓ | 27.98 | 88.62 | 0.43 | 4.59 | 23.80 | 128.36 | 0.40 | 4.43
✓ | ✓ | 26.93 | 88.63 | 0.36 | 4.26 | 22.09 | 115.17 | 0.34 | 3.74
", + "bbox": [ + 78, + 719, + 470, + 791 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2. Ablation study on each component's contribution to the final results. We show the effectiveness of the two steps of our framework: selecting class-relevant patches via a generated class prototype and selecting optimal patches via an error predictor.", + "bbox": [ + 75, + 801, + 470, + 859 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "other different exemplar-based methods: FamNet [34], BMNet and BMNet+ [38]. Figure 6 (a) shows the results on the FSC-147 validation set. The baseline uses three randomly sampled patches as the exemplars for the pre-trained exemplar-based counter. By using the generated class prototype to select class-relevant patches, the error rate is reduced by 5.18, 8.59 and 5.60 on FamNet, BMNet and BMNet+, respectively. In addition, as the error predictor is additionally adopted, the error rate is further reduced by 1.76, 1.00 and 1.08 on FamNet, BMNet and BMNet+, respectively. Similarly, Figure 6 (b) shows the results on the FSC", + "bbox": [ + 496, + 720, + 893, + 888 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "15554", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "147 test set. Our method achieves consistent performance improvements for all three methods.", + "bbox": [ + 76, + 90, + 468, + 121 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d2b04911282de7979d4bf2835d8b21d7e9590aa789438c8d8521920c00a25814.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 94, + 142, + 433, + 284 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/9871b996fafadca835a82c199766ca8c4307f6aff46fadae7410b9f81c06a48b.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 94, + 309, + 433, + 450 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.3. Multi-class Object Counting", + "text_level": 1, + "bbox": [ + 76, + 604, + 331, + 619 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Our method can count instances of a specific class given the class name, which is particularly useful when there are multiple classes in the same image. In this section, we show some visualization results in this multi-class scenario. As seen in Figure 7, our method selects patches according to the given class name and count instances from that specific class in the input image. Correspondingly, the heatmap highlights the image regions that are most relevant to the specified class. Here the heatmaps are obtained by correlating the exemplar feature vector with the image feature map in a pre-trained ImageNet feature space. Note that we mask out the image region where the activation value in the heatmap is below a threshold for counting purpose. We also show the patches selected using another exemplar-free counting method, RepRPN [33]. The class of RepRPN selected patches can not be explicitly specified. 
It simply selects patches from the class with the highest number of instances in the image according to the repetition score.", + "bbox": [ + 75, + 628, + 470, + 900 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/efee905dc8491de7e8ab72a66f303efe9a7bfd45def19d6566416ca8dafa8c6a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 90, + 867, + 232 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b2057ff667537b23fb5a12c10c5f78d9a862efe17544dad0c261cde14cdd70eb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 234, + 867, + 375 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a21535710190b515d0cc1b8add4a36c28d21c85ccb4ca6cf656c6e1b9f73716e.jpg", + "image_caption": [ + "Figure 6. Using our selected patches as exemplars for other exemplar-based class-agnostic counting methods (FamNet, BMNet and BMNet+) on FSC-147 dataset. Blue bars are the MAEs of using three randomly sampled patches. Orange bars are the MAEs of using the class prototype to select class-relevant patches as exemplars. Green bars are the MAEs of using the class prototype and error predictor to select optimal patches as exemplars.", + "Figure 7. Visualization results of our method in some multi-class examples. Our method selects patches according to the given class name and the corresponding heatmap highlights the relevant areas." + ], + "image_footnote": [], + "bbox": [ + 506, + 380, + 867, + 518 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 607, + 617, + 622 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we proposed a new task, zero-shot object counting, to count instances of a specific class given only the class name without access to any exemplars. To address this, we developed a simple yet effective method that accurately localizes the optimal patches across the query image that can be used as counting exemplars. Specifically, we construct a class prototype in a pre-trained feature space and use the prototype to select patches that contain objects of interest; then we use an error predictor to select those patches with the smallest predicted errors as the final exemplars for counting. Extensive results demonstrate the effectiveness of our method. We also conduct experiments to show that our selected patches can be used for other exemplar-based counting methods to achieve exemplar-free counting.", + "bbox": [ + 496, + 631, + 890, + 843 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements. This research was partially supported by NSF grants IIS-2123920 and IIS-2212046 and the NASA Biodiversity program (Award 80NSSC21K1027).", + "bbox": [ + 498, + 849, + 890, + 893 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "15555", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 106 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Shahira Abousamra, Minh Hoai, Dimitris Samaras, and Chao Chen. Localization in the crowd with topological constraints. In AAAI, 2021. 2", + "[2] Martin Arjovsky, Soumith Chintala, and Léon Bottou. Wasserstein gan. In ICML, 2017. 3", + "[3] Carlos Arteta, Victor S. Lempitsky, Julia Alison Noble, and Andrew Zisserman. Interactive object counting. In ECCV, 2014. 2", + "[4] Carlos Arteta, Victor S. 
Lempitsky, and Andrew Zisserman. Counting in the wild. In ECCV, 2016. 1, 2", + "[5] Yuval Atzmon and Gal Chechik. Adaptive confidence smoothing for generalized zero-shot learning. In CVPR, 2019. 3", + "[6] Ankan Bansal, Karan Sikka, Gaurav Sharma, Rama Chellappa, and Ajay Divakaran. Zero-shot object detection. In ECCV, 2018. 2", + "[7] Antoni B. Chan, Zhang-Sheng John Liang, and Nuno Vasconcelos. Privacy preserving crowd monitoring: Counting people without people models or tracking. In CVPR, 2008. 2", + "[8] Prithvijit Chattopadhyay, Ramakrishna Vedantam, Ramprasaath R. Selvaraju, Dhruv Batra, and Devi Parikh. Counting everyday objects in everyday scenes. CVPR, 2017. 2", + "[9] Long Chen, Hanwang Zhang, Jun Xiao, W. Liu, and Shih-Fu Chang. Zero-shot visual recognition using semantics-preserving adversarial embedding networks. In CVPR, 2018. 3", + "[10] Hisham Cholakkal, Guolei Sun, Fahad Shahbaz Khan, and Ling Shao. Object counting and instance segmentation with image-level supervision. In CVPR, 2019. 2", + "[11] Hisham Cholakkal, Guolei Sun, Salman Hameed Khan, Fahad Shahbaz Khan, Ling Shao, and Luc Van Gool. Towards partial supervision for generic object counting in natural scenes. volume 44, 2022. 2", + "[12] Andrea Frome, Gregory S. Corrado, Jonathon Shlens, Samy Bengio, Jeffrey Dean, Marc'Aurelio Ranzato, and Tomas Mikolov. Devise: A deep visual-semantic embedding model. In NIPS, 2013. 3", + "[13] Shenjian Gong, Shanshan Zhang, Jian Yang, Dengxin Dai, and Bernt Schiele. Class-agnostic object counting robust to intraclass diversity. In ECCV, 2022. 2", + "[14] Meng-Ru Hsieh, Yen-Liang Lin, and Winston H. Hsu. Drone-based object counting by spatially regularized regional proposal network. In ICCV, 2017. 2", + "[15] Haroon Idrees, Muhammad Tayyab, Kishan Athrey, Dong Zhang, Somaya Ali Al-Maadeed, Nasir M. Rajpoot, and Mubarak Shah. Composition loss for counting, density map estimation and localization in dense crowds. In ECCV, 2018. 2", + "[16] Dinesh Jayaraman and Kristen Grauman. Zero-shot recognition with unreliable attributes. In NIPS, 2014. 3", + "[17] Christoph H. Lampert, Hannes Nickisch, and Stefan Harmeling. Learning to detect unseen object classes by between-class attribute transfer. CVPR, 2009. 3" + ], + "bbox": [ + 78, + 116, + 468, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[18] Issam H. Laradji, Negar Rostamzadeh, Pedro H. O. Pinheiro, David Vázquez, and Mark W. Schmidt. Where are the blobs: Counting by localization with point supervision. In ECCV, 2018. 2", + "[19] Hieu Le, Bento Goncalves, Dimitris Samaras, and Heather Lynch. Weakly labeling the antarctic: The penguin colony case. In CVPR Workshops, June 2019. 3", + "[20] Hieu Le and Dimitris Samaras. Physics-based shadow image decomposition for shadow removal. Los Alamitos, CA, USA. IEEE Computer Society. 3", + "[21] Hieu Le and Dimitris Samaras. From shadow segmentation to shadow removal. In ECCV, 2020. 3", + "[22] Hieu Le, Tomas F. Yago Vicente, Vu Nguyen, Minh Hoai, and Dimitris Samaras. A+D Net: Training a shadow detector with adversarial shadow attenuation. In ECCV, 2018. 3", + "[23] Hieu Le, Chen-Ping Yu, Gregory Zelinsky, and Dimitris Samaras. Co-localization with category-consistent features and geodesic distance propagation. In ICCV Workshop, 2017. 3", + "[24] Dongze Lian, Jing Li, Jia Zheng, Weixin Luo, and Shenghua Gao. Density map regression guided detection network for rgb-d crowd counting and localization. CVPR, 2019. 
2", + "[25] Chang Liu, Yujie Zhong, Andrew Zisserman, and Weidi Xie. Countr: Transformer-based generalised visual counting. In BMVC, 2022. 2", + "[26] Weizhe Liu, N. Durasov, and P. Fua. Leveraging self-supervision for cross-domain crowd counting. In CVPR, 2022. 2", + "[27] Weizhe Liu, Mathieu Salzmann, and Pascal V. Fua. Context-aware crowd counting. In CVPR, 2019. 2", + "[28] Erika Lu, Weidi Xie, and Andrew Zisserman. Class-agnostic counting. In ACCV, 2018. 1, 2, 5, 6", + "[29] Terrell N. Mundhenk, Goran Konjevod, Wesam A. Sakla, and Kofi Boakye. A large contextual dataset for classification, detection and counting of cars with deep learning. In ECCV, 2016. 1, 2", + "[30] Sanath Narayan, Akshita Gupta, Fahad Shahbaz Khan, Cees G. M. Snoek, and Ling Shao. Latent embedding feedback and discriminative features for zero-shot classification. In ECCV, 2020. 3", + "[31] Thanh Nguyen, Chau Pham, Khoi Nguyen, and Minh Hoai. Few-shot object counting and detection. In ECCV, 2022. 2, 5", + "[32] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In ICML, 2021. 4, 5", + "[33] Viresh Ranjan and Minh Hoai. Exemplar free class agnostic counting. In ACCV, 2022. 1, 2, 3, 5, 6, 8", + "[34] Viresh Ranjan, Udbhav Sharma, Thua Nguyen, and Minh Hoai. Learning to count everything. In CVPR, 2021. 1, 2, 3, 5, 6, 7", + "[35] Mahdi Rezaei and Mahsa Shahidi. Zero-shot learning and its applications from autonomous vehicles to Covid-19 diagnosis: A review. In Intelligence-Based Medicine, volume 3, 2020. 3" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "15556", + "bbox": [ + 480, + 945, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[36] Bernardino Romero-Paredes and Philip H. S. Torr. An embarrassingly simple approach to zero-shot learning. In ICML, 2015. 3", + "[37] Deepak Babu Sam, Abhinav Agarwalla, Jimmy Joseph, Vishwanath A. Sindagi, R. Venkatesh Babu, and Vishal M. Patel. Completely self-supervised crowd counting via distribution matching. In ECCV, 2022. 1, 2", + "[38] Min Shi, Hao Lu, Chen Feng, Chengxin Liu, and Zhiguo Cao. Represent, compare, and learn: A similarity-aware framework for class-agnostic counting. In CVPR, 2022. 1, 2, 3, 6, 7", + "[39] Vishwanath A. Sindagi, Rajeev Yasarla, and Vishal M. Patel. Pushing the frontiers of unconstrained crowd counting: New dataset and benchmark method. In ICCV, 2019. 2", + "[40] Jia Wan, Ziquan Liu, and Antoni B. Chan. A generalized loss function for crowd counting and localization. In CVPR, 2021. 2", + "[41] Boyu Wang, Huidong Liu, Dimitris Samaras, and Minh Hoai Nguyen. Distribution matching for crowd counting. In NeurIPS, 2020. 2", + "[42] Qi Wang, Junyu Gao, Wei Lin, and Xuelong Li. Nwpu-crowd: A large-scale benchmark for crowd counting and localization. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43, 2021. 2", + "[43] Yongqin Xian, Zeynep Akata, Gaurav Sharma, Quynh N. Nguyen, Matthias Hein, and Bernt Schiele. Latent embeddings for zero-shot classification. In CVPR, 2016. 3", + "[44] Yongqin Xian, Christoph H. Lampert, Bernt Schiele, and Zeynep Akata. Zero-shot learning—a comprehensive evaluation of the good, the bad and the ugly. IEEE Transactions on Pattern Analysis and Machine Intelligence, 41, 2019. 
3, 4", + "[45] Yongqin Xian, Saurabh Sharma, Bernt Schiele, and Zeynep Akata. F-vaegan-d2: A feature generating framework for any-shot learning. In CVPR, 2019. 3, 4", + "[46] Weidi Xie, J. Alison Noble, and Andrew Zisserman. Microscopy cell counting and detection with fully convolutional regression networks. Computer Methods in Biomechanics and Biomedical Engineering: Imaging & Visualization, 6, 2018. 1, 2", + "[47] Haipeng Xiong and Angela Yao. Discrete-constrained regression for local counting models. In ECCV, 2022. 2", + "[48] Jingyi Xu and Hieu Le. Generating representative samples for few-shot classification. In CVPR, 2022. 3", + "[49] Jingyi Xu, Hieu Le, Mingzhen Huang, ShahRukh Athar, and Dimitris Samaras. Variational feature disentangling for fine-grained few-shot classification. In ICCV, 2021. 3", + "[50] Shuo Yang, Hung-Ting Su, Winston H. Hsu, and Wen-Chin Chen. Class-agnostic few-shot object counting. In WACV, 2021. 2", + "[51] Zhiyuan You, Kai Yang, Wenhan Luo, Xin Lu, Lei Cui, and Xinyi Le. Few-shot object counting with similarity-aware feature enhancement. In WACV, 2023. 2", + "[52] Anran Zhang, Lei Yue, Jiayi Shen, Fan Zhu, Xiantong Zhen, Xianbin Cao, and Ling Shao. Attentional neural fields for crowd counting. In ICCV, 2019. 2" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[53] Cong Zhang, Hongsheng Li, Xiaogang Wang, and Xiaokang Yang. Cross-scene crowd counting via deep convolutional neural networks. In CVPR, 2015. 2", + "[54] Li Zhang, Tao Xiang, and Shaogang Gong. Learning a deep embedding model for zero-shot learning. In CVPR, 2017. 3", + "[55] Qi Zhang and Antoni Chan. Calibration-free multi-view crowd counting. In ECCV, 2022. 2", + "[56] Yingying Zhang, Desen Zhou, Siqin Chen, Shenghua Gao, and Yi Ma. Single-image crowd counting via multi-column convolutional neural network. In CVPR, 2016. 2", + "[57] Ye Zheng, Jiahong Wu, Yongqiang Qin, Faen Zhang, and Li Cui. Zero-shot instance segmentation. In CVPR, 2021. 2" + ], + "bbox": [ + 501, + 92, + 890, + 263 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "15557", + "bbox": [ + 480, + 945, + 517, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2023/Zero-Shot Object Counting/a8ad52a0-ee27-4576-981a-5efd9d6920f0_model.json b/2023/Zero-Shot Object Counting/a8ad52a0-ee27-4576-981a-5efd9d6920f0_model.json new file mode 100644 index 0000000000000000000000000000000000000000..5e5a3029f0a826c69558aa2c35ce3027883be5fd --- /dev/null +++ b/2023/Zero-Shot Object Counting/a8ad52a0-ee27-4576-981a-5efd9d6920f0_model.json @@ -0,0 +1,2332 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.345, + 0.131, + 0.628, + 0.154 + ], + "angle": 0, + "content": "Zero-Shot Object Counting" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.193, + 0.782, + 0.214 + ], + "angle": 0, + "content": "Jingyi \\(\\mathrm{Xu}^{1}\\), Hieu \\(\\mathrm{Le}^{2}\\), Vu Nguyen\\(^{1}\\), Viresh Ranjan\\(^{*3}\\), and Dimitris Samaras\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.308, + 0.223, + 0.662, + 0.243 + ], + "angle": 0, + "content": "1Stony Brook University 2EPFL 3Amazon" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.287, + 0.314, + 0.303 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.32, + 0.474, + 0.653 + ], + "angle": 0, + "content": "Class-agnostic object counting aims to count object instances of an arbitrary class at test time. Current methods for this challenging problem require human-annotated exemplars as inputs, which are often unavailable for novel categories, especially for autonomous systems. Thus, we propose zero-shot object counting (ZSC), a new setting where only the class name is available during test time. Such a counting system does not require human annotators in the loop and can operate automatically. Starting from a class name, we propose a method that can accurately identify the optimal patches which can then be used as counting exemplars. Specifically, we first construct a class prototype to select the patches that are likely to contain the objects of interest, namely class-relevant patches. Furthermore, we introduce a model that can quantitatively measure how suitable an arbitrary patch is as a counting exemplar. By applying this model to all the candidate patches, we can select the most suitable patches as exemplars for counting. Experimental results on a recent class-agnostic counting dataset, FSC-147, validate the effectiveness of our method. Code is available at https://github.com/cvlab-stonybrook/zero-shot-counting." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.683, + 0.21, + 0.699 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.709, + 0.47, + 0.876 + ], + "angle": 0, + "content": "Object counting aims to infer the number of objects in an image. Most of the existing methods focus on counting objects from specialized categories such as human crowds [37], cars [29], animals [4], and cells [46]. These methods count only a single category at a time. Recently, class-agnostic counting [28, 34, 38] has been proposed to count objects of arbitrary categories. Several human-annotated bounding boxes of objects are required to specify the objects of interest (see Figure 1a). However, having humans in the loop is not practical for many real-world applications, such as fully automated wildlife monitoring systems or vi" + }, + { + "type": "image", + "bbox": [ + 0.56, + 0.286, + 0.686, + 0.435 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.531, + 0.438, + 0.681, + 0.453 + ], + "angle": 0, + "content": "(a) Few-shot Counting" + }, + { + "type": "image", + "bbox": [ + 0.706, + 0.287, + 0.832, + 0.435 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.707, + 0.438, + 0.865, + 0.453 + ], + "angle": 0, + "content": "(b) Zero-Shot Counting" + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.458, + 0.895, + 0.542 + ], + "angle": 0, + "content": "Figure 1. Our proposed task of zero-shot object counting (ZSC). 
Traditional few-shot counting methods require a few exemplars of the object category (a). We propose zero-shot counting where the counter only needs the class name to count the number of object instances. (b). Few-shot counting methods require human annotators at test time while zero-shot counters can be fully automatic." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.553, + 0.717, + 0.568 + ], + "angle": 0, + "content": "sual anomaly detection systems." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.568, + 0.892, + 0.734 + ], + "angle": 0, + "content": "A more practical setting, exemplar-free class-agnostic counting, has been proposed recently by Ranjan et al. [33]. They introduce RepRPN, which first identifies the objects that occur most frequently in the image, and then uses them as exemplars for object counting. Even though RepRPN does not require any annotated boxes at test time, the method simply counts objects from the class with the highest number of instances. Thus, it can not be used for counting a specific class of interest. The method is only suitable for counting images with a single dominant object class, which limits the potential applicability." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Our goal is to build an exemplar-free object counter where we can specify what to count. To this end, we introduce a new counting task in which the user only needs to provide the name of the class for counting rather than the exemplars (see Figure 1b). In this way, the counting model can not only operate in an automatic manner but also allow the user to define what to count by simply providing the class name. Note that the class to count during test time can be arbitrary. For cases where the test class is completely unseen to the trained model, the counter needs to adapt to the unseen class without any annotated data. Hence, we" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.888, + 0.294, + 0.901 + ], + "angle": 0, + "content": "*Work done prior to joining Amazon" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "15548" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.122 + ], + "angle": 0, + "content": "name this setting zero-shot object counting (ZSC), inspired by previous zero-shot learning approaches [6, 57]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.124, + 0.468, + 0.289 + ], + "angle": 0, + "content": "To count without any annotated exemplars, our idea is to identify a few patches in the input image containing the target object that can be used as counting exemplars. Here the challenges are twofold: 1) how to localize patches that contain the object of interest based on the provided class name, and 2) how to select good exemplars for counting. Ideally, good object exemplars are visually representative for most instances in the image, which can benefit the object counter. In addition, we want to avoid selecting patches that contain irrelevant objects or backgrounds, which likely lead to incorrect object counts." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.292, + 0.468, + 0.398 + ], + "angle": 0, + "content": "To this end, we propose a two-step method that first localizes the class-relevant patches which contain the objects of interest based on the given class name, and then selects among these patches the optimal exemplars for counting. 
We use these selected exemplars, together with a pre-trained exemplar-based counting model, to achieve exemplar-free object counting." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.399, + 0.468, + 0.55 + ], + "angle": 0, + "content": "In particular, to localize the patches containing the objects of interest, we first construct a class prototype in a pretrained embedding space based on the given class name. To construct the class prototype, we train a conditional variational autoencoder (VAE) to generate features for an arbitrary class conditioned on its semantic embedding. The class prototype is computed by taking the average of the generated features. We then select the patches whose embeddings are the \\(k\\)-nearest neighbors of the class prototype as the class-relevant patches." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.552, + 0.468, + 0.839 + ], + "angle": 0, + "content": "After obtaining the class-relevant patches, we further select among them the optimal patches to be used as counting exemplars. Here we observe that the feature maps obtained using good exemplars and bad exemplars often exhibit distinguishable differences. An example of the feature maps obtained with different exemplars is shown in Figure 2. The feature map from a good exemplar typically exhibits some repetitive patterns (e.g., the dots on the feature map) that center around the object areas while the patterns from a bad exemplar are more irregular and occur randomly across the image. Based on this observation, we train a model to measure the goodness of an input patch based on its corresponding feature maps. Specifically, given an arbitrary patch and a pre-trained exemplar-based object counter, we train this model to predict the counting error of the counter when using the patch as the exemplar. Here the counting error can indicate the goodness of the exemplar. After this error predictor is trained, we use it to select those patches with the smallest predicted errors as the final exemplars for counting." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Experiments on the FSC-147 dataset show that our method outperforms the previous exemplar-free counting method [33] by a large margin. We also provide analyses to show that patches selected by our method can be" + }, + { + "type": "image", + "bbox": [ + 0.515, + 0.093, + 0.885, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.24, + 0.892, + 0.297 + ], + "angle": 0, + "content": "Figure 2. Feature maps obtained using different exemplars given a pre-trained exemplar-based counting model. The feature maps obtained using good exemplars typically exhibit some repetitive patterns while the patterns from bad exemplars are more irregular." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.309, + 0.892, + 0.354 + ], + "angle": 0, + "content": "used in other exemplar-based counting methods to achieve exemplar-free counting. In short, our main contributions can be summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.366, + 0.892, + 0.426 + ], + "angle": 0, + "content": "- We introduce the task of zero-shot object counting that counts the number of instances of a specific class in the input image, given only the class name and without relying on any human-annotated exemplars." 
+ }, + { + "type": "text", + "bbox": [ + 0.518, + 0.429, + 0.892, + 0.488 + ], + "angle": 0, + "content": "- We propose a simple yet effective patch selection method that can accurately localize the optimal patches across the query image as exemplars for zero-shot object counting." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.49, + 0.892, + 0.534 + ], + "angle": 0, + "content": "- We verify the effectiveness of our method on the FSC-147 dataset, through extensive ablation studies and visualization results." + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.366, + 0.892, + 0.534 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.561, + 0.64, + 0.577 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.587, + 0.771, + 0.603 + ], + "angle": 0, + "content": "2.1. Class-specific Object Counting" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.61, + 0.892, + 0.822 + ], + "angle": 0, + "content": "Class-specific object counting focuses on counting predefined categories, such as humans [1, 15, 24, 26, 37, 39, 40, 42, 47, 52, 53, 55, 56], animals [4], cells [46], or cars [14, 29]. Generally, existing methods can be categorized into two groups: detection-based methods [8, 14, 18] and regression-based methods [7, 10, 11, 27, 41, 53, 56]. Detection-based methods apply an object detector on the image and count the number of objects based on the detected boxes. Regression-based methods predict a density map for each input image, and the final result is obtained by summing up the pixel values. Both types of methods require abundant training data to learn a good model. Class-specific counters can perform well on trained categories. However, they can not be used to count objects of arbitrary categories at test time." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.832, + 0.779, + 0.849 + ], + "angle": 0, + "content": "2.2. Class-agnostic Object Counting" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Class-agnostic object counting aims to count arbitrary categories given only a few exemplars [3, 13, 25, 28, 31, 34, 38, 50, 51]. GMN [28] uses a shared embedding module to" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15549" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.109, + 0.092, + 0.849, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.322, + 0.896, + 0.42 + ], + "angle": 0, + "content": "Figure 3. Overview of the proposed method. We first use a generative model to obtain a class prototype for the given class (e.g. grape) in a pre-trained feature space. Then given an input query image, we randomly sample a number of patches of various sizes and extract the corresponding feature embedding for each patch. We select the patches whose embeddings are the nearest neighbors of the class prototype as class-relevant patches. Then for each of the selected class-relevant patches, we use a pre-trained exemplar-based counting model to obtain the intermediate feature maps. Our proposed error predictor then takes the feature maps as input and predicts the counting error (here we use normalized counting errors). We select the patches with the smallest predicted errors as the final exemplar patches and use them for counting." 
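As a concrete illustration of the sampling stage summarized in the Figure 3 caption above (randomly drawing patches of various sizes and embedding each crop in a pre-trained ImageNet feature space), the following Python sketch shows one possible implementation. The function name, the box-size range, and the use of a torchvision ResNet-50 as the ImageNet feature extractor are assumptions made for illustration, not details taken from the paper or its released code.

import torch
import torchvision.transforms.functional as TF
from torchvision.models import resnet50

def sample_patch_features(image, m=450, min_size=32, max_size=128):
    # image: float tensor (3, H, W) in [0, 1]. m = 450 follows the paper's
    # implementation details; the box-size range is an assumption.
    _, h, w = image.shape
    backbone = resnet50(weights="IMAGENET1K_V1")
    backbone.fc = torch.nn.Identity()   # expose the 2048-d pooled feature
    backbone.eval()
    boxes, feats = [], []
    with torch.no_grad():
        for _ in range(m):
            bw = torch.randint(min_size, max_size + 1, (1,)).item()
            bh = torch.randint(min_size, max_size + 1, (1,)).item()
            x0 = torch.randint(0, max(w - bw, 1), (1,)).item()
            y0 = torch.randint(0, max(h - bh, 1), (1,)).item()
            crop = TF.resized_crop(image, y0, x0, bh, bw, [224, 224])
            crop = TF.normalize(crop, mean=[0.485, 0.456, 0.406],
                                std=[0.229, 0.224, 0.225])
            feats.append(backbone(crop.unsqueeze(0)).squeeze(0))
            boxes.append((x0, y0, bw, bh))
    return boxes, torch.stack(feats)    # m boxes and an (m, 2048) feature matrix

The resulting per-patch embeddings are what the class prototype is later compared against when picking class-relevant patches.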
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.438, + 0.473, + 0.62 + ], + "angle": 0, + "content": "extract feature maps for both query images and exemplars, which are then concatenated and fed into a matching module to regress the object count. FamNet [34] adopts a similar way to do correlation matching and further applies test-time adaptation. These methods require human-annotated exemplars as inputs. Recently, Ranjan et al. have proposed RepRPN [33], which achieves exemplar-free counting by identifying exemplars from the most frequent objects via a Region Proposal Network (RPN)-based model. However, the class of interest can not be explicitly specified for the RepRPN. In comparison, our proposed method can count instances of a specific class given only the class name." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.634, + 0.35, + 0.65 + ], + "angle": 0, + "content": "2.3. Zero-shot Image Classification" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.66, + 0.473, + 0.901 + ], + "angle": 0, + "content": "Zero-shot classification aims to classify unseen categories for which data is not available during training [5, 9, 12, 16, 19, 21, 23, 35, 36]. Semantic descriptors are mostly leveraged as a bridge to enable the knowledge transfer between seen and unseen classes. Earlier zero-shot learning (ZSL) works relate the semantic descriptors with visual features in an embedding space and recognize unseen samples by searching their nearest class-level semantic descriptor in this embedding space [17, 36, 43, 54]. Recently, generative models [20, 22, 48, 49] have been widely employed to synthesize unseen class data to facilitate ZSL [30, 44, 45]. Xian et al. [44] use a conditional Wasserstein Generative Adversarial Network (GAN) [2] to generate unseen features which can then be used to train a discriminative classifier for ZSL. In our method, we also train a generative model conditioned on class-specific semantic embedding. Instead" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.437, + 0.895, + 0.484 + ], + "angle": 0, + "content": "of using this generative model to hallucinate data, we use it to compute a prototype for each class. This class prototype is then used to select patches that contain objects of interest." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.496, + 0.593, + 0.512 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.521, + 0.895, + 0.749 + ], + "angle": 0, + "content": "Figure 3 summarizes our proposed method. Given an input query image and a class label, we first use a generative model to construct a class prototype for the given class in a pre-trained feature space. We then randomly sample a number of patches of various sizes and extract the feature embedding for each patch. The class-relevant patches are those patches whose embeddings are the nearest neighbors of the class prototype in the embedding space. We further use an error predictor to select the patches with the smallest predicted errors as the final exemplars for counting. We use the selected exemplars in an exemplar-based object counter to infer the object counts. For the rest of the paper, we denote this exemplar-based counter as the \"base counting model\". We will first describe how we train this base counting model and then present the details of our patch selection method." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.757, + 0.776, + 0.773 + ], + "angle": 0, + "content": "3.1. 
Training Base Counting Model" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.895, + 0.903 + ], + "angle": 0, + "content": "We train our base counting model using abundant training images with annotations. Similar to previous works [34, 38], the base counting model uses the input image and the exemplars to obtain a density map for object counting. The model consists of a feature extractor \\( F \\) and a counter \\( C \\). Given a query image \\( I \\) and an exemplar \\( B \\) of an arbitrary class \\( c \\), we input \\( I \\) and \\( B \\) to the feature extractor to obtain the corresponding output, denoted as \\( F(I) \\) and \\( F(B) \\) re" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "15550" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.151 + ], + "angle": 0, + "content": "spectively. \\( F(I) \\) is a feature map of size \\( d * h_{I} * w_{I} \\) and \\( F(B) \\) is a feature map of size \\( d * h_{B} * w_{B} \\). We further perform global average pooling on \\( F(B) \\) to form a feature vector \\( b \\) of \\( d \\) dimensions." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.152, + 0.47, + 0.213 + ], + "angle": 0, + "content": "After feature extraction, we obtain the similarity map \\(S\\) by correlating the exemplar feature vector \\(b\\) with the image feature map \\(F(I)\\). Specifically, if \\(w_{ij} = F_{ij}(I)\\) is the channel feature at spatial position \\((i,j)\\), \\(S\\) can be computed by:" + }, + { + "type": "equation", + "bbox": [ + 0.211, + 0.226, + 0.469, + 0.245 + ], + "angle": 0, + "content": "\\[\nS _ {i j} (I, B) = w _ {i j} ^ {T} b. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.249, + 0.469, + 0.294 + ], + "angle": 0, + "content": "In the case where \\( n \\) exemplars are given, we use Eq. 1 to calculate \\( n \\) similarity maps, and the final similarity map is the average of these \\( n \\) similarity maps." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.295, + 0.469, + 0.355 + ], + "angle": 0, + "content": "We then concatenate the image feature map \\( F(I) \\) with the similarity map \\( S \\), and input them into the counter \\( C \\) to predict a density map \\( D \\). The final predicted count \\( N \\) is obtained by summing over the predicted density map \\( D \\):" + }, + { + "type": "equation", + "bbox": [ + 0.217, + 0.364, + 0.469, + 0.396 + ], + "angle": 0, + "content": "\\[\nN = \\sum_ {i, j} D _ {(i, j)}, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.399, + 0.469, + 0.459 + ], + "angle": 0, + "content": "where \\(D_{(i,j)}\\) denotes the density value for pixel \\((i,j)\\). The supervision signal for training the counting model is the \\(L_{2}\\) loss between the predicted density map and the ground truth density map:" + }, + { + "type": "equation", + "bbox": [ + 0.168, + 0.473, + 0.469, + 0.49 + ], + "angle": 0, + "content": "\\[\nL _ {\\text {c o u n t}} = \\left\\| D (I, B) - D ^ {*} (I) \\right\\| _ {2} ^ {2}, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.495, + 0.397, + 0.511 + ], + "angle": 0, + "content": "where \\(D^{*}\\) denotes the ground truth density map." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.519, + 0.321, + 0.536 + ], + "angle": 0, + "content": "3.2. 
Zero-shot Object Counting" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.542, + 0.469, + 0.649 + ], + "angle": 0, + "content": "In this section, we describe how we count objects of any unseen category given only the class name without access to any exemplar. Our strategy is to select a few patches in the image that can be used as exemplars for the base counting model. These patches are selected such that: 1) they contain the objects that we are counting and 2) they benefit the counting model, i.e., lead to small counting errors." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.666, + 0.355, + 0.681 + ], + "angle": 0, + "content": "3.2.1 Selecting Class-relevant Patches" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.689, + 0.469, + 0.765 + ], + "angle": 0, + "content": "To select patches that contain the objects of interest, we first generate a class prototype based on the given class name using a conditional VAE model. Then we randomly sample a number of patches across the query image and select the class-relevant patches based on the generated prototype." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.766, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Class prototype generation. Inspired by previous zero-shot learning approaches [44, 45], we train a conditional VAE model to generate features for an arbitrary class based on the semantic embedding of the class. The semantic embedding is obtained from a pre-trained text-vision model [32] given the corresponding class name. Specifically, we train the VAE model to reconstruct features in a pre-trained ImageNet feature space. The VAE is composed of an Encoder \\( E \\), which maps a visual feature \\( x \\) to a latent code \\( z \\)," + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.892, + 0.15 + ], + "angle": 0, + "content": "and a decoder \\( G \\) which reconstructs \\( x \\) from \\( z \\). Both \\( E \\) and \\( G \\) are conditioned on the semantic embedding \\( a \\). The loss function for training this VAE for an input feature \\( x \\) can be defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.586, + 0.161, + 0.892, + 0.198 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} L _ {V} (x) = \\operatorname {K L} (q (z | x, a) | | p (z | a)) \\tag {4} \\\\ - \\mathrm {E} _ {q (z | x, a)} [ \\log p (x | z, a) ]. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.207, + 0.892, + 0.296 + ], + "angle": 0, + "content": "The first term is the Kullback-Leibler divergence between the VAE posterior \\( q(z|x,a) \\) and a prior distribution \\( p(z|a) \\). The second term is the decoder's reconstruction error. \\( q(z|x,a) \\) is modeled as \\( E(x,a) \\) and \\( p(x|z,a) \\) is equal to \\( G(z,a) \\). The prior distribution is assumed to be \\( \\mathcal{N}(0,I) \\) for all classes." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.298, + 0.892, + 0.373 + ], + "angle": 0, + "content": "We can use the trained VAE to generate the class prototype for an arbitrary target class for counting. Specifically, given the target class name \\( y \\), we first generate a set of features by inputting the respective semantic vector \\( a^y \\) and a noise vector \\( z \\) to the decoder \\( G \\):" + }, + { + "type": "equation", + "bbox": [ + 0.57, + 0.386, + 0.892, + 0.403 + ], + "angle": 0, + "content": "\\[\n\\mathbb {G} ^ {y} = \\{\\hat {x} | \\hat {x} = G (z, y), z \\sim \\mathcal {N} (0, I) \\}. 
\\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.414, + 0.892, + 0.444 + ], + "angle": 0, + "content": "The class prototype \\(\\mathfrak{p}^y\\) is computed by taking the mean of all the features generated by VAE:" + }, + { + "type": "equation", + "bbox": [ + 0.624, + 0.455, + 0.892, + 0.487 + ], + "angle": 0, + "content": "\\[\n\\mathrm {p} ^ {y} = \\frac {1}{| \\mathbb {G} ^ {y} |} \\sum_ {\\hat {x} \\in \\mathbb {G} ^ {y}} \\hat {x} \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.496, + 0.892, + 0.739 + ], + "angle": 0, + "content": "Class-relevant patch selection. The generated class prototype can be considered as a class center representing the distribution of features of the corresponding class in the embedding space. Using the class prototype, we can select the class-relevant patches across the query image. Specifically, we first randomly sample \\( M \\) patches of various sizes \\( \\{b_1, b_2, \\dots, b_m\\} \\) across the query image and extract their corresponding ImageNet features \\( \\{f_1, f_2, \\dots, f_m\\} \\). To select the class-relevant patches, we calculate the \\( L_2 \\) distance between the class prototype and the patch embedding, namely \\( d_i = \\| f_i - \\mathrm{p}^y\\|_2 \\). Then we select the patches whose embeddings are the \\( k \\)-nearest neighbors of the class prototype as the class-relevant patches. Since the ImageNet feature space is highly discriminative, i.e., features close to each other typically belong to the same class, the selected patches are likely to contain the objects of the target class." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.757, + 0.791, + 0.772 + ], + "angle": 0, + "content": "3.2.2 Selecting Exemplars for Counting" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.78, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Given a set of class-relevant patches and a pre-trained exemplar-based object counter, we aim to select a few exemplars from these patches that are optimal for counting. To do so, we introduce an error prediction network that predicts the counting error of an arbitrary patch when the patch is used as the exemplar. The counting error is calculated from the pre-trained counting model. Specifically, to train this error predictor, given a query image \\(\\bar{I}\\) and an arbitrary patch" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "15551" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.091, + 0.47, + 0.152 + ], + "angle": 0, + "content": "\\(\\bar{B}\\) cropped from \\(\\bar{I}\\), we first use the base counting model to get the image feature map \\(F(\\bar{I})\\), similarity map \\(\\bar{S}\\), and the final predicted density map \\(\\bar{D}\\). The counting error of the base counting model can be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.193, + 0.164, + 0.47, + 0.197 + ], + "angle": 0, + "content": "\\[\n\\epsilon = \\left| \\sum_ {i, j} \\bar {D} _ {(i, j)} - \\bar {N} ^ {*} \\right|, \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.202, + 0.469, + 0.263 + ], + "angle": 0, + "content": "where \\(\\bar{N}^*\\) denotes the ground truth object count in image \\(\\bar{I}\\). \\(\\epsilon\\) can be used to measure the goodness of \\(\\bar{B}\\) as an exemplar for \\(\\bar{I}\\), i.e., a small \\(\\epsilon\\) indicates that \\(\\bar{B}\\) is a suitable exemplar for counting and vice versa." 
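To make the prototype construction of Eqs. (5)-(6) and the k-nearest-neighbour filtering of Sec. 3.2.1 concrete, here is a minimal PyTorch sketch. The decoder interface G(z, a), the variable names, and the number of generated samples are assumptions for illustration; only the latent/embedding size of 512 and k = 10 follow the paper's implementation details.

import torch

def build_class_prototype(decoder, semantic_embedding, n_samples=256, latent_dim=512):
    # Eq. (5): decode noise z ~ N(0, I), conditioned on the CLIP text embedding
    # of the class name, into synthetic features in the ImageNet space.
    # (decoder(z, a) is a hypothetical interface for the trained VAE decoder G.)
    z = torch.randn(n_samples, latent_dim)
    a = semantic_embedding.unsqueeze(0).expand(n_samples, -1)
    with torch.no_grad():
        generated = decoder(z, a)              # (n_samples, d)
    # Eq. (6): the class prototype is the mean of the generated features.
    return generated.mean(dim=0)

def select_class_relevant_patches(patch_features, prototype, k=10):
    # Keep the k patches whose ImageNet embeddings are the nearest (L2)
    # neighbours of the class prototype.
    distances = torch.norm(patch_features - prototype.unsqueeze(0), dim=1)
    return torch.topk(distances, k, largest=False).indices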
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.264, + 0.469, + 0.369 + ], + "angle": 0, + "content": "The error predictor \\( R \\) is trained to regress the counting error produced by the base counting model. The input of \\( R \\) is the channel-wise concatenation of the image feature map \\( F(\\bar{I}) \\) and the similarity map \\( \\tilde{S} \\). The training objective is the minimization of the mean squared error between the output of the predictor \\( R(F(\\bar{I}),\\bar{S}) \\) and the actual counting error produced by the base counting model \\( \\epsilon \\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.37, + 0.469, + 0.491 + ], + "angle": 0, + "content": "After the error predictor is trained, we can use it to select the optimal patches for counting. The candidates for selection here are the class-relevant patches selected by the class prototype in the previous step. For each candidate patch, we use the trained error predictor to infer the counting error when it is being used as the exemplar. The final selected patches for counting are the patches that yield the top-\\(s\\) smallest counting errors." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.511, + 0.414, + 0.527 + ], + "angle": 0, + "content": "3.2.3 Using the Selected Patches as Exemplars" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.535, + 0.469, + 0.657 + ], + "angle": 0, + "content": "Using the error predictor, we predict the error for each candidate patch and select the patches that lead to the smallest counting errors. The selected patches can then be used as exemplars for the base counting model to get the density map and the final count. We also conduct experiments to show that these selected patches can serve as exemplars for other exemplar-based counting models to achieve exemplar-free class-agnostic counting." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.671, + 0.21, + 0.688 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.696, + 0.295, + 0.712 + ], + "angle": 0, + "content": "4.1. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Network architecture For the base counting model, we use ResNet-50 as the backbone of the feature extractor, initialized with the weights of a pre-trained ImageNet model. The backbone outputs feature maps of 1024 channels. For each query image, the number of channels is reduced to 256 using an \\(1 \\times 1\\) convolution. For each exemplar, the feature maps are first processed with global average pooling and then linearly mapped to obtain a 256-d feature vector. The counter consists of 5 convolutional and bilinear upsampling layers to regress a density map of the same size as the query image. For the feature generation model, both the encoder and the decoder are two-layer fully-connected" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.198 + ], + "angle": 0, + "content": "(FC) networks with 4096 hidden units. LeakyReLU and ReLU are the non-linear activation functions in the hidden and output layers, respectively. The dimensions of the latent space and the semantic embeddings are both set to be 512. For the error predictor, 5 convolutional and bilinear upsampling layers are followed by a linear layer to output the counting error." 
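The selection step of Secs. 3.2.2-3.2.3 can likewise be summarised in a short sketch: score every class-relevant patch with the error predictor and count with the s lowest-error ones (s = 3 in the paper). The base_model and error_predictor call signatures below are hypothetical and only stand in for the pre-trained modules described above.

import torch

def count_with_selected_exemplars(base_model, error_predictor, image, candidate_patches, s=3):
    # base_model(image, patches) is assumed to return (F(I), similarity map S,
    # predicted density map); error_predictor consumes their concatenation.
    predicted_errors = []
    with torch.no_grad():
        for patch in candidate_patches:
            feat_map, sim_map, _ = base_model(image, [patch])
            err = error_predictor(torch.cat([feat_map, sim_map], dim=1))
            predicted_errors.append(err.item())
        # Keep the s patches with the smallest predicted counting error and
        # run the base counting model once more with them as exemplars.
        order = sorted(range(len(candidate_patches)), key=predicted_errors.__getitem__)
        exemplars = [candidate_patches[i] for i in order[:s]]
        _, _, density = base_model(image, exemplars)
    return exemplars, density.sum().item()     # final count, as in Eq. (2)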
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.198, + 0.892, + 0.333 + ], + "angle": 0, + "content": "Dataset We use the FSC-147 dataset [34] to train the base counting model and the error predictor. FSC-147 is the first large-scale dataset for class-agnostic counting. It includes 6135 images from 147 categories varying from animals, kitchen utensils, to vehicles. The categories in the training, validation, and test sets do not overlap. The feature generator is trained on the MS-COCO detection dataset. Note that the previous exemplar-free method [33] also uses MS-COCO to pre-train their counter." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.334, + 0.892, + 0.576 + ], + "angle": 0, + "content": "Training details Both the base counting model and the error predictor are trained using the AdamW optimizer with a fixed learning rate of \\(10^{-5}\\). The base counting model is trained for 300 epochs with a batch size of 8. We resize the input query image to a fixed height of 384, and the width is adjusted accordingly to preserve the aspect ratio of the original image. Exemplars are resized to \\(128 \\times 128\\) before being input into the feature extractor. The feature generation model is trained using the Adam optimizer and the learning rate is set to be \\(10^{-4}\\). The semantic embeddings are extracted from CLIP [32]. To select the class-relevant patches, we randomly sample 450 boxes of various sizes across the input query image and select 10 patches whose embeddings are the 10-nearest neighbors of the class prototype. The final selected patches are those that yield the top-3 smallest counting errors predicted by the error predictor." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.585, + 0.685, + 0.6 + ], + "angle": 0, + "content": "4.2. Evaluation Metrics" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.608, + 0.892, + 0.763 + ], + "angle": 0, + "content": "We use Mean Average Error (MAE) and Root Mean Squared Error (RMSE) to measure the performance of different object counters. Besides, we follow [31] to report the Normalized Relative Error (NAE) and Squared Relative Error (SRE). In particular, MAE = \\(\\frac{1}{n}\\sum_{i=1}^{n}|y_i - \\hat{y}_i|\\); RMSE = \\(\\sqrt{\\frac{1}{n}\\sum_{i=1}^{n}(y_i - \\hat{y}_i)^2}\\); NAE = \\(\\frac{1}{n}\\sum_{i=1}^{n}\\frac{|y_i - \\hat{y}_i|}{y_i}\\); SRE = \\(\\sqrt{\\frac{1}{n}\\sum_{i=1}^{n}\\frac{(y_i - \\hat{y}_i)^2}{y_i}}\\) where \\(n\\) is the number of test images, and \\(y_i\\) and \\(\\hat{y}_i\\) are the ground truth and the predicted number of objects for image \\(i\\) respectively." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.772, + 0.696, + 0.789 + ], + "angle": 0, + "content": "4.3. Comparing Methods" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We compare our method with the previous works on class-agnostic counting. RepRPN-Counter [33] is the only previous class-agnostic counting method that does not require human-annotated exemplars as input. In order to make other exemplar based class-agnostic methods including GMN (General Matching Network [28]), FamNet (Few-shot adaptation and matching Network [34]) and BMNet" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15552" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.192, + 0.089, + 0.782, + 0.296 + ], + "angle": 0, + "content": "
MethodExemplarsVal SetTest Set
MAERMSENAESREMAERMSENAESRE
GMN [28]GT29.6689.81--26.52124.57--
RPN40.96108.47--39.72142.81--
FamNet+ [34]GT23.7569.070.524.2522.0899.540.446.45
RPN42.85121.590.756.9442.70146.080.747.14
BMNet [38]GT19.0667.950.264.3916.71103.310.263.32
RPN37.26108.540.425.4337.22143.130.415.31
BMNet+ [38]GT15.7458.530.276.5714.6291.830.252.74
RPN35.15106.070.415.2834.52132.640.395.26
RepRPN-Counter [33]-30.4098.73--27.45129.69--
Ours (Base)GT18.5561.120.303.1820.68109.140.367.63
RPN32.1999.210.384.8029.25130.650.354.35
Patch-Selection26.9388.630.364.2622.09115.170.343.74
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.3, + 0.895, + 0.342 + ], + "angle": 0, + "content": "Table 1. Quantitative comparisons on the FSC-147 dataset. \"GT\" denotes using human-annotated boxes as exemplars. \"RPN\" denotes using the top-3 RPN proposals with the highest objectness scores as exemplars. \"Patch-Selection\" denotes using our selected patches as exemplars." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.358, + 0.473, + 0.479 + ], + "angle": 0, + "content": "(Bilinear Matching Network [38]) work in the exemplar-free setup, we replace the human-provided exemplars with the exemplars generated by a pre-trained object detector. Specifically, we use the RPN of Faster RCNN pre-trained on MS-COCO dataset and select the top-3 proposals with the highest objectness score as the exemplars. We also include the performance of these methods using human-annotated exemplars for a complete comparison." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.495, + 0.172, + 0.51 + ], + "angle": 0, + "content": "4.4. Results" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.521, + 0.47, + 0.763 + ], + "angle": 0, + "content": "Quantitative results. As shown in Table 1, our proposed method outperforms the previous exemplar-free counting method [33] by a large margin, resulting in a reduction of 10.10 w.r.t. the validation RMSE and 14.52 w.r.t. the test RMSE. We also notice that the performance of all exemplar-based counting methods drops significantly when replacing human-annotated exemplars with RPN generated proposals. The state-of-the-art exemplar-based method BMNet+ [38], for example, shows an 19.90 error increase w.r.t. the test MAE and a 40.81 increase w.r.t. the test RMSE. In comparison, the performance gap is much smaller when using our selected patches as exemplars, as reflected by a 1.41 increase w.r.t. the test MAE and a 6.03 increase w.r.t. the test RMSE. Noticeably, the NAE and the SRE on the test set are even reduced when using our selected patches compared with the human-annotated exemplars." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Qualitative analysis. In Figure 4, we present a few input images, the image patches selected by our method, and the corresponding density maps. Our method effectively identifies the patches that are suitable for object counting. The density maps produced by our selected patches are meaningful and close to the density maps produced by human-annotated patches. The counting model with random image patches as exemplars, in comparison, fails to output meaningful density maps and infers incorrect object counts." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.356, + 0.6, + 0.373 + ], + "angle": 0, + "content": "5. Analyses" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.382, + 0.665, + 0.398 + ], + "angle": 0, + "content": "5.1. Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.408, + 0.892, + 0.633 + ], + "angle": 0, + "content": "Our proposed patch selection method consists of two steps: the selection of class-relevant patches via a generated class prototype and the selection of the optimal patches via an error predictor. We analyze the contribution of each step quantitatively and qualitatively. Quantitative results are in Table 2. We first evaluate the performance of our baseline, i.e. using 3 randomly sampled patches as exemplars without any selection step. 
As shown in Table 2, using the class prototype to select class-relevant patches reduces the error rate by 7.19 and 6.07 on the validation and test set of MAE, respectively. Applying the error predictor can improve the baseline performance by 7.22 on the validation MAE and 7.57 on the test MAE. Finally, applying the two components together further boosts performance, achieving 26.93 on the validation MAE and 22.09 on the test MAE." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.636, + 0.892, + 0.803 + ], + "angle": 0, + "content": "We provide further qualitative analysis by visualizing the selected patches. As shown in Figure 5, for each input query image, we show 10 class-relevant patches selected using our generated prototype, ranked by their predicted counting error (from low to high). All the 10 selected class-relevant patches exhibit some class specific features. However, not all these patches are suitable to be used as counting exemplars, i.e., some patches only contain parts of the object, and some patches contain some background. By further applying our proposed error predictor, we can identify the most suitable patches with the smallest predicted counting errors." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.816, + 0.871, + 0.832 + ], + "angle": 0, + "content": "5.2. Generalization to Exemplar-based Methods" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.841, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Our proposed method can be considered as a general patch selection method that is applicable to other visual counters to achieve exemplar-free counting. To verify that, we use our selected patches as the exemplars for three" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15553" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.179, + 0.092, + 0.286, + 0.168 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.316, + 0.092, + 0.459, + 0.167 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.476, + 0.092, + 0.617, + 0.168 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.634, + 0.092, + 0.775, + 0.169 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.174, + 0.286, + 0.255 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.316, + 0.174, + 0.459, + 0.254 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.476, + 0.174, + 0.617, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.634, + 0.174, + 0.774, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.263, + 0.286, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.317, + 0.259, + 0.459, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.476, + 0.259, + 0.617, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.634, + 0.259, + 0.774, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.338, + 0.284, + 0.411 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.317, + 0.337, + 0.459, + 0.41 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.476, + 0.337, + 0.617, + 0.41 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.634, + 0.337, + 
0.774, + 0.41 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.358, + 0.414, + 0.441, + 0.426 + ], + "angle": 0, + "content": "Ground Truth" + }, + { + "type": "image_caption", + "bbox": [ + 0.69, + 0.414, + 0.722, + 0.426 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.43, + 0.895, + 0.473 + ], + "angle": 0, + "content": "Figure 4. Qualitative results on the FSC-147 dataset. We show the counting exemplars and the corresponding density maps of ground truth boxes, randomly selected patches, and our selected patches respectively. Predicted counting results are shown at the top-right corner. Our method accurately identifies suitable patches for counting and the predicted density maps are close to the ground truth density maps." + }, + { + "type": "image", + "bbox": [ + 0.169, + 0.482, + 0.803, + 0.668 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.674, + 0.893, + 0.703 + ], + "angle": 0, + "content": "Figure 5. Qualitative ablation analysis. All the 10 selected class-relevant patches exhibit some class-specific attributes. They are ranked by the predicted counting errors and the final selected patches with the smallest errors are framed in green." + }, + { + "type": "table", + "bbox": [ + 0.08, + 0.72, + 0.472, + 0.792 + ], + "angle": 0, + "content": "
PrototypePredictorVal SetTest Set
MAERMSENAESREMAERMSENAESRE
--35.20106.700.616.6831.37134.980.525.92
✓-28.0188.290.394.6625.30113.820.404.88
-✓27.9888.620.434.5923.80128.360.404.43
✓✓26.9388.630.364.2622.09115.170.343.74
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.803, + 0.471, + 0.86 + ], + "angle": 0, + "content": "Table 2. Ablation study on each component's contribution to the final results. We show the effectiveness of the two steps of our framework: selecting class-relevant patches via a generated class prototype and selecting optimal patches via an error predictor." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.721, + 0.895, + 0.889 + ], + "angle": 0, + "content": "other different exemplar-based methods: FamNet [34], BMNet and BMNet+ [38]. Figure 6 (a) shows the results on the FSC-147 validation set. The baseline uses three randomly sampled patches as the exemplars for the pre-trained exemplar-based counter. By using the generated class prototype to select class-relevant patches, the error rate is reduced by 5.18, 8.59 and 5.60 on FamNet, BMNet and BMNet+, respectively. In addition, as the error predictor is additionally adopted, the error rate is further reduced by 1.76, 1.00 and 1.08 on FamNet, BMNet and BMNet+, respectively. Similarly, Figure 6 (b) shows the results on the FSC" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "15554" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.078, + 0.092, + 0.47, + 0.122 + ], + "angle": 0, + "content": "147 test set. Our method achieves consistent performance improvements for all three methods." + }, + { + "type": "image", + "bbox": [ + 0.096, + 0.143, + 0.434, + 0.285 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.267, + 0.293, + 0.284, + 0.304 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.096, + 0.31, + 0.434, + 0.452 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.266, + 0.459, + 0.284, + 0.47 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.483, + 0.47, + 0.581 + ], + "angle": 0, + "content": "Figure 6. Using our selected patches as exemplars for other exemplar-based class-agnostic counting methods (FamNet, BMNet and BMNet+) on FSC-147 dataset. Blue bars are the MAEs of using three randomly sampled patches. Orange bars are the MAEs of using the class prototype to select class-relevant patches as exemplars. Green bars are the MAEs of using the class prototype and error predictor to select optimal patches as exemplars." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.605, + 0.332, + 0.621 + ], + "angle": 0, + "content": "5.3. Multi-class Object Counting" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.629, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Our method can count instances of a specific class given the class name, which is particularly useful when there are multiple classes in the same image. In this section, we show some visualization results in this multi-class scenario. As seen in Figure 7, our method selects patches according to the given class name and count instances from that specific class in the input image. Correspondingly, the heatmap highlights the image regions that are most relevant to the specified class. Here the heatmaps are obtained by correlating the exemplar feature vector with the image feature map in a pre-trained ImageNet feature space. Note that we mask out the image region where the activation value in the heatmap is below a threshold for counting purpose. We also show the patches selected using another exemplar-free counting method, RepRPN [33]. 
The class of RepRPN selected patches can not be explicitly specified. It simply selects patches from the class with the highest number of instances in the image according to the repetition score." + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.092, + 0.869, + 0.233 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.236, + 0.869, + 0.375 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.381, + 0.869, + 0.52 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.54, + 0.892, + 0.584 + ], + "angle": 0, + "content": "Figure 7. Visualization results of our method in some multi-class examples. Our method selects patches according to the given class name and the corresponding heatmap highlights the relevant areas." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.608, + 0.619, + 0.623 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.632, + 0.892, + 0.844 + ], + "angle": 0, + "content": "In this paper, we proposed a new task, zero-shot object counting, to count instances of a specific class given only the class name without access to any exemplars. To address this, we developed a simple yet effective method that accurately localizes the optimal patches across the query image that can be used as counting exemplars. Specifically, we construct a class prototype in a pre-trained feature space and use the prototype to select patches that contain objects of interest; then we use an error predictor to select those patches with the smallest predicted errors as the final exemplars for counting. Extensive results demonstrate the effectiveness of our method. We also conduct experiments to show that our selected patches can be used for other exemplar-based counting methods to achieve exemplar-free counting." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.85, + 0.892, + 0.895 + ], + "angle": 0, + "content": "Acknowledgements. This research was partially supported by NSF grants IIS-2123920 and IIS-2212046 and the NASA Biodiversity program (Award 80NSSC21K1027)." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "15555" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.175, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.117, + 0.47, + 0.158 + ], + "angle": 0, + "content": "[1] Shahira Abousamra, Minh Hoai, Dimitris Samaras, and Chao Chen. Localization in the crowd with topological constraints. In AAAI, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.16, + 0.468, + 0.188 + ], + "angle": 0, + "content": "[2] Martin Arjovsky, Soumith Chintala, and Léon Bottou. Wasserstein gan. In ICML, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.19, + 0.468, + 0.231 + ], + "angle": 0, + "content": "[3] Carlos Arteta, Victor S. Lempitsky, Julia Alison Noble, and Andrew Zisserman. Interactive object counting. In ECCV, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.234, + 0.468, + 0.261 + ], + "angle": 0, + "content": "[4] Carlos Arteta, Victor S. Lempitsky, and Andrew Zisserman. Counting in the wild. In ECCV, 2016. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.264, + 0.468, + 0.304 + ], + "angle": 0, + "content": "[5] Yuval Atzmon and Gal Chechik. Adaptive confidence smoothing for generalized zero-shot learning. In CVPR, 2019. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.308, + 0.468, + 0.348 + ], + "angle": 0, + "content": "[6] Ankan Bansal, Karan Sikka, Gaurav Sharma, Rama Chellappa, and Ajay Divakaran. Zero-shot object detection. In ECCV, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.352, + 0.468, + 0.405 + ], + "angle": 0, + "content": "[7] Antoni B. Chan, Zhang-Sheng John Liang, and Nuno Vasconcelos. Privacy preserving crowd monitoring: Counting people without people models or tracking. In CVPR, 2008. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.409, + 0.468, + 0.451 + ], + "angle": 0, + "content": "[8] Prithvijit Chattopadhyay, Ramakrishna Vedantam, Ramprasaath R. Selvaraju, Dhruv Batra, and Devi Parikh. Counting everyday objects in everyday scenes. CVPR, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.453, + 0.468, + 0.507 + ], + "angle": 0, + "content": "[9] Long Chen, Hanwang Zhang, Jun Xiao, W. Liu, and Shih-Fu Chang. Zero-shot visual recognition using semantics-preserving adversarial embedding networks. In CVPR, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.511, + 0.468, + 0.552 + ], + "angle": 0, + "content": "[10] Hisham Cholakkal, Guolei Sun, Fahad Shahbaz Khan, and Ling Shao. Object counting and instance segmentation with image-level supervision. In CVPR, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.555, + 0.468, + 0.609 + ], + "angle": 0, + "content": "[11] Hisham Cholakkal, Guolei Sun, Salman Hameed Khan, Fahad Shahbaz Khan, Ling Shao, and Luc Van Gool. Towards partial supervision for generic object counting in natural scenes. volume 44, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.612, + 0.468, + 0.667 + ], + "angle": 0, + "content": "[12] Andrea Frome, Gregory S. Corrado, Jonathon Shlens, Samy Bengio, Jeffrey Dean, Marc'Aurelio Ranzato, and Tomas Mikolov. Devise: A deep visual-semantic embedding model. In NIPS, 2013. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.67, + 0.468, + 0.711 + ], + "angle": 0, + "content": "[13] Shenjian Gong, Shanshan Zhang, Jian Yang, Dengxin Dai, and Bernt Schiele. Class-agnostic object counting robust to intraclass diversity. In ECCV, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.714, + 0.468, + 0.755 + ], + "angle": 0, + "content": "[14] Meng-Ru Hsieh, Yen-Liang Lin, and Winston H. Hsu. Drone-based object counting by spatially regularized regional proposal network. In ICCV, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.758, + 0.468, + 0.826 + ], + "angle": 0, + "content": "[15] Haroon Idrees, Muhammad Tayyab, Kishan Athrey, Dong Zhang, Somaya Ali Al-Maadeed, Nasir M. Rajpoot, and Mubarak Shah. Composition loss for counting, density map estimation and localization in dense crowds. In ECCV, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.829, + 0.468, + 0.856 + ], + "angle": 0, + "content": "[16] Dinesh Jayaraman and Kristen Grauman. Zero-shot recognition with unreliable attributes. In NIPS, 2014. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.468, + 0.9 + ], + "angle": 0, + "content": "[17] Christoph H. Lampert, Hannes Nickisch, and Stefan Harmeling. Learning to detect unseen object classes by between-class attribute transfer. CVPR, 2009. 
3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.117, + 0.47, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[18] Issam H. Laradji, Negar Rostamzadeh, Pedro H. O. Pinheiro, David Vázquez, and Mark W. Schmidt. Where are the blobs: Counting by localization with point supervision. In ECCV, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.892, + 0.189 + ], + "angle": 0, + "content": "[19] Hieu Le, Bento Goncalves, Dimitris Samaras, and Heather Lynch. Weakly labeling the antarctic: The penguin colony case. In CVPR Workshops, June 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.19, + 0.892, + 0.232 + ], + "angle": 0, + "content": "[20] Hieu Le and Dimitris Samaras. Physics-based shadow image decomposition for shadow removal. Los Alamitos, CA, USA. IEEE Computer Society. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.233, + 0.892, + 0.259 + ], + "angle": 0, + "content": "[21] Hieu Le and Dimitris Samaras. From shadow segmentation to shadow removal. In ECCV, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.261, + 0.892, + 0.3 + ], + "angle": 0, + "content": "[22] Hieu Le, Tomas F. Yago Vicente, Vu Nguyen, Minh Hoai, and Dimitris Samaras. A+D Net: Training a shadow detector with adversarial shadow attenuation. In ECCV, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.302, + 0.892, + 0.355 + ], + "angle": 0, + "content": "[23] Hieu Le, Chen-Ping Yu, Gregory Zelinsky, and Dimitris Samaras. Co-localization with category-consistent features and geodesic distance propagation. In ICCV Workshop, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.357, + 0.892, + 0.399 + ], + "angle": 0, + "content": "[24] Dongze Lian, Jing Li, Jia Zheng, Weixin Luo, and Shenghua Gao. Density map regression guided detection network for rgb-d crowd counting and localization. CVPR, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.4, + 0.892, + 0.44 + ], + "angle": 0, + "content": "[25] Chang Liu, Yujie Zhong, Andrew Zisserman, and Weidi Xie. Countr: Transformer-based generalised visual counting. In BMVC, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.442, + 0.892, + 0.481 + ], + "angle": 0, + "content": "[26] Weizhe Liu, N. Durasov, and P. Fua. Leveraging self-supervision for cross-domain crowd counting. In CVPR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.483, + 0.892, + 0.51 + ], + "angle": 0, + "content": "[27] Weizhe Liu, Mathieu Salzmann, and Pascal V. Fua. Context-aware crowd counting. In CVPR, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.511, + 0.892, + 0.539 + ], + "angle": 0, + "content": "[28] Erika Lu, Weidi Xie, and Andrew Zisserman. Class-agnostic counting. In ACCV, 2018. 1, 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.54, + 0.892, + 0.594 + ], + "angle": 0, + "content": "[29] Terrell N. Mundhenk, Goran Konjevod, Wesam A. Sakla, and Kofi Boakye. A large contextual dataset for classification, detection and counting of cars with deep learning. In ECCV, 2016. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.596, + 0.892, + 0.649 + ], + "angle": 0, + "content": "[30] Sanath Narayan, Akshita Gupta, Fahad Shahbaz Khan, Cees G. M. Snoek, and Ling Shao. Latent embedding feedback and discriminative features for zero-shot classification. In ECCV, 2020. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.651, + 0.892, + 0.691 + ], + "angle": 0, + "content": "[31] Thanh Nguyen, Chau Pham, Khoi Nguyen, and Minh Hoai. Few-shot object counting and detection. In ECCV, 2022. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.692, + 0.892, + 0.774 + ], + "angle": 0, + "content": "[32] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In ICML, 2021. 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.776, + 0.892, + 0.802 + ], + "angle": 0, + "content": "[33] Viresh Ranjan and Minh Hoai. Exemplar free class agnostic counting. In ACCV, 2022. 1, 2, 3, 5, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.804, + 0.892, + 0.844 + ], + "angle": 0, + "content": "[34] Viresh Ranjan, Udbhav Sharma, Thua Nguyen, and Minh Hoai. Learning to count everything. In CVPR, 2021. 1, 2, 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.846, + 0.892, + 0.899 + ], + "angle": 0, + "content": "[35] Mahdi Rezaei and Mahsa Shahidi. Zero-shot learning and its applications from autonomous vehicles to Covid-19 diagnosis: A review. In Intelligence-Based Medicine, volume 3, 2020. 3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "15556" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.133 + ], + "angle": 0, + "content": "[36] Bernardino Romero-Paredes and Philip H. S. Torr. An embarrassingly simple approach to zero-shot learning. In ICML, 2015. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.136, + 0.469, + 0.192 + ], + "angle": 0, + "content": "[37] Deepak Babu Sam, Abhinav Agarwalla, Jimmy Joseph, Vishwanath A. Sindagi, R. Venkatesh Babu, and Vishal M. Patel. Completely self-supervised crowd counting via distribution matching. In ECCV, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.194, + 0.469, + 0.248 + ], + "angle": 0, + "content": "[38] Min Shi, Hao Lu, Chen Feng, Chengxin Liu, and Zhiguo Cao. Represent, compare, and learn: A similarity-aware framework for class-agnostic counting. In CVPR, 2022. 1, 2, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.251, + 0.468, + 0.292 + ], + "angle": 0, + "content": "[39] Vishwanath A. Sindagi, Rajeev Yasarla, and Vishal M. Patel. Pushing the frontiers of unconstrained crowd counting: New dataset and benchmark method. In ICCV, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.295, + 0.468, + 0.335 + ], + "angle": 0, + "content": "[40] Jia Wan, Ziquan Liu, and Antoni B. Chan. A generalized loss function for crowd counting and localization. In CVPR, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.338, + 0.468, + 0.379 + ], + "angle": 0, + "content": "[41] Boyu Wang, Huidong Liu, Dimitris Samaras, and Minh Hoai Nguyen. Distribution matching for crowd counting. In NeurIPS, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.382, + 0.468, + 0.436 + ], + "angle": 0, + "content": "[42] Qi Wang, Junyu Gao, Wei Lin, and Xuelong Li. Nwpu-crowd: A large-scale benchmark for crowd counting and localization. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43, 2021. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.439, + 0.468, + 0.481 + ], + "angle": 0, + "content": "[43] Yongqin Xian, Zeynep Akata, Gaurav Sharma, Quynh N. Nguyen, Matthias Hein, and Bernt Schiele. Latent embeddings for zero-shot classification. In CVPR, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.483, + 0.468, + 0.55 + ], + "angle": 0, + "content": "[44] Yongqin Xian, Christoph H. Lampert, Bernt Schiele, and Zeynep Akata. Zero-shot learning—a comprehensive evaluation of the good, the bad and the ugly. IEEE Transactions on Pattern Analysis and Machine Intelligence, 41, 2019. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.554, + 0.468, + 0.595 + ], + "angle": 0, + "content": "[45] Yongqin Xian, Saurabh Sharma, Bernt Schiele, and Zeynep Akata. F-vaegan-d2: A feature generating framework for any-shot learning. In CVPR, 2019. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.598, + 0.468, + 0.666 + ], + "angle": 0, + "content": "[46] Weidi Xie, J. Alison Noble, and Andrew Zisserman. Microscopy cell counting and detection with fully convolutional regression networks. Computer Methods in Biomechanics and Biomedical Engineering: Imaging & Visualization, 6, 2018. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.669, + 0.468, + 0.697 + ], + "angle": 0, + "content": "[47] Haipeng Xiong and Angela Yao. Discrete-constrained regression for local counting models. In ECCV, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.699, + 0.468, + 0.725 + ], + "angle": 0, + "content": "[48] Jingyi Xu and Hieu Le. Generating representative samples for few-shot classification. In CVPR, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.728, + 0.468, + 0.77 + ], + "angle": 0, + "content": "[49] Jingyi Xu, Hieu Le, Mingzhen Huang, ShahRukh Athar, and Dimitris Samaras. Variational feature disentangling for fine-grained few-shot classification. In ICCV, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.772, + 0.468, + 0.812 + ], + "angle": 0, + "content": "[50] Shuo Yang, Hung-Ting Su, Winston H. Hsu, and Wen-Chin Chen. Class-agnostic few-shot object counting. In WACV, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.816, + 0.468, + 0.856 + ], + "angle": 0, + "content": "[51] Zhiyuan You, Kai Yang, Wenhan Luo, Xin Lu, Lei Cui, and Xinyi Le. Few-shot object counting with similarity-aware feature enhancement. In WACV, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.859, + 0.468, + 0.9 + ], + "angle": 0, + "content": "[52] Anran Zhang, Lei Yue, Jiayi Shen, Fan Zhu, Xiantong Zhen, Xianbin Cao, and Ling Shao. Attentional neural fields for crowd counting. In ICCV, 2019. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[53] Cong Zhang, Hongsheng Li, Xiaogang Wang, and Xiaokang Yang. Cross-scene crowd counting via deep convolutional neural networks. In CVPR, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.163 + ], + "angle": 0, + "content": "[54] Li Zhang, Tao Xiang, and Shaogang Gong. Learning a deep embedding model for zero-shot learning. In CVPR, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.165, + 0.892, + 0.192 + ], + "angle": 0, + "content": "[55] Qi Zhang and Antoni Chan. Calibration-free multi-view crowd counting. In ECCV, 2022. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.194, + 0.892, + 0.234 + ], + "angle": 0, + "content": "[56] Yingying Zhang, Desen Zhou, Siqin Chen, Shenghua Gao, and Yi Ma. Single-image crowd counting via multi-column convolutional neural network. In CVPR, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.236, + 0.892, + 0.264 + ], + "angle": 0, + "content": "[57] Ye Zheng, Jiahong Wu, Yongqiang Qin, Faen Zhang, and Li Cui. Zero-shot instance segmentation. In CVPR, 2021. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.264 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "15557" + } + ] +] \ No newline at end of file diff --git a/2023/Zero-Shot Object Counting/a8ad52a0-ee27-4576-981a-5efd9d6920f0_origin.pdf b/2023/Zero-Shot Object Counting/a8ad52a0-ee27-4576-981a-5efd9d6920f0_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e598b5f3713d73f5d88741f4e0de60e9eae491a0 --- /dev/null +++ b/2023/Zero-Shot Object Counting/a8ad52a0-ee27-4576-981a-5efd9d6920f0_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4466ce9f89b2e75aee34b841fec50415b02114fe1a9a936f48ec076344e5ddf +size 3309044 diff --git a/2023/Zero-Shot Object Counting/full.md b/2023/Zero-Shot Object Counting/full.md new file mode 100644 index 0000000000000000000000000000000000000000..8607f6a28d650cbde5064273a2610afdacbe215a --- /dev/null +++ b/2023/Zero-Shot Object Counting/full.md @@ -0,0 +1,329 @@ +# Zero-Shot Object Counting + +Jingyi $\mathrm{Xu}^{1}$ , Hieu $\mathrm{Le}^{2}$ , Vu Nguyen $^{1}$ , Viresh Ranjan $^{*3}$ , and Dimitris Samaras $^{1}$ + +1Stony Brook University 2EPFL 3Amazon + +# Abstract + +Class-agnostic object counting aims to count object instances of an arbitrary class at test time. Current methods for this challenging problem require human-annotated exemplars as inputs, which are often unavailable for novel categories, especially for autonomous systems. Thus, we propose zero-shot object counting (ZSC), a new setting where only the class name is available during test time. Such a counting system does not require human annotators in the loop and can operate automatically. Starting from a class name, we propose a method that can accurately identify the optimal patches which can then be used as counting exemplars. Specifically, we first construct a class prototype to select the patches that are likely to contain the objects of interest, namely class-relevant patches. Furthermore, we introduce a model that can quantitatively measure how suitable an arbitrary patch is as a counting exemplar. By applying this model to all the candidate patches, we can select the most suitable patches as exemplars for counting. Experimental results on a recent class-agnostic counting dataset, FSC-147, validate the effectiveness of our method. Code is available at https://github.com/cvlab-stonybrook/zero-shot-counting. + +# 1. Introduction + +Object counting aims to infer the number of objects in an image. Most of the existing methods focus on counting objects from specialized categories such as human crowds [37], cars [29], animals [4], and cells [46]. These methods count only a single category at a time. Recently, class-agnostic counting [28, 34, 38] has been proposed to count objects of arbitrary categories. Several human-annotated bounding boxes of objects are required to specify the objects of interest (see Figure 1a). 
However, having humans in the loop is not practical for many real-world applications, such as fully automated wildlife monitoring systems or vi + +![](images/0c00134abb2666d7bd32bc16dc84060d555ab990b2ba9dc3c7810e1eea5ab5a2.jpg) +(a) Few-shot Counting +Figure 1. Our proposed task of zero-shot object counting (ZSC). Traditional few-shot counting methods require a few exemplars of the object category (a). We propose zero-shot counting where the counter only needs the class name to count the number of object instances. (b). Few-shot counting methods require human annotators at test time while zero-shot counters can be fully automatic. + +![](images/6615debf754375cd3058cd6458bfb7a60f5640e9fee3eca317637b25be9849b5.jpg) +(b) Zero-Shot Counting + +sual anomaly detection systems. + +A more practical setting, exemplar-free class-agnostic counting, has been proposed recently by Ranjan et al. [33]. They introduce RepRPN, which first identifies the objects that occur most frequently in the image, and then uses them as exemplars for object counting. Even though RepRPN does not require any annotated boxes at test time, the method simply counts objects from the class with the highest number of instances. Thus, it can not be used for counting a specific class of interest. The method is only suitable for counting images with a single dominant object class, which limits the potential applicability. + +Our goal is to build an exemplar-free object counter where we can specify what to count. To this end, we introduce a new counting task in which the user only needs to provide the name of the class for counting rather than the exemplars (see Figure 1b). In this way, the counting model can not only operate in an automatic manner but also allow the user to define what to count by simply providing the class name. Note that the class to count during test time can be arbitrary. For cases where the test class is completely unseen to the trained model, the counter needs to adapt to the unseen class without any annotated data. Hence, we + +name this setting zero-shot object counting (ZSC), inspired by previous zero-shot learning approaches [6, 57]. + +To count without any annotated exemplars, our idea is to identify a few patches in the input image containing the target object that can be used as counting exemplars. Here the challenges are twofold: 1) how to localize patches that contain the object of interest based on the provided class name, and 2) how to select good exemplars for counting. Ideally, good object exemplars are visually representative for most instances in the image, which can benefit the object counter. In addition, we want to avoid selecting patches that contain irrelevant objects or backgrounds, which likely lead to incorrect object counts. + +To this end, we propose a two-step method that first localizes the class-relevant patches which contain the objects of interest based on the given class name, and then selects among these patches the optimal exemplars for counting. We use these selected exemplars, together with a pre-trained exemplar-based counting model, to achieve exemplar-free object counting. + +In particular, to localize the patches containing the objects of interest, we first construct a class prototype in a pretrained embedding space based on the given class name. To construct the class prototype, we train a conditional variational autoencoder (VAE) to generate features for an arbitrary class conditioned on its semantic embedding. 
The class prototype is computed by taking the average of the generated features. We then select the patches whose embeddings are the $k$ -nearest neighbors of the class prototype as the class-relevant patches. + +After obtaining the class-relevant patches, we further select among them the optimal patches to be used as counting exemplars. Here we observe that the feature maps obtained using good exemplars and bad exemplars often exhibit distinguishable differences. An example of the feature maps obtained with different exemplars is shown in Figure 2. The feature map from a good exemplar typically exhibits some repetitive patterns (e.g., the dots on the feature map) that center around the object areas while the patterns from a bad exemplar are more irregular and occur randomly across the image. Based on this observation, we train a model to measure the goodness of an input patch based on its corresponding feature maps. Specifically, given an arbitrary patch and a pre-trained exemplar-based object counter, we train this model to predict the counting error of the counter when using the patch as the exemplar. Here the counting error can indicate the goodness of the exemplar. After this error predictor is trained, we use it to select those patches with the smallest predicted errors as the final exemplars for counting. + +Experiments on the FSC-147 dataset show that our method outperforms the previous exemplar-free counting method [33] by a large margin. We also provide analyses to show that patches selected by our method can be + +![](images/4b39c3d1f8fd9279d469780fddec6dddadcbe6a39a2a530bde83b39226eee0e0.jpg) +Figure 2. Feature maps obtained using different exemplars given a pre-trained exemplar-based counting model. The feature maps obtained using good exemplars typically exhibit some repetitive patterns while the patterns from bad exemplars are more irregular. + +used in other exemplar-based counting methods to achieve exemplar-free counting. In short, our main contributions can be summarized as follows: + +- We introduce the task of zero-shot object counting that counts the number of instances of a specific class in the input image, given only the class name and without relying on any human-annotated exemplars. +- We propose a simple yet effective patch selection method that can accurately localize the optimal patches across the query image as exemplars for zero-shot object counting. +- We verify the effectiveness of our method on the FSC-147 dataset, through extensive ablation studies and visualization results. + +# 2. Related Work + +# 2.1. Class-specific Object Counting + +Class-specific object counting focuses on counting predefined categories, such as humans [1, 15, 24, 26, 37, 39, 40, 42, 47, 52, 53, 55, 56], animals [4], cells [46], or cars [14, 29]. Generally, existing methods can be categorized into two groups: detection-based methods [8, 14, 18] and regression-based methods [7, 10, 11, 27, 41, 53, 56]. Detection-based methods apply an object detector on the image and count the number of objects based on the detected boxes. Regression-based methods predict a density map for each input image, and the final result is obtained by summing up the pixel values. Both types of methods require abundant training data to learn a good model. Class-specific counters can perform well on trained categories. However, they can not be used to count objects of arbitrary categories at test time. + +# 2.2. 
Class-agnostic Object Counting + +Class-agnostic object counting aims to count arbitrary categories given only a few exemplars [3, 13, 25, 28, 31, 34, 38, 50, 51]. GMN [28] uses a shared embedding module to + +![](images/6ebfd08a8f53d7e8186002eb73ea2f3d646578d8303afba3c237121bc88582b5.jpg) +Figure 3. Overview of the proposed method. We first use a generative model to obtain a class prototype for the given class (e.g. grape) in a pre-trained feature space. Then given an input query image, we randomly sample a number of patches of various sizes and extract the corresponding feature embedding for each patch. We select the patches whose embeddings are the nearest neighbors of the class prototype as class-relevant patches. Then for each of the selected class-relevant patches, we use a pre-trained exemplar-based counting model to obtain the intermediate feature maps. Our proposed error predictor then takes the feature maps as input and predicts the counting error (here we use normalized counting errors). We select the patches with the smallest predicted errors as the final exemplar patches and use them for counting. + +extract feature maps for both query images and exemplars, which are then concatenated and fed into a matching module to regress the object count. FamNet [34] adopts a similar way to do correlation matching and further applies test-time adaptation. These methods require human-annotated exemplars as inputs. Recently, Ranjan et al. have proposed RepRPN [33], which achieves exemplar-free counting by identifying exemplars from the most frequent objects via a Region Proposal Network (RPN)-based model. However, the class of interest can not be explicitly specified for the RepRPN. In comparison, our proposed method can count instances of a specific class given only the class name. + +# 2.3. Zero-shot Image Classification + +Zero-shot classification aims to classify unseen categories for which data is not available during training [5, 9, 12, 16, 19, 21, 23, 35, 36]. Semantic descriptors are mostly leveraged as a bridge to enable the knowledge transfer between seen and unseen classes. Earlier zero-shot learning (ZSL) works relate the semantic descriptors with visual features in an embedding space and recognize unseen samples by searching their nearest class-level semantic descriptor in this embedding space [17, 36, 43, 54]. Recently, generative models [20, 22, 48, 49] have been widely employed to synthesize unseen class data to facilitate ZSL [30, 44, 45]. Xian et al. [44] use a conditional Wasserstein Generative Adversarial Network (GAN) [2] to generate unseen features which can then be used to train a discriminative classifier for ZSL. In our method, we also train a generative model conditioned on class-specific semantic embedding. Instead + +of using this generative model to hallucinate data, we use it to compute a prototype for each class. This class prototype is then used to select patches that contain objects of interest. + +# 3. Method + +Figure 3 summarizes our proposed method. Given an input query image and a class label, we first use a generative model to construct a class prototype for the given class in a pre-trained feature space. We then randomly sample a number of patches of various sizes and extract the feature embedding for each patch. The class-relevant patches are those patches whose embeddings are the nearest neighbors of the class prototype in the embedding space. 
We further use an error predictor to select the patches with the smallest predicted errors as the final exemplars for counting. We use the selected exemplars in an exemplar-based object counter to infer the object counts. For the rest of the paper, we denote this exemplar-based counter as the "base counting model". We will first describe how we train this base counting model and then present the details of our patch selection method. + +# 3.1. Training Base Counting Model + +We train our base counting model using abundant training images with annotations. Similar to previous works [34, 38], the base counting model uses the input image and the exemplars to obtain a density map for object counting. The model consists of a feature extractor $F$ and a counter $C$ . Given a query image $I$ and an exemplar $B$ of an arbitrary class $c$ , we input $I$ and $B$ to the feature extractor to obtain the corresponding output, denoted as $F(I)$ and $F(B)$ re + +spectively. $F(I)$ is a feature map of size $d * h_{I} * w_{I}$ and $F(B)$ is a feature map of size $d * h_{B} * w_{B}$ . We further perform global average pooling on $F(B)$ to form a feature vector $b$ of $d$ dimensions. + +After feature extraction, we obtain the similarity map $S$ by correlating the exemplar feature vector $b$ with the image feature map $F(I)$ . Specifically, if $w_{ij} = F_{ij}(I)$ is the channel feature at spatial position $(i,j)$ , $S$ can be computed by: + +$$ +S _ {i j} (I, B) = w _ {i j} ^ {T} b. \tag {1} +$$ + +In the case where $n$ exemplars are given, we use Eq. 1 to calculate $n$ similarity maps, and the final similarity map is the average of these $n$ similarity maps. + +We then concatenate the image feature map $F(I)$ with the similarity map $S$ , and input them into the counter $C$ to predict a density map $D$ . The final predicted count $N$ is obtained by summing over the predicted density map $D$ : + +$$ +N = \sum_ {i, j} D _ {(i, j)}, \tag {2} +$$ + +where $D_{(i,j)}$ denotes the density value for pixel $(i,j)$ . The supervision signal for training the counting model is the $L_{2}$ loss between the predicted density map and the ground truth density map: + +$$ +L _ {\text {c o u n t}} = \left\| D (I, B) - D ^ {*} (I) \right\| _ {2} ^ {2}, \tag {3} +$$ + +where $D^{*}$ denotes the ground truth density map. + +# 3.2. Zero-shot Object Counting + +In this section, we describe how we count objects of any unseen category given only the class name without access to any exemplar. Our strategy is to select a few patches in the image that can be used as exemplars for the base counting model. These patches are selected such that: 1) they contain the objects that we are counting and 2) they benefit the counting model, i.e., lead to small counting errors. + +# 3.2.1 Selecting Class-relevant Patches + +To select patches that contain the objects of interest, we first generate a class prototype based on the given class name using a conditional VAE model. Then we randomly sample a number of patches across the query image and select the class-relevant patches based on the generated prototype. + +Class prototype generation. Inspired by previous zero-shot learning approaches [44, 45], we train a conditional VAE model to generate features for an arbitrary class based on the semantic embedding of the class. The semantic embedding is obtained from a pre-trained text-vision model [32] given the corresponding class name. Specifically, we train the VAE model to reconstruct features in a pre-trained ImageNet feature space. 
The VAE is composed of an Encoder $E$ , which maps a visual feature $x$ to a latent code $z$ , + +and a decoder $G$ which reconstructs $x$ from $z$ . Both $E$ and $G$ are conditioned on the semantic embedding $a$ . The loss function for training this VAE for an input feature $x$ can be defined as: + +$$ +\begin{array}{l} L _ {V} (x) = \operatorname {K L} (q (z | x, a) | | p (z | a)) \tag {4} \\ - \mathrm {E} _ {q (z | x, a)} [ \log p (x | z, a) ]. \\ \end{array} +$$ + +The first term is the Kullback-Leibler divergence between the VAE posterior $q(z|x,a)$ and a prior distribution $p(z|a)$ . The second term is the decoder's reconstruction error. $q(z|x,a)$ is modeled as $E(x,a)$ and $p(x|z,a)$ is equal to $G(z,a)$ . The prior distribution is assumed to be $\mathcal{N}(0,I)$ for all classes. + +We can use the trained VAE to generate the class prototype for an arbitrary target class for counting. Specifically, given the target class name $y$ , we first generate a set of features by inputting the respective semantic vector $a^y$ and a noise vector $z$ to the decoder $G$ : + +$$ +\mathbb {G} ^ {y} = \{\hat {x} | \hat {x} = G (z, y), z \sim \mathcal {N} (0, I) \}. \tag {5} +$$ + +The class prototype $\mathfrak{p}^y$ is computed by taking the mean of all the features generated by VAE: + +$$ +\mathrm {p} ^ {y} = \frac {1}{| \mathbb {G} ^ {y} |} \sum_ {\hat {x} \in \mathbb {G} ^ {y}} \hat {x} \tag {6} +$$ + +Class-relevant patch selection. The generated class prototype can be considered as a class center representing the distribution of features of the corresponding class in the embedding space. Using the class prototype, we can select the class-relevant patches across the query image. Specifically, we first randomly sample $M$ patches of various sizes $\{b_1, b_2, \dots, b_m\}$ across the query image and extract their corresponding ImageNet features $\{f_1, f_2, \dots, f_m\}$ . To select the class-relevant patches, we calculate the $L_2$ distance between the class prototype and the patch embedding, namely $d_i = \| f_i - \mathrm{p}^y\|_2$ . Then we select the patches whose embeddings are the $k$ -nearest neighbors of the class prototype as the class-relevant patches. Since the ImageNet feature space is highly discriminative, i.e., features close to each other typically belong to the same class, the selected patches are likely to contain the objects of the target class. + +# 3.2.2 Selecting Exemplars for Counting + +Given a set of class-relevant patches and a pre-trained exemplar-based object counter, we aim to select a few exemplars from these patches that are optimal for counting. To do so, we introduce an error prediction network that predicts the counting error of an arbitrary patch when the patch is used as the exemplar. The counting error is calculated from the pre-trained counting model. Specifically, to train this error predictor, given a query image $\bar{I}$ and an arbitrary patch + +$\bar{B}$ cropped from $\bar{I}$ , we first use the base counting model to get the image feature map $F(\bar{I})$ , similarity map $\bar{S}$ , and the final predicted density map $\bar{D}$ . The counting error of the base counting model can be written as: + +$$ +\epsilon = \left| \sum_ {i, j} \bar {D} _ {(i, j)} - \bar {N} ^ {*} \right|, \tag {7} +$$ + +where $\bar{N}^*$ denotes the ground truth object count in image $\bar{I}$ . 
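To make the pre-selection step of Sec. 3.2.1 concrete, the sketch below builds the class prototype of Eqs. (5)-(6) and keeps the candidate boxes nearest to it in the pre-trained feature space. It is a minimal illustration under stated assumptions, not the released implementation: `decoder` stands in for the trained conditional VAE decoder G, `backbone` for the frozen ImageNet feature extractor, and all other names are placeholders.

```python
import torch

def class_prototype(decoder, class_emb, n_samples=256, latent_dim=512):
    # Average decoder outputs G(z, a) over z ~ N(0, I) to obtain the prototype p^y (Eqs. 5-6).
    z = torch.randn(n_samples, latent_dim)
    a = class_emb.expand(n_samples, -1)          # semantic embedding of the class name
    with torch.no_grad():
        return decoder(z, a).mean(dim=0)         # (d,) class prototype

def class_relevant_patches(image, boxes, backbone, prototype, k=10):
    # Keep the k candidate boxes whose features are the nearest neighbors (L2) of the prototype.
    feats = []
    with torch.no_grad():
        for (x1, y1, x2, y2) in boxes:
            patch = image[:, :, y1:y2, x1:x2]    # image: (1, 3, H, W)
            feats.append(backbone(patch).flatten())
    dists = torch.stack([(f - prototype).norm(p=2) for f in feats])
    idx = dists.topk(k, largest=False).indices
    return [boxes[int(i)] for i in idx]
```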
$\epsilon$ can be used to measure the goodness of $\bar{B}$ as an exemplar for $\bar{I}$ , i.e., a small $\epsilon$ indicates that $\bar{B}$ is a suitable exemplar for counting and vice versa. + +The error predictor $R$ is trained to regress the counting error produced by the base counting model. The input of $R$ is the channel-wise concatenation of the image feature map $F(\bar{I})$ and the similarity map $\tilde{S}$ . The training objective is the minimization of the mean squared error between the output of the predictor $R(F(\bar{I}),\bar{S})$ and the actual counting error produced by the base counting model $\epsilon$ . + +After the error predictor is trained, we can use it to select the optimal patches for counting. The candidates for selection here are the class-relevant patches selected by the class prototype in the previous step. For each candidate patch, we use the trained error predictor to infer the counting error when it is being used as the exemplar. The final selected patches for counting are the patches that yield the top- $s$ smallest counting errors. + +# 3.2.3 Using the Selected Patches as Exemplars + +Using the error predictor, we predict the error for each candidate patch and select the patches that lead to the smallest counting errors. The selected patches can then be used as exemplars for the base counting model to get the density map and the final count. We also conduct experiments to show that these selected patches can serve as exemplars for other exemplar-based counting models to achieve exemplar-free class-agnostic counting. + +# 4. Experiments + +# 4.1. Implementation Details + +Network architecture For the base counting model, we use ResNet-50 as the backbone of the feature extractor, initialized with the weights of a pre-trained ImageNet model. The backbone outputs feature maps of 1024 channels. For each query image, the number of channels is reduced to 256 using an $1 \times 1$ convolution. For each exemplar, the feature maps are first processed with global average pooling and then linearly mapped to obtain a 256-d feature vector. The counter consists of 5 convolutional and bilinear upsampling layers to regress a density map of the same size as the query image. For the feature generation model, both the encoder and the decoder are two-layer fully-connected + +(FC) networks with 4096 hidden units. LeakyReLU and ReLU are the non-linear activation functions in the hidden and output layers, respectively. The dimensions of the latent space and the semantic embeddings are both set to be 512. For the error predictor, 5 convolutional and bilinear upsampling layers are followed by a linear layer to output the counting error. + +Dataset We use the FSC-147 dataset [34] to train the base counting model and the error predictor. FSC-147 is the first large-scale dataset for class-agnostic counting. It includes 6135 images from 147 categories varying from animals, kitchen utensils, to vehicles. The categories in the training, validation, and test sets do not overlap. The feature generator is trained on the MS-COCO detection dataset. Note that the previous exemplar-free method [33] also uses MS-COCO to pre-train their counter. + +Training details Both the base counting model and the error predictor are trained using the AdamW optimizer with a fixed learning rate of $10^{-5}$ . The base counting model is trained for 300 epochs with a batch size of 8. 
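As a companion to the training details above, the error-predictor objective of Sec. 3.2.2 amounts to a small regression against the counting error of Eq. (7). The hedged sketch below assumes a frozen `base_counter(image, patch)` that returns the predicted density map together with its image feature map and similarity map; every name here is an illustrative placeholder rather than the paper's code.

```python
import torch
import torch.nn.functional as F

def error_predictor_step(base_counter, error_predictor, optimizer, image, patch, gt_count):
    # One training step of R: regress the counting error of the frozen base counter
    # when `patch` is used as the exemplar (MSE against Eq. 7).
    with torch.no_grad():
        density, feat_map, sim_map = base_counter(image, patch)
        eps = (density.sum() - gt_count).abs()
    pred = error_predictor(torch.cat([feat_map, sim_map], dim=1))
    loss = F.mse_loss(pred.squeeze(), eps)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()

def select_exemplars(base_counter, error_predictor, image, candidates, s=3):
    # Inference: keep the s class-relevant candidates with the smallest predicted error.
    errors = []
    with torch.no_grad():
        for patch in candidates:
            _, feat_map, sim_map = base_counter(image, patch)
            errors.append(error_predictor(torch.cat([feat_map, sim_map], dim=1)).item())
    order = sorted(range(len(candidates)), key=errors.__getitem__)
    return [candidates[i] for i in order[:s]]
```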
We resize the input query image to a fixed height of 384, and the width is adjusted accordingly to preserve the aspect ratio of the original image. Exemplars are resized to $128 \times 128$ before being input into the feature extractor. The feature generation model is trained using the Adam optimizer and the learning rate is set to be $10^{-4}$ . The semantic embeddings are extracted from CLIP [32]. To select the class-relevant patches, we randomly sample 450 boxes of various sizes across the input query image and select 10 patches whose embeddings are the 10-nearest neighbors of the class prototype. The final selected patches are those that yield the top-3 smallest counting errors predicted by the error predictor. + +# 4.2. Evaluation Metrics + +We use Mean Average Error (MAE) and Root Mean Squared Error (RMSE) to measure the performance of different object counters. Besides, we follow [31] to report the Normalized Relative Error (NAE) and Squared Relative Error (SRE). In particular, MAE = $\frac{1}{n}\sum_{i=1}^{n}|y_i - \hat{y}_i|$ ; RMSE = $\sqrt{\frac{1}{n}\sum_{i=1}^{n}(y_i - \hat{y}_i)^2}$ ; NAE = $\frac{1}{n}\sum_{i=1}^{n}\frac{|y_i - \hat{y}_i|}{y_i}$ ; SRE = $\sqrt{\frac{1}{n}\sum_{i=1}^{n}\frac{(y_i - \hat{y}_i)^2}{y_i}}$ where $n$ is the number of test images, and $y_i$ and $\hat{y}_i$ are the ground truth and the predicted number of objects for image $i$ respectively. + +# 4.3. Comparing Methods + +We compare our method with the previous works on class-agnostic counting. RepRPN-Counter [33] is the only previous class-agnostic counting method that does not require human-annotated exemplars as input. In order to make other exemplar based class-agnostic methods including GMN (General Matching Network [28]), FamNet (Few-shot adaptation and matching Network [34]) and BMNet + +
| Method | Exemplars | Val MAE | Val RMSE | Val NAE | Val SRE | Test MAE | Test RMSE | Test NAE | Test SRE |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| GMN [28] | GT | 29.66 | 89.81 | - | - | 26.52 | 124.57 | - | - |
| GMN [28] | RPN | 40.96 | 108.47 | - | - | 39.72 | 142.81 | - | - |
| FamNet+ [34] | GT | 23.75 | 69.07 | 0.52 | 4.25 | 22.08 | 99.54 | 0.44 | 6.45 |
| FamNet+ [34] | RPN | 42.85 | 121.59 | 0.75 | 6.94 | 42.70 | 146.08 | 0.74 | 7.14 |
| BMNet [38] | GT | 19.06 | 67.95 | 0.26 | 4.39 | 16.71 | 103.31 | 0.26 | 3.32 |
| BMNet [38] | RPN | 37.26 | 108.54 | 0.42 | 5.43 | 37.22 | 143.13 | 0.41 | 5.31 |
| BMNet+ [38] | GT | 15.74 | 58.53 | 0.27 | 6.57 | 14.62 | 91.83 | 0.25 | 2.74 |
| BMNet+ [38] | RPN | 35.15 | 106.07 | 0.41 | 5.28 | 34.52 | 132.64 | 0.39 | 5.26 |
| RepRPN-Counter [33] | - | 30.40 | 98.73 | - | - | 27.45 | 129.69 | - | - |
| Ours (Base) | GT | 18.55 | 61.12 | 0.30 | 3.18 | 20.68 | 109.14 | 0.36 | 7.63 |
| Ours (Base) | RPN | 32.19 | 99.21 | 0.38 | 4.80 | 29.25 | 130.65 | 0.35 | 4.35 |
| Ours (Base) | Patch-Selection | 26.93 | 88.63 | 0.36 | 4.26 | 22.09 | 115.17 | 0.34 | 3.74 |
Table 1. Quantitative comparisons on the FSC-147 dataset. "GT" denotes using human-annotated boxes as exemplars. "RPN" denotes using the top-3 RPN proposals with the highest objectness scores as exemplars. "Patch-Selection" denotes using our selected patches as exemplars.

(Bilinear Matching Network [38]) work in the exemplar-free setup, we replace the human-provided exemplars with exemplars generated by a pre-trained object detector. Specifically, we use the RPN of a Faster R-CNN pre-trained on the MS-COCO dataset and select the top-3 proposals with the highest objectness scores as the exemplars. We also include the performance of these methods using human-annotated exemplars for a complete comparison.

# 4.4. Results

Quantitative results. As shown in Table 1, our proposed method outperforms the previous exemplar-free counting method [33] by a large margin, with a reduction of 10.10 in validation RMSE and 14.52 in test RMSE. We also notice that the performance of all exemplar-based counting methods drops significantly when the human-annotated exemplars are replaced with RPN-generated proposals. The state-of-the-art exemplar-based method BMNet+ [38], for example, shows a 19.90 increase in test MAE and a 40.81 increase in test RMSE. In comparison, the performance gap is much smaller when using our selected patches as exemplars, as reflected by a 1.41 increase in test MAE and a 6.03 increase in test RMSE. Noticeably, the NAE and the SRE on the test set are even reduced when using our selected patches compared with the human-annotated exemplars.

Qualitative analysis. In Figure 4, we present a few input images, the image patches selected by our method, and the corresponding density maps. Our method effectively identifies the patches that are suitable for object counting. The density maps produced by our selected patches are meaningful and close to the density maps produced by human-annotated patches. The counting model with random image patches as exemplars, in comparison, fails to output meaningful density maps and infers incorrect object counts.

# 5. Analyses

# 5.1. Ablation Studies

Our proposed patch selection method consists of two steps: the selection of class-relevant patches via a generated class prototype and the selection of the optimal patches via an error predictor. We analyze the contribution of each step quantitatively and qualitatively. Quantitative results are in Table 2. We first evaluate the performance of our baseline, i.e., using 3 randomly sampled patches as exemplars without any selection step. As shown in Table 2, using the class prototype to select class-relevant patches reduces the validation MAE by 7.19 and the test MAE by 6.07. Applying the error predictor alone improves the baseline by 7.22 on the validation MAE and 7.57 on the test MAE. Finally, applying the two components together further boosts performance, achieving 26.93 on the validation MAE and 22.09 on the test MAE.

We provide further qualitative analysis by visualizing the selected patches. As shown in Figure 5, for each input query image, we show 10 class-relevant patches selected using our generated prototype, ranked by their predicted counting error (from low to high). All the 10 selected class-relevant patches exhibit some class-specific features.
However, not all these patches are suitable to be used as counting exemplars, i.e., some patches only contain parts of the object, and some patches contain some background. By further applying our proposed error predictor, we can identify the most suitable patches with the smallest predicted counting errors. + +# 5.2. Generalization to Exemplar-based Methods + +Our proposed method can be considered as a general patch selection method that is applicable to other visual counters to achieve exemplar-free counting. To verify that, we use our selected patches as the exemplars for three + +![](images/fe4a6ef528c84641fb0e5dd34b44a7a8276d88d070b0e4c619d7316609b7c106.jpg) + +![](images/8ca765e47c0c7daa9642f1120b10640503f129a405c3ea0307f1142e0fc7fcdb.jpg) + +![](images/d1909e34066588d405bc4d78523cb7389e762ed28ac84e5b336bbde8d1ade92a.jpg) + +![](images/94bdd630442b3b673505ae494ab3ffa239ebd0023791386d504855f8c24bc4f2.jpg) + +![](images/eb3009146f36d56ad021f85e0b4771c8f06a5c28b738f5771aa15c3230cbd957.jpg) + +![](images/f374c2dfeec04c74b949d57f39d8a02f9f0835f1b3f4d6a9028d61dca898e190.jpg) + +![](images/8b309d9a73c65e4e982e77d9e88ec2e160d479f751387141841d308fcf13b8b8.jpg) + +![](images/29b47f7e9b6a9d5ea8fdf3d200a38bfe0d9b75ca54f0b71e926e53ca6fb5f1d6.jpg) + +![](images/4bc34c2ca1145c11f19f0a3e9fc03a84e2f29e8d5859ac7cd3b0be5486b693f2.jpg) + +![](images/a44cff7de693c74ae8f4bfd3d996ad58aa1ae282811b5b9f872751ffde832b3b.jpg) + +![](images/531c80e23d0421edb6b176462146e43fb76a7e403dba021f57de93950817aaf1.jpg) + +![](images/6b2b203afd898aae2fa9579837ebd56b1f319d0c70f9cab13d61a3f08683ae32.jpg) + +![](images/330cdba23ac7d59f52bf378aadf8438e3bcadeb2a7bd0b1e8c7b423755924c50.jpg) +Figure 4. Qualitative results on the FSC-147 dataset. We show the counting exemplars and the corresponding density maps of ground truth boxes, randomly selected patches, and our selected patches respectively. Predicted counting results are shown at the top-right corner. Our method accurately identifies suitable patches for counting and the predicted density maps are close to the ground truth density maps. + +![](images/69dbf44a6ceaf9a2dcb2374ab289efdabb0dfd9300461637ca541da95e0d99b0.jpg) +Ground Truth + +![](images/9158edf2d16c61e5c3520586e1b488792e3cffbdd9fdd417458ba156bad48009.jpg) + +![](images/289d8be36828979647e6e733900eba80a45585256e711c64af9db544502c13a4.jpg) +Ours + +![](images/2bf6900cbb2c147be150f90b4bc4c32a82b1862130611c1fbf3e2a15615a7e57.jpg) +Figure 5. Qualitative ablation analysis. All the 10 selected class-relevant patches exhibit some class-specific attributes. They are ranked by the predicted counting errors and the final selected patches with the smallest errors are framed in green. + +
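For reference, the four metrics reported in Tables 1 and 2 follow the definitions in Sec. 4.2. The snippet below is a small, self-contained sketch of those formulas, not the authors' evaluation code; the function name `counting_metrics` is an illustrative placeholder.

```python
import math

def counting_metrics(gt_counts, pred_counts):
    """MAE, RMSE, NAE and SRE over a test set, as defined in Sec. 4.2."""
    n = len(gt_counts)
    abs_err = [abs(y - y_hat) for y, y_hat in zip(gt_counts, pred_counts)]
    sq_err = [(y - y_hat) ** 2 for y, y_hat in zip(gt_counts, pred_counts)]
    mae = sum(abs_err) / n                                        # (1/n) sum |y - y_hat|
    rmse = math.sqrt(sum(sq_err) / n)                             # sqrt((1/n) sum (y - y_hat)^2)
    nae = sum(e / y for e, y in zip(abs_err, gt_counts)) / n      # relative absolute error
    sre = math.sqrt(sum(e / y for e, y in zip(sq_err, gt_counts)) / n)  # relative squared error
    return mae, rmse, nae, sre

# example usage with made-up counts:
# mae, rmse, nae, sre = counting_metrics([55, 12, 30], [50.0, 15.0, 29.0])
```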
| Prototype | Predictor | Val MAE | Val RMSE | Val NAE | Val SRE | Test MAE | Test RMSE | Test NAE | Test SRE |
|---|---|---|---|---|---|---|---|---|---|
| - | - | 35.20 | 106.70 | 0.61 | 6.68 | 31.37 | 134.98 | 0.52 | 5.92 |
| ✓ | - | 28.01 | 88.29 | 0.39 | 4.66 | 25.30 | 113.82 | 0.40 | 4.88 |
| - | ✓ | 27.98 | 88.62 | 0.43 | 4.59 | 23.80 | 128.36 | 0.40 | 4.43 |
| ✓ | ✓ | 26.93 | 88.63 | 0.36 | 4.26 | 22.09 | 115.17 | 0.34 | 3.74 |
Table 2. Ablation study on each component's contribution to the final results. We show the effectiveness of the two steps of our framework: selecting class-relevant patches via a generated class prototype and selecting optimal patches via an error predictor.

other exemplar-based methods: FamNet [34], BMNet and BMNet+ [38]. Figure 6 (a) shows the results on the FSC-147 validation set. The baseline uses three randomly sampled patches as the exemplars for the pre-trained exemplar-based counter. By using the generated class prototype to select class-relevant patches, the error rate is reduced by 5.18, 8.59 and 5.60 on FamNet, BMNet and BMNet+, respectively. When the error predictor is additionally applied, the error rate is further reduced by 1.76, 1.00 and 1.08 on FamNet, BMNet and BMNet+, respectively. Similarly, Figure 6 (b) shows the results on the FSC-147 test set. Our method achieves consistent performance improvements for all three methods.

![](images/d2b04911282de7979d4bf2835d8b21d7e9590aa789438c8d8521920c00a25814.jpg)
(a)

![](images/9871b996fafadca835a82c199766ca8c4307f6aff46fadae7410b9f81c06a48b.jpg)
(b)

# 5.3. Multi-class Object Counting

Our method can count instances of a specific class given the class name, which is particularly useful when there are multiple classes in the same image. In this section, we show some visualization results in this multi-class scenario. As seen in Figure 7, our method selects patches according to the given class name and counts instances of that specific class in the input image. Correspondingly, the heatmap highlights the image regions that are most relevant to the specified class. Here the heatmaps are obtained by correlating the exemplar feature vector with the image feature map in a pre-trained ImageNet feature space. Note that, for counting purposes, we mask out the image regions where the activation value in the heatmap is below a threshold. We also show the patches selected by another exemplar-free counting method, RepRPN [33]. The class of the patches selected by RepRPN cannot be explicitly specified; it simply selects patches from the class with the highest number of instances in the image according to the repetition score.

![](images/efee905dc8491de7e8ab72a66f303efe9a7bfd45def19d6566416ca8dafa8c6a.jpg)

![](images/b2057ff667537b23fb5a12c10c5f78d9a862efe17544dad0c261cde14cdd70eb.jpg)

![](images/a21535710190b515d0cc1b8add4a36c28d21c85ccb4ca6cf656c6e1b9f73716e.jpg)
Figure 6. Using our selected patches as exemplars for other exemplar-based class-agnostic counting methods (FamNet, BMNet and BMNet+) on the FSC-147 dataset. Blue bars are the MAEs of using three randomly sampled patches. Orange bars are the MAEs of using the class prototype to select class-relevant patches as exemplars. Green bars are the MAEs of using the class prototype and error predictor to select optimal patches as exemplars.
Figure 7. Visualization results of our method in some multi-class examples. Our method selects patches according to the given class name and the corresponding heatmap highlights the relevant areas.

# 6. Conclusion

In this paper, we proposed a new task, zero-shot object counting, to count instances of a specific class given only the class name without access to any exemplars. To address this, we developed a simple yet effective method that accurately localizes the optimal patches across the query image that can be used as counting exemplars.
Specifically, we construct a class prototype in a pre-trained feature space and use the prototype to select patches that contain objects of interest; then we use an error predictor to select those patches with the smallest predicted errors as the final exemplars for counting. Extensive results demonstrate the effectiveness of our method. We also conduct experiments to show that our selected patches can be used for other exemplar-based counting methods to achieve exemplar-free counting. + +Acknowledgements. This research was partially supported by NSF grants IIS-2123920 and IIS-2212046 and the NASA Biodiversity program (Award 80NSSC21K1027). + +# References + +[1] Shahira Abousamra, Minh Hoai, Dimitris Samaras, and Chao Chen. Localization in the crowd with topological constraints. In AAAI, 2021. 2 +[2] Martin Arjovsky, Soumith Chintala, and Léon Bottou. Wasserstein gan. In ICML, 2017. 3 +[3] Carlos Arteta, Victor S. Lempitsky, Julia Alison Noble, and Andrew Zisserman. Interactive object counting. In ECCV, 2014. 2 +[4] Carlos Arteta, Victor S. Lempitsky, and Andrew Zisserman. Counting in the wild. In ECCV, 2016. 1, 2 +[5] Yuval Atzmon and Gal Chechik. Adaptive confidence smoothing for generalized zero-shot learning. In CVPR, 2019. 3 +[6] Ankan Bansal, Karan Sikka, Gaurav Sharma, Rama Chellappa, and Ajay Divakaran. Zero-shot object detection. In ECCV, 2018. 2 +[7] Antoni B. Chan, Zhang-Sheng John Liang, and Nuno Vasconcelos. Privacy preserving crowd monitoring: Counting people without people models or tracking. In CVPR, 2008. 2 +[8] Prithvijit Chattopadhyay, Ramakrishna Vedantam, Ramprasaath R. Selvaraju, Dhruv Batra, and Devi Parikh. Counting everyday objects in everyday scenes. CVPR, 2017. 2 +[9] Long Chen, Hanwang Zhang, Jun Xiao, W. Liu, and Shih-Fu Chang. Zero-shot visual recognition using semantics-preserving adversarial embedding networks. In CVPR, 2018. 3 +[10] Hisham Cholakkal, Guolei Sun, Fahad Shahbaz Khan, and Ling Shao. Object counting and instance segmentation with image-level supervision. In CVPR, 2019. 2 +[11] Hisham Cholakkal, Guolei Sun, Salman Hameed Khan, Fahad Shahbaz Khan, Ling Shao, and Luc Van Gool. Towards partial supervision for generic object counting in natural scenes. volume 44, 2022. 2 +[12] Andrea Frome, Gregory S. Corrado, Jonathon Shlens, Samy Bengio, Jeffrey Dean, Marc'Aurelio Ranzato, and Tomas Mikolov. Devise: A deep visual-semantic embedding model. In NIPS, 2013. 3 +[13] Shenjian Gong, Shanshan Zhang, Jian Yang, Dengxin Dai, and Bernt Schiele. Class-agnostic object counting robust to intraclass diversity. In ECCV, 2022. 2 +[14] Meng-Ru Hsieh, Yen-Liang Lin, and Winston H. Hsu. Drone-based object counting by spatially regularized regional proposal network. In ICCV, 2017. 2 +[15] Haroon Idrees, Muhammad Tayyab, Kishan Athrey, Dong Zhang, Somaya Ali Al-Maadeed, Nasir M. Rajpoot, and Mubarak Shah. Composition loss for counting, density map estimation and localization in dense crowds. In ECCV, 2018. 2 +[16] Dinesh Jayaraman and Kristen Grauman. Zero-shot recognition with unreliable attributes. In NIPS, 2014. 3 +[17] Christoph H. Lampert, Hannes Nickisch, and Stefan Harmeling. Learning to detect unseen object classes by between-class attribute transfer. CVPR, 2009. 3 + +[18] Issam H. Laradji, Negar Rostamzadeh, Pedro H. O. Pinheiro, David Vázquez, and Mark W. Schmidt. Where are the blobs: Counting by localization with point supervision. In ECCV, 2018. 2 +[19] Hieu Le, Bento Goncalves, Dimitris Samaras, and Heather Lynch. 
Weakly labeling the antarctic: The penguin colony case. In CVPR Workshops, June 2019. 3 +[20] Hieu Le and Dimitris Samaras. Physics-based shadow image decomposition for shadow removal. Los Alamitos, CA, USA. IEEE Computer Society. 3 +[21] Hieu Le and Dimitris Samaras. From shadow segmentation to shadow removal. In ECCV, 2020. 3 +[22] Hieu Le, Tomas F. Yago Vicente, Vu Nguyen, Minh Hoai, and Dimitris Samaras. A+D Net: Training a shadow detector with adversarial shadow attenuation. In ECCV, 2018. 3 +[23] Hieu Le, Chen-Ping Yu, Gregory Zelinsky, and Dimitris Samaras. Co-localization with category-consistent features and geodesic distance propagation. In ICCV Workshop, 2017. 3 +[24] Dongze Lian, Jing Li, Jia Zheng, Weixin Luo, and Shenghua Gao. Density map regression guided detection network for rgb-d crowd counting and localization. CVPR, 2019. 2 +[25] Chang Liu, Yujie Zhong, Andrew Zisserman, and Weidi Xie. Countr: Transformer-based generalised visual counting. In BMVC, 2022. 2 +[26] Weizhe Liu, N. Durasov, and P. Fua. Leveraging self-supervision for cross-domain crowd counting. In CVPR, 2022. 2 +[27] Weizhe Liu, Mathieu Salzmann, and Pascal V. Fua. Context-aware crowd counting. In CVPR, 2019. 2 +[28] Erika Lu, Weidi Xie, and Andrew Zisserman. Class-agnostic counting. In ACCV, 2018. 1, 2, 5, 6 +[29] Terrell N. Mundhenk, Goran Konjevod, Wesam A. Sakla, and Kofi Boakye. A large contextual dataset for classification, detection and counting of cars with deep learning. In ECCV, 2016. 1, 2 +[30] Sanath Narayan, Akshita Gupta, Fahad Shahbaz Khan, Cees G. M. Snoek, and Ling Shao. Latent embedding feedback and discriminative features for zero-shot classification. In ECCV, 2020. 3 +[31] Thanh Nguyen, Chau Pham, Khoi Nguyen, and Minh Hoai. Few-shot object counting and detection. In ECCV, 2022. 2, 5 +[32] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In ICML, 2021. 4, 5 +[33] Viresh Ranjan and Minh Hoai. Exemplar free class agnostic counting. In ACCV, 2022. 1, 2, 3, 5, 6, 8 +[34] Viresh Ranjan, Udbhav Sharma, Thua Nguyen, and Minh Hoai. Learning to count everything. In CVPR, 2021. 1, 2, 3, 5, 6, 7 +[35] Mahdi Rezaei and Mahsa Shahidi. Zero-shot learning and its applications from autonomous vehicles to Covid-19 diagnosis: A review. In Intelligence-Based Medicine, volume 3, 2020. 3 + +[36] Bernardino Romero-Paredes and Philip H. S. Torr. An embarrassingly simple approach to zero-shot learning. In ICML, 2015. 3 +[37] Deepak Babu Sam, Abhinav Agarwalla, Jimmy Joseph, Vishwanath A. Sindagi, R. Venkatesh Babu, and Vishal M. Patel. Completely self-supervised crowd counting via distribution matching. In ECCV, 2022. 1, 2 +[38] Min Shi, Hao Lu, Chen Feng, Chengxin Liu, and Zhiguo Cao. Represent, compare, and learn: A similarity-aware framework for class-agnostic counting. In CVPR, 2022. 1, 2, 3, 6, 7 +[39] Vishwanath A. Sindagi, Rajeev Yasarla, and Vishal M. Patel. Pushing the frontiers of unconstrained crowd counting: New dataset and benchmark method. In ICCV, 2019. 2 +[40] Jia Wan, Ziquan Liu, and Antoni B. Chan. A generalized loss function for crowd counting and localization. In CVPR, 2021. 2 +[41] Boyu Wang, Huidong Liu, Dimitris Samaras, and Minh Hoai Nguyen. Distribution matching for crowd counting. In NeurIPS, 2020. 2 +[42] Qi Wang, Junyu Gao, Wei Lin, and Xuelong Li. 
Nwpu-crowd: A large-scale benchmark for crowd counting and localization. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43, 2021. 2 +[43] Yongqin Xian, Zeynep Akata, Gaurav Sharma, Quynh N. Nguyen, Matthias Hein, and Bernt Schiele. Latent embeddings for zero-shot classification. In CVPR, 2016. 3 +[44] Yongqin Xian, Christoph H. Lampert, Bernt Schiele, and Zeynep Akata. Zero-shot learning—a comprehensive evaluation of the good, the bad and the ugly. IEEE Transactions on Pattern Analysis and Machine Intelligence, 41, 2019. 3, 4 +[45] Yongqin Xian, Saurabh Sharma, Bernt Schiele, and Zeynep Akata. F-vaegan-d2: A feature generating framework for any-shot learning. In CVPR, 2019. 3, 4 +[46] Weidi Xie, J. Alison Noble, and Andrew Zisserman. Microscopy cell counting and detection with fully convolutional regression networks. Computer Methods in Biomechanics and Biomedical Engineering: Imaging & Visualization, 6, 2018. 1, 2 +[47] Haipeng Xiong and Angela Yao. Discrete-constrained regression for local counting models. In ECCV, 2022. 2 +[48] Jingyi Xu and Hieu Le. Generating representative samples for few-shot classification. In CVPR, 2022. 3 +[49] Jingyi Xu, Hieu Le, Mingzhen Huang, ShahRukh Athar, and Dimitris Samaras. Variational feature disentangling for fine-grained few-shot classification. In ICCV, 2021. 3 +[50] Shuo Yang, Hung-Ting Su, Winston H. Hsu, and Wen-Chin Chen. Class-agnostic few-shot object counting. In WACV, 2021. 2 +[51] Zhiyuan You, Kai Yang, Wenhan Luo, Xin Lu, Lei Cui, and Xinyi Le. Few-shot object counting with similarity-aware feature enhancement. In WACV, 2023. 2 +[52] Anran Zhang, Lei Yue, Jiayi Shen, Fan Zhu, Xiantong Zhen, Xianbin Cao, and Ling Shao. Attentional neural fields for crowd counting. In ICCV, 2019. 2 + +[53] Cong Zhang, Hongsheng Li, Xiaogang Wang, and Xiaokang Yang. Cross-scene crowd counting via deep convolutional neural networks. In CVPR, 2015. 2 +[54] Li Zhang, Tao Xiang, and Shaogang Gong. Learning a deep embedding model for zero-shot learning. In CVPR, 2017. 3 +[55] Qi Zhang and Antoni Chan. Calibration-free multi-view crowd counting. In ECCV, 2022. 2 +[56] Yingying Zhang, Desen Zhou, Siqin Chen, Shenghua Gao, and Yi Ma. Single-image crowd counting via multi-column convolutional neural network. In CVPR, 2016. 2 +[57] Ye Zheng, Jiahong Wu, Yongqiang Qin, Faen Zhang, and Li Cui. Zero-shot instance segmentation. In CVPR, 2021. 
2 \ No newline at end of file diff --git a/2023/Zero-Shot Object Counting/images.zip b/2023/Zero-Shot Object Counting/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..a19d1e78c925145e6c669d1c57566e147f4f0603 --- /dev/null +++ b/2023/Zero-Shot Object Counting/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdb2dae07e260cd6edb7c0daa74ea8cba9daeca210f2f48a1ec3415a0cd45276 +size 577660 diff --git a/2023/Zero-Shot Object Counting/layout.json b/2023/Zero-Shot Object Counting/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..bb21342d4da05cc260bd80e83eefb0772f859658 --- /dev/null +++ b/2023/Zero-Shot Object Counting/layout.json @@ -0,0 +1,9296 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 211, + 103, + 384, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 103, + 384, + 121 + ], + "spans": [ + { + "bbox": [ + 211, + 103, + 384, + 121 + ], + "type": "text", + "content": "Zero-Shot Object Counting" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 113, + 152, + 478, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 152, + 478, + 169 + ], + "spans": [ + { + "bbox": [ + 113, + 152, + 478, + 169 + ], + "type": "text", + "content": "Jingyi " + }, + { + "bbox": [ + 113, + 152, + 478, + 169 + ], + "type": "inline_equation", + "content": "\\mathrm{Xu}^{1}" + }, + { + "bbox": [ + 113, + 152, + 478, + 169 + ], + "type": "text", + "content": ", Hieu " + }, + { + "bbox": [ + 113, + 152, + 478, + 169 + ], + "type": "inline_equation", + "content": "\\mathrm{Le}^{2}" + }, + { + "bbox": [ + 113, + 152, + 478, + 169 + ], + "type": "text", + "content": ", Vu Nguyen" + }, + { + "bbox": [ + 113, + 152, + 478, + 169 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 113, + 152, + 478, + 169 + ], + "type": "text", + "content": ", Viresh Ranjan" + }, + { + "bbox": [ + 113, + 152, + 478, + 169 + ], + "type": "inline_equation", + "content": "^{*3}" + }, + { + "bbox": [ + 113, + 152, + 478, + 169 + ], + "type": "text", + "content": ", and Dimitris Samaras" + }, + { + "bbox": [ + 113, + 152, + 478, + 169 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 188, + 176, + 405, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 188, + 176, + 405, + 192 + ], + "spans": [ + { + "bbox": [ + 188, + 176, + 405, + 192 + ], + "type": "text", + "content": "1Stony Brook University 2EPFL 3Amazon" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 227, + 192, + 239 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 227, + 192, + 239 + ], + "spans": [ + { + "bbox": [ + 143, + 227, + 192, + 239 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 253, + 290, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 253, + 290, + 517 + ], + "spans": [ + { + "bbox": [ + 46, + 253, + 290, + 517 + ], + "type": "text", + "content": "Class-agnostic object counting aims to count object instances of an arbitrary class at test time. Current methods for this challenging problem require human-annotated exemplars as inputs, which are often unavailable for novel categories, especially for autonomous systems. Thus, we propose zero-shot object counting (ZSC), a new setting where only the class name is available during test time. 
Such a counting system does not require human annotators in the loop and can operate automatically. Starting from a class name, we propose a method that can accurately identify the optimal patches which can then be used as counting exemplars. Specifically, we first construct a class prototype to select the patches that are likely to contain the objects of interest, namely class-relevant patches. Furthermore, we introduce a model that can quantitatively measure how suitable an arbitrary patch is as a counting exemplar. By applying this model to all the candidate patches, we can select the most suitable patches as exemplars for counting. Experimental results on a recent class-agnostic counting dataset, FSC-147, validate the effectiveness of our method. Code is available at https://github.com/cvlab-stonybrook/zero-shot-counting." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 540, + 128, + 553 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 540, + 128, + 553 + ], + "spans": [ + { + "bbox": [ + 47, + 540, + 128, + 553 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 561, + 287, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 561, + 287, + 693 + ], + "spans": [ + { + "bbox": [ + 46, + 561, + 287, + 693 + ], + "type": "text", + "content": "Object counting aims to infer the number of objects in an image. Most of the existing methods focus on counting objects from specialized categories such as human crowds [37], cars [29], animals [4], and cells [46]. These methods count only a single category at a time. Recently, class-agnostic counting [28, 34, 38] has been proposed to count objects of arbitrary categories. Several human-annotated bounding boxes of objects are required to specify the objects of interest (see Figure 1a). However, having humans in the loop is not practical for many real-world applications, such as fully automated wildlife monitoring systems or vi" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 342, + 226, + 419, + 344 + ], + "blocks": [ + { + "bbox": [ + 342, + 226, + 419, + 344 + ], + "lines": [ + { + "bbox": [ + 342, + 226, + 419, + 344 + ], + "spans": [ + { + "bbox": [ + 342, + 226, + 419, + 344 + ], + "type": "image", + "image_path": "0c00134abb2666d7bd32bc16dc84060d555ab990b2ba9dc3c7810e1eea5ab5a2.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 324, + 346, + 416, + 358 + ], + "lines": [ + { + "bbox": [ + 324, + 346, + 416, + 358 + ], + "spans": [ + { + "bbox": [ + 324, + 346, + 416, + 358 + ], + "type": "text", + "content": "(a) Few-shot Counting" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 304, + 362, + 547, + 429 + ], + "lines": [ + { + "bbox": [ + 304, + 362, + 547, + 429 + ], + "spans": [ + { + "bbox": [ + 304, + 362, + 547, + 429 + ], + "type": "text", + "content": "Figure 1. Our proposed task of zero-shot object counting (ZSC). Traditional few-shot counting methods require a few exemplars of the object category (a). We propose zero-shot counting where the counter only needs the class name to count the number of object instances. (b). Few-shot counting methods require human annotators at test time while zero-shot counters can be fully automatic." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 432, + 227, + 509, + 344 + ], + "blocks": [ + { + "bbox": [ + 432, + 227, + 509, + 344 + ], + "lines": [ + { + "bbox": [ + 432, + 227, + 509, + 344 + ], + "spans": [ + { + "bbox": [ + 432, + 227, + 509, + 344 + ], + "type": "image", + "image_path": "6615debf754375cd3058cd6458bfb7a60f5640e9fee3eca317637b25be9849b5.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 432, + 346, + 529, + 358 + ], + "lines": [ + { + "bbox": [ + 432, + 346, + 529, + 358 + ], + "spans": [ + { + "bbox": [ + 432, + 346, + 529, + 358 + ], + "type": "text", + "content": "(b) Zero-Shot Counting" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 437, + 438, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 437, + 438, + 449 + ], + "spans": [ + { + "bbox": [ + 306, + 437, + 438, + 449 + ], + "type": "text", + "content": "sual anomaly detection systems." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 449, + 545, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 449, + 545, + 581 + ], + "spans": [ + { + "bbox": [ + 304, + 449, + 545, + 581 + ], + "type": "text", + "content": "A more practical setting, exemplar-free class-agnostic counting, has been proposed recently by Ranjan et al. [33]. They introduce RepRPN, which first identifies the objects that occur most frequently in the image, and then uses them as exemplars for object counting. Even though RepRPN does not require any annotated boxes at test time, the method simply counts objects from the class with the highest number of instances. Thus, it can not be used for counting a specific class of interest. The method is only suitable for counting images with a single dominant object class, which limits the potential applicability." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 582, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 546, + 713 + ], + "type": "text", + "content": "Our goal is to build an exemplar-free object counter where we can specify what to count. To this end, we introduce a new counting task in which the user only needs to provide the name of the class for counting rather than the exemplars (see Figure 1b). In this way, the counting model can not only operate in an automatic manner but also allow the user to define what to count by simply providing the class name. Note that the class to count during test time can be arbitrary. For cases where the test class is completely unseen to the trained model, the counter needs to adapt to the unseen class without any annotated data. 
Hence, we" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 703, + 179, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 703, + 179, + 713 + ], + "spans": [ + { + "bbox": [ + 58, + 703, + 179, + 713 + ], + "type": "text", + "content": "*Work done prior to joining Amazon" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "15548" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 96 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 96 + ], + "type": "text", + "content": "name this setting zero-shot object counting (ZSC), inspired by previous zero-shot learning approaches [6, 57]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 98, + 286, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 98, + 286, + 228 + ], + "spans": [ + { + "bbox": [ + 46, + 98, + 286, + 228 + ], + "type": "text", + "content": "To count without any annotated exemplars, our idea is to identify a few patches in the input image containing the target object that can be used as counting exemplars. Here the challenges are twofold: 1) how to localize patches that contain the object of interest based on the provided class name, and 2) how to select good exemplars for counting. Ideally, good object exemplars are visually representative for most instances in the image, which can benefit the object counter. In addition, we want to avoid selecting patches that contain irrelevant objects or backgrounds, which likely lead to incorrect object counts." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 231, + 286, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 231, + 286, + 315 + ], + "spans": [ + { + "bbox": [ + 46, + 231, + 286, + 315 + ], + "type": "text", + "content": "To this end, we propose a two-step method that first localizes the class-relevant patches which contain the objects of interest based on the given class name, and then selects among these patches the optimal exemplars for counting. We use these selected exemplars, together with a pre-trained exemplar-based counting model, to achieve exemplar-free object counting." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 316, + 286, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 316, + 286, + 435 + ], + "spans": [ + { + "bbox": [ + 46, + 316, + 286, + 435 + ], + "type": "text", + "content": "In particular, to localize the patches containing the objects of interest, we first construct a class prototype in a pretrained embedding space based on the given class name. To construct the class prototype, we train a conditional variational autoencoder (VAE) to generate features for an arbitrary class conditioned on its semantic embedding. The class prototype is computed by taking the average of the generated features. We then select the patches whose embeddings are the " + }, + { + "bbox": [ + 46, + 316, + 286, + 435 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 46, + 316, + 286, + 435 + ], + "type": "text", + "content": "-nearest neighbors of the class prototype as the class-relevant patches." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 437, + 286, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 437, + 286, + 664 + ], + "spans": [ + { + "bbox": [ + 46, + 437, + 286, + 664 + ], + "type": "text", + "content": "After obtaining the class-relevant patches, we further select among them the optimal patches to be used as counting exemplars. Here we observe that the feature maps obtained using good exemplars and bad exemplars often exhibit distinguishable differences. An example of the feature maps obtained with different exemplars is shown in Figure 2. The feature map from a good exemplar typically exhibits some repetitive patterns (e.g., the dots on the feature map) that center around the object areas while the patterns from a bad exemplar are more irregular and occur randomly across the image. Based on this observation, we train a model to measure the goodness of an input patch based on its corresponding feature maps. Specifically, given an arbitrary patch and a pre-trained exemplar-based object counter, we train this model to predict the counting error of the counter when using the patch as the exemplar. Here the counting error can indicate the goodness of the exemplar. After this error predictor is trained, we use it to select those patches with the smallest predicted errors as the final exemplars for counting." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 665, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 286, + 713 + ], + "type": "text", + "content": "Experiments on the FSC-147 dataset show that our method outperforms the previous exemplar-free counting method [33] by a large margin. We also provide analyses to show that patches selected by our method can be" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 315, + 73, + 541, + 178 + ], + "blocks": [ + { + "bbox": [ + 315, + 73, + 541, + 178 + ], + "lines": [ + { + "bbox": [ + 315, + 73, + 541, + 178 + ], + "spans": [ + { + "bbox": [ + 315, + 73, + 541, + 178 + ], + "type": "image", + "image_path": "4b39c3d1f8fd9279d469780fddec6dddadcbe6a39a2a530bde83b39226eee0e0.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 190, + 545, + 235 + ], + "lines": [ + { + "bbox": [ + 305, + 190, + 545, + 235 + ], + "spans": [ + { + "bbox": [ + 305, + 190, + 545, + 235 + ], + "type": "text", + "content": "Figure 2. 
Feature maps obtained using different exemplars given a pre-trained exemplar-based counting model. The feature maps obtained using good exemplars typically exhibit some repetitive patterns while the patterns from bad exemplars are more irregular." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 244, + 545, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 244, + 545, + 280 + ], + "spans": [ + { + "bbox": [ + 305, + 244, + 545, + 280 + ], + "type": "text", + "content": "used in other exemplar-based counting methods to achieve exemplar-free counting. In short, our main contributions can be summarized as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 317, + 289, + 545, + 422 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 317, + 289, + 545, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 289, + 545, + 337 + ], + "spans": [ + { + "bbox": [ + 317, + 289, + 545, + 337 + ], + "type": "text", + "content": "- We introduce the task of zero-shot object counting that counts the number of instances of a specific class in the input image, given only the class name and without relying on any human-annotated exemplars." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 317, + 339, + 545, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 339, + 545, + 386 + ], + "spans": [ + { + "bbox": [ + 317, + 339, + 545, + 386 + ], + "type": "text", + "content": "- We propose a simple yet effective patch selection method that can accurately localize the optimal patches across the query image as exemplars for zero-shot object counting." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 317, + 388, + 545, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 388, + 545, + 422 + ], + "spans": [ + { + "bbox": [ + 317, + 388, + 545, + 422 + ], + "type": "text", + "content": "- We verify the effectiveness of our method on the FSC-147 dataset, through extensive ablation studies and visualization results." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 444, + 391, + 456 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 444, + 391, + 456 + ], + "spans": [ + { + "bbox": [ + 306, + 444, + 391, + 456 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 464, + 471, + 477 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 464, + 471, + 477 + ], + "spans": [ + { + "bbox": [ + 306, + 464, + 471, + 477 + ], + "type": "text", + "content": "2.1. Class-specific Object Counting" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 483, + 545, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 483, + 545, + 651 + ], + "spans": [ + { + "bbox": [ + 304, + 483, + 545, + 651 + ], + "type": "text", + "content": "Class-specific object counting focuses on counting predefined categories, such as humans [1, 15, 24, 26, 37, 39, 40, 42, 47, 52, 53, 55, 56], animals [4], cells [46], or cars [14, 29]. Generally, existing methods can be categorized into two groups: detection-based methods [8, 14, 18] and regression-based methods [7, 10, 11, 27, 41, 53, 56]. Detection-based methods apply an object detector on the image and count the number of objects based on the detected boxes. 
Regression-based methods predict a density map for each input image, and the final result is obtained by summing up the pixel values. Both types of methods require abundant training data to learn a good model. Class-specific counters can perform well on trained categories. However, they can not be used to count objects of arbitrary categories at test time." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 658, + 476, + 672 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 658, + 476, + 672 + ], + "spans": [ + { + "bbox": [ + 306, + 658, + 476, + 672 + ], + "type": "text", + "content": "2.2. Class-agnostic Object Counting" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "type": "text", + "content": "Class-agnostic object counting aims to count arbitrary categories given only a few exemplars [3, 13, 25, 28, 31, 34, 38, 50, 51]. GMN [28] uses a shared embedding module to" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15549" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 66, + 72, + 519, + 250 + ], + "blocks": [ + { + "bbox": [ + 66, + 72, + 519, + 250 + ], + "lines": [ + { + "bbox": [ + 66, + 72, + 519, + 250 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 519, + 250 + ], + "type": "image", + "image_path": "6ebfd08a8f53d7e8186002eb73ea2f3d646578d8303afba3c237121bc88582b5.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 255, + 548, + 332 + ], + "lines": [ + { + "bbox": [ + 46, + 255, + 548, + 332 + ], + "spans": [ + { + "bbox": [ + 46, + 255, + 548, + 332 + ], + "type": "text", + "content": "Figure 3. Overview of the proposed method. We first use a generative model to obtain a class prototype for the given class (e.g. grape) in a pre-trained feature space. Then given an input query image, we randomly sample a number of patches of various sizes and extract the corresponding feature embedding for each patch. We select the patches whose embeddings are the nearest neighbors of the class prototype as class-relevant patches. Then for each of the selected class-relevant patches, we use a pre-trained exemplar-based counting model to obtain the intermediate feature maps. Our proposed error predictor then takes the feature maps as input and predicts the counting error (here we use normalized counting errors). We select the patches with the smallest predicted errors as the final exemplar patches and use them for counting." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 346, + 289, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 346, + 289, + 491 + ], + "spans": [ + { + "bbox": [ + 46, + 346, + 289, + 491 + ], + "type": "text", + "content": "extract feature maps for both query images and exemplars, which are then concatenated and fed into a matching module to regress the object count. 
FamNet [34] adopts a similar way to do correlation matching and further applies test-time adaptation. These methods require human-annotated exemplars as inputs. Recently, Ranjan et al. have proposed RepRPN [33], which achieves exemplar-free counting by identifying exemplars from the most frequent objects via a Region Proposal Network (RPN)-based model. However, the class of interest can not be explicitly specified for the RepRPN. In comparison, our proposed method can count instances of a specific class given only the class name." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 502, + 214, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 502, + 214, + 514 + ], + "spans": [ + { + "bbox": [ + 47, + 502, + 214, + 514 + ], + "type": "text", + "content": "2.3. Zero-shot Image Classification" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 522, + 289, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 522, + 289, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 522, + 289, + 713 + ], + "type": "text", + "content": "Zero-shot classification aims to classify unseen categories for which data is not available during training [5, 9, 12, 16, 19, 21, 23, 35, 36]. Semantic descriptors are mostly leveraged as a bridge to enable the knowledge transfer between seen and unseen classes. Earlier zero-shot learning (ZSL) works relate the semantic descriptors with visual features in an embedding space and recognize unseen samples by searching their nearest class-level semantic descriptor in this embedding space [17, 36, 43, 54]. Recently, generative models [20, 22, 48, 49] have been widely employed to synthesize unseen class data to facilitate ZSL [30, 44, 45]. Xian et al. [44] use a conditional Wasserstein Generative Adversarial Network (GAN) [2] to generate unseen features which can then be used to train a discriminative classifier for ZSL. In our method, we also train a generative model conditioned on class-specific semantic embedding. Instead" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 305, + 346, + 547, + 383 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 346, + 547, + 383 + ], + "spans": [ + { + "bbox": [ + 305, + 346, + 547, + 383 + ], + "type": "text", + "content": "of using this generative model to hallucinate data, we use it to compute a prototype for each class. This class prototype is then used to select patches that contain objects of interest." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 392, + 362, + 405 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 392, + 362, + 405 + ], + "spans": [ + { + "bbox": [ + 306, + 392, + 362, + 405 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 412, + 547, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 412, + 547, + 593 + ], + "spans": [ + { + "bbox": [ + 304, + 412, + 547, + 593 + ], + "type": "text", + "content": "Figure 3 summarizes our proposed method. Given an input query image and a class label, we first use a generative model to construct a class prototype for the given class in a pre-trained feature space. We then randomly sample a number of patches of various sizes and extract the feature embedding for each patch. The class-relevant patches are those patches whose embeddings are the nearest neighbors of the class prototype in the embedding space. 
We further use an error predictor to select the patches with the smallest predicted errors as the final exemplars for counting. We use the selected exemplars in an exemplar-based object counter to infer the object counts. For the rest of the paper, we denote this exemplar-based counter as the \"base counting model\". We will first describe how we train this base counting model and then present the details of our patch selection method." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 599, + 474, + 612 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 599, + 474, + 612 + ], + "spans": [ + { + "bbox": [ + 306, + 599, + 474, + 612 + ], + "type": "text", + "content": "3.1. Training Base Counting Model" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "content": "We train our base counting model using abundant training images with annotations. Similar to previous works [34, 38], the base counting model uses the input image and the exemplars to obtain a density map for object counting. The model consists of a feature extractor " + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "content": " and a counter " + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "content": ". Given a query image " + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "content": " and an exemplar " + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "content": " of an arbitrary class " + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "content": ", we input " + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "content": " to the feature extractor to obtain the corresponding output, denoted as " + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "inline_equation", + "content": "F(I)" + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "inline_equation", + "content": "F(B)" + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "content": " re" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "15550" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + 
"para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 119 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 119 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 119 + ], + "type": "text", + "content": "spectively. " + }, + { + "bbox": [ + 47, + 72, + 287, + 119 + ], + "type": "inline_equation", + "content": "F(I)" + }, + { + "bbox": [ + 47, + 72, + 287, + 119 + ], + "type": "text", + "content": " is a feature map of size " + }, + { + "bbox": [ + 47, + 72, + 287, + 119 + ], + "type": "inline_equation", + "content": "d * h_{I} * w_{I}" + }, + { + "bbox": [ + 47, + 72, + 287, + 119 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 72, + 287, + 119 + ], + "type": "inline_equation", + "content": "F(B)" + }, + { + "bbox": [ + 47, + 72, + 287, + 119 + ], + "type": "text", + "content": " is a feature map of size " + }, + { + "bbox": [ + 47, + 72, + 287, + 119 + ], + "type": "inline_equation", + "content": "d * h_{B} * w_{B}" + }, + { + "bbox": [ + 47, + 72, + 287, + 119 + ], + "type": "text", + "content": ". We further perform global average pooling on " + }, + { + "bbox": [ + 47, + 72, + 287, + 119 + ], + "type": "inline_equation", + "content": "F(B)" + }, + { + "bbox": [ + 47, + 72, + 287, + 119 + ], + "type": "text", + "content": " to form a feature vector " + }, + { + "bbox": [ + 47, + 72, + 287, + 119 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 47, + 72, + 287, + 119 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 47, + 72, + 287, + 119 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 47, + 72, + 287, + 119 + ], + "type": "text", + "content": " dimensions." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 120, + 287, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 120, + 287, + 168 + ], + "spans": [ + { + "bbox": [ + 47, + 120, + 287, + 168 + ], + "type": "text", + "content": "After feature extraction, we obtain the similarity map " + }, + { + "bbox": [ + 47, + 120, + 287, + 168 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 47, + 120, + 287, + 168 + ], + "type": "text", + "content": " by correlating the exemplar feature vector " + }, + { + "bbox": [ + 47, + 120, + 287, + 168 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 47, + 120, + 287, + 168 + ], + "type": "text", + "content": " with the image feature map " + }, + { + "bbox": [ + 47, + 120, + 287, + 168 + ], + "type": "inline_equation", + "content": "F(I)" + }, + { + "bbox": [ + 47, + 120, + 287, + 168 + ], + "type": "text", + "content": ". 
Specifically, if " + }, + { + "bbox": [ + 47, + 120, + 287, + 168 + ], + "type": "inline_equation", + "content": "w_{ij} = F_{ij}(I)" + }, + { + "bbox": [ + 47, + 120, + 287, + 168 + ], + "type": "text", + "content": " is the channel feature at spatial position " + }, + { + "bbox": [ + 47, + 120, + 287, + 168 + ], + "type": "inline_equation", + "content": "(i,j)" + }, + { + "bbox": [ + 47, + 120, + 287, + 168 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 120, + 287, + 168 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 47, + 120, + 287, + 168 + ], + "type": "text", + "content": " can be computed by:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 129, + 178, + 287, + 194 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 178, + 287, + 194 + ], + "spans": [ + { + "bbox": [ + 129, + 178, + 287, + 194 + ], + "type": "interline_equation", + "content": "S _ {i j} (I, B) = w _ {i j} ^ {T} b. \\tag {1}", + "image_path": "6a537394bddc939529e4f470c316b94119081e55c27797b0f52094b551f9d7ad.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 197, + 287, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 197, + 287, + 232 + ], + "spans": [ + { + "bbox": [ + 47, + 197, + 287, + 232 + ], + "type": "text", + "content": "In the case where " + }, + { + "bbox": [ + 47, + 197, + 287, + 232 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 47, + 197, + 287, + 232 + ], + "type": "text", + "content": " exemplars are given, we use Eq. 1 to calculate " + }, + { + "bbox": [ + 47, + 197, + 287, + 232 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 47, + 197, + 287, + 232 + ], + "type": "text", + "content": " similarity maps, and the final similarity map is the average of these " + }, + { + "bbox": [ + 47, + 197, + 287, + 232 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 47, + 197, + 287, + 232 + ], + "type": "text", + "content": " similarity maps." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 233, + 287, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 233, + 287, + 281 + ], + "spans": [ + { + "bbox": [ + 47, + 233, + 287, + 281 + ], + "type": "text", + "content": "We then concatenate the image feature map " + }, + { + "bbox": [ + 47, + 233, + 287, + 281 + ], + "type": "inline_equation", + "content": "F(I)" + }, + { + "bbox": [ + 47, + 233, + 287, + 281 + ], + "type": "text", + "content": " with the similarity map " + }, + { + "bbox": [ + 47, + 233, + 287, + 281 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 47, + 233, + 287, + 281 + ], + "type": "text", + "content": ", and input them into the counter " + }, + { + "bbox": [ + 47, + 233, + 287, + 281 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 47, + 233, + 287, + 281 + ], + "type": "text", + "content": " to predict a density map " + }, + { + "bbox": [ + 47, + 233, + 287, + 281 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 47, + 233, + 287, + 281 + ], + "type": "text", + "content": ". 
The final predicted count " + }, + { + "bbox": [ + 47, + 233, + 287, + 281 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 47, + 233, + 287, + 281 + ], + "type": "text", + "content": " is obtained by summing over the predicted density map " + }, + { + "bbox": [ + 47, + 233, + 287, + 281 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 47, + 233, + 287, + 281 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 288, + 287, + 313 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 288, + 287, + 313 + ], + "spans": [ + { + "bbox": [ + 132, + 288, + 287, + 313 + ], + "type": "interline_equation", + "content": "N = \\sum_ {i, j} D _ {(i, j)}, \\tag {2}", + "image_path": "6a08caf6d7c57700f611b9ee926a9430a9cdf0c5d4b5c3f0a4b54c502abe8a7e.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 316, + 287, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 316, + 287, + 363 + ], + "spans": [ + { + "bbox": [ + 47, + 316, + 287, + 363 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 316, + 287, + 363 + ], + "type": "inline_equation", + "content": "D_{(i,j)}" + }, + { + "bbox": [ + 47, + 316, + 287, + 363 + ], + "type": "text", + "content": " denotes the density value for pixel " + }, + { + "bbox": [ + 47, + 316, + 287, + 363 + ], + "type": "inline_equation", + "content": "(i,j)" + }, + { + "bbox": [ + 47, + 316, + 287, + 363 + ], + "type": "text", + "content": ". The supervision signal for training the counting model is the " + }, + { + "bbox": [ + 47, + 316, + 287, + 363 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 47, + 316, + 287, + 363 + ], + "type": "text", + "content": " loss between the predicted density map and the ground truth density map:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 102, + 374, + 287, + 388 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 374, + 287, + 388 + ], + "spans": [ + { + "bbox": [ + 102, + 374, + 287, + 388 + ], + "type": "interline_equation", + "content": "L _ {\\text {c o u n t}} = \\left\\| D (I, B) - D ^ {*} (I) \\right\\| _ {2} ^ {2}, \\tag {3}", + "image_path": "16077280c9e1b90f0f8c51c4a9863671bb9c863e17370128f4bc8b4663e4ed74.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 392, + 242, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 392, + 242, + 404 + ], + "spans": [ + { + "bbox": [ + 47, + 392, + 242, + 404 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 392, + 242, + 404 + ], + "type": "inline_equation", + "content": "D^{*}" + }, + { + "bbox": [ + 47, + 392, + 242, + 404 + ], + "type": "text", + "content": " denotes the ground truth density map." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 411, + 196, + 424 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 411, + 196, + 424 + ], + "spans": [ + { + "bbox": [ + 47, + 411, + 196, + 424 + ], + "type": "text", + "content": "3.2. 
Zero-shot Object Counting" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 429, + 287, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 429, + 287, + 514 + ], + "spans": [ + { + "bbox": [ + 47, + 429, + 287, + 514 + ], + "type": "text", + "content": "In this section, we describe how we count objects of any unseen category given only the class name without access to any exemplar. Our strategy is to select a few patches in the image that can be used as exemplars for the base counting model. These patches are selected such that: 1) they contain the objects that we are counting and 2) they benefit the counting model, i.e., lead to small counting errors." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 527, + 217, + 539 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 527, + 217, + 539 + ], + "spans": [ + { + "bbox": [ + 47, + 527, + 217, + 539 + ], + "type": "text", + "content": "3.2.1 Selecting Class-relevant Patches" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 545, + 287, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 545, + 287, + 605 + ], + "spans": [ + { + "bbox": [ + 47, + 545, + 287, + 605 + ], + "type": "text", + "content": "To select patches that contain the objects of interest, we first generate a class prototype based on the given class name using a conditional VAE model. Then we randomly sample a number of patches across the query image and select the class-relevant patches based on the generated prototype." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 606, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 606, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 606, + 287, + 713 + ], + "type": "text", + "content": "Class prototype generation. Inspired by previous zero-shot learning approaches [44, 45], we train a conditional VAE model to generate features for an arbitrary class based on the semantic embedding of the class. The semantic embedding is obtained from a pre-trained text-vision model [32] given the corresponding class name. Specifically, we train the VAE model to reconstruct features in a pre-trained ImageNet feature space. 
The VAE is composed of an Encoder " + }, + { + "bbox": [ + 47, + 606, + 287, + 713 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 47, + 606, + 287, + 713 + ], + "type": "text", + "content": ", which maps a visual feature " + }, + { + "bbox": [ + 47, + 606, + 287, + 713 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 47, + 606, + 287, + 713 + ], + "type": "text", + "content": " to a latent code " + }, + { + "bbox": [ + 47, + 606, + 287, + 713 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 47, + 606, + 287, + 713 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 72, + 545, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 118 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 118 + ], + "type": "text", + "content": "and a decoder " + }, + { + "bbox": [ + 305, + 72, + 545, + 118 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 305, + 72, + 545, + 118 + ], + "type": "text", + "content": " which reconstructs " + }, + { + "bbox": [ + 305, + 72, + 545, + 118 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 305, + 72, + 545, + 118 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 305, + 72, + 545, + 118 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 305, + 72, + 545, + 118 + ], + "type": "text", + "content": ". Both " + }, + { + "bbox": [ + 305, + 72, + 545, + 118 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 305, + 72, + 545, + 118 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 72, + 545, + 118 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 305, + 72, + 545, + 118 + ], + "type": "text", + "content": " are conditioned on the semantic embedding " + }, + { + "bbox": [ + 305, + 72, + 545, + 118 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 305, + 72, + 545, + 118 + ], + "type": "text", + "content": ". The loss function for training this VAE for an input feature " + }, + { + "bbox": [ + 305, + 72, + 545, + 118 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 305, + 72, + 545, + 118 + ], + "type": "text", + "content": " can be defined as:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 358, + 127, + 545, + 156 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 127, + 545, + 156 + ], + "spans": [ + { + "bbox": [ + 358, + 127, + 545, + 156 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} L _ {V} (x) = \\operatorname {K L} (q (z | x, a) | | p (z | a)) \\tag {4} \\\\ - \\mathrm {E} _ {q (z | x, a)} [ \\log p (x | z, a) ]. 
\\\\ \\end{array}", + "image_path": "1e70d6ae8e05160695b3fb3b475afb49291bf0aa83ebe86d38fbd9441c22924d.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 163, + 545, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 163, + 545, + 234 + ], + "spans": [ + { + "bbox": [ + 305, + 163, + 545, + 234 + ], + "type": "text", + "content": "The first term is the Kullback-Leibler divergence between the VAE posterior " + }, + { + "bbox": [ + 305, + 163, + 545, + 234 + ], + "type": "inline_equation", + "content": "q(z|x,a)" + }, + { + "bbox": [ + 305, + 163, + 545, + 234 + ], + "type": "text", + "content": " and a prior distribution " + }, + { + "bbox": [ + 305, + 163, + 545, + 234 + ], + "type": "inline_equation", + "content": "p(z|a)" + }, + { + "bbox": [ + 305, + 163, + 545, + 234 + ], + "type": "text", + "content": ". The second term is the decoder's reconstruction error. " + }, + { + "bbox": [ + 305, + 163, + 545, + 234 + ], + "type": "inline_equation", + "content": "q(z|x,a)" + }, + { + "bbox": [ + 305, + 163, + 545, + 234 + ], + "type": "text", + "content": " is modeled as " + }, + { + "bbox": [ + 305, + 163, + 545, + 234 + ], + "type": "inline_equation", + "content": "E(x,a)" + }, + { + "bbox": [ + 305, + 163, + 545, + 234 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 163, + 545, + 234 + ], + "type": "inline_equation", + "content": "p(x|z,a)" + }, + { + "bbox": [ + 305, + 163, + 545, + 234 + ], + "type": "text", + "content": " is equal to " + }, + { + "bbox": [ + 305, + 163, + 545, + 234 + ], + "type": "inline_equation", + "content": "G(z,a)" + }, + { + "bbox": [ + 305, + 163, + 545, + 234 + ], + "type": "text", + "content": ". The prior distribution is assumed to be " + }, + { + "bbox": [ + 305, + 163, + 545, + 234 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(0,I)" + }, + { + "bbox": [ + 305, + 163, + 545, + 234 + ], + "type": "text", + "content": " for all classes." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 236, + 545, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 236, + 545, + 295 + ], + "spans": [ + { + "bbox": [ + 305, + 236, + 545, + 295 + ], + "type": "text", + "content": "We can use the trained VAE to generate the class prototype for an arbitrary target class for counting. 
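For readers who prefer the objective in code, here is a minimal sketch of Eq. 4, assuming hypothetical encoder and decoder callables where encoder(x, a) returns the mean and log-variance of q(z|x, a) and decoder(z, a) returns the reconstruction; using an MSE reconstruction term is an assumption standing in for the unspecified likelihood.

```python
import torch
import torch.nn.functional as F

def cvae_loss(encoder, decoder, x, a):
    """One step of the conditional-VAE objective in Eq. 4."""
    mu, logvar = encoder(x, a)
    z = mu + torch.randn_like(mu) * (0.5 * logvar).exp()   # reparameterization
    x_hat = decoder(z, a)
    # KL(q(z|x,a) || N(0, I)) in closed form for a diagonal Gaussian posterior
    kl = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=-1)
    # reconstruction term, -E[log p(x|z,a)], approximated here by an MSE
    recon = F.mse_loss(x_hat, x, reduction="none").sum(dim=-1)
    return (kl + recon).mean()
```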
Specifically, given the target class name " + }, + { + "bbox": [ + 305, + 236, + 545, + 295 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 305, + 236, + 545, + 295 + ], + "type": "text", + "content": ", we first generate a set of features by inputting the respective semantic vector " + }, + { + "bbox": [ + 305, + 236, + 545, + 295 + ], + "type": "inline_equation", + "content": "a^y" + }, + { + "bbox": [ + 305, + 236, + 545, + 295 + ], + "type": "text", + "content": " and a noise vector " + }, + { + "bbox": [ + 305, + 236, + 545, + 295 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 305, + 236, + 545, + 295 + ], + "type": "text", + "content": " to the decoder " + }, + { + "bbox": [ + 305, + 236, + 545, + 295 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 305, + 236, + 545, + 295 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 348, + 305, + 545, + 319 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 348, + 305, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 348, + 305, + 545, + 319 + ], + "type": "interline_equation", + "content": "\\mathbb {G} ^ {y} = \\{\\hat {x} | \\hat {x} = G (z, y), z \\sim \\mathcal {N} (0, I) \\}. \\tag {5}", + "image_path": "62a4b0bfc85b093e7867d4e3c09fd4c7e78d8ea3c021f00de41a7b670def8a8b.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 327, + 545, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 327, + 545, + 351 + ], + "spans": [ + { + "bbox": [ + 305, + 327, + 545, + 351 + ], + "type": "text", + "content": "The class prototype " + }, + { + "bbox": [ + 305, + 327, + 545, + 351 + ], + "type": "inline_equation", + "content": "\\mathfrak{p}^y" + }, + { + "bbox": [ + 305, + 327, + 545, + 351 + ], + "type": "text", + "content": " is computed by taking the mean of all the features generated by VAE:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 381, + 360, + 545, + 385 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 381, + 360, + 545, + 385 + ], + "spans": [ + { + "bbox": [ + 381, + 360, + 545, + 385 + ], + "type": "interline_equation", + "content": "\\mathrm {p} ^ {y} = \\frac {1}{| \\mathbb {G} ^ {y} |} \\sum_ {\\hat {x} \\in \\mathbb {G} ^ {y}} \\hat {x} \\tag {6}", + "image_path": "30632571ecc144d0f7b54d043c8e3aeb6f708b0c947ce35b1150899fc6d97efb.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 305, + 392, + 545, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 392, + 545, + 585 + ], + "spans": [ + { + "bbox": [ + 305, + 392, + 545, + 585 + ], + "type": "text", + "content": "Class-relevant patch selection. The generated class prototype can be considered as a class center representing the distribution of features of the corresponding class in the embedding space. Using the class prototype, we can select the class-relevant patches across the query image. 
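A small sketch of the prototype computation in Eqs. 5 and 6, reusing the hypothetical decoder from the previous sketch; the number of sampled features is an assumption, since the excerpt does not fix the size of the generated set.

```python
import torch

@torch.no_grad()
def class_prototype(decoder, a_y, num_samples=256, latent_dim=512):
    """Generate the class prototype p^y (Eqs. 5-6): decode noise vectors
    conditioned on the semantic embedding a^y and average the features."""
    z = torch.randn(num_samples, latent_dim)            # z ~ N(0, I)
    a = a_y.view(1, -1).expand(num_samples, -1)          # repeat the class embedding
    feats = decoder(z, a)                                 # G(z, a^y)
    return feats.mean(dim=0)                              # mean of generated features
```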
Specifically, we first randomly sample " + }, + { + "bbox": [ + 305, + 392, + 545, + 585 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 305, + 392, + 545, + 585 + ], + "type": "text", + "content": " patches of various sizes " + }, + { + "bbox": [ + 305, + 392, + 545, + 585 + ], + "type": "inline_equation", + "content": "\\{b_1, b_2, \\dots, b_m\\}" + }, + { + "bbox": [ + 305, + 392, + 545, + 585 + ], + "type": "text", + "content": " across the query image and extract their corresponding ImageNet features " + }, + { + "bbox": [ + 305, + 392, + 545, + 585 + ], + "type": "inline_equation", + "content": "\\{f_1, f_2, \\dots, f_m\\}" + }, + { + "bbox": [ + 305, + 392, + 545, + 585 + ], + "type": "text", + "content": ". To select the class-relevant patches, we calculate the " + }, + { + "bbox": [ + 305, + 392, + 545, + 585 + ], + "type": "inline_equation", + "content": "L_2" + }, + { + "bbox": [ + 305, + 392, + 545, + 585 + ], + "type": "text", + "content": " distance between the class prototype and the patch embedding, namely " + }, + { + "bbox": [ + 305, + 392, + 545, + 585 + ], + "type": "inline_equation", + "content": "d_i = \\| f_i - \\mathrm{p}^y\\|_2" + }, + { + "bbox": [ + 305, + 392, + 545, + 585 + ], + "type": "text", + "content": ". Then we select the patches whose embeddings are the " + }, + { + "bbox": [ + 305, + 392, + 545, + 585 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 305, + 392, + 545, + 585 + ], + "type": "text", + "content": "-nearest neighbors of the class prototype as the class-relevant patches. Since the ImageNet feature space is highly discriminative, i.e., features close to each other typically belong to the same class, the selected patches are likely to contain the objects of the target class." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 306, + 599, + 484, + 611 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 599, + 484, + 611 + ], + "spans": [ + { + "bbox": [ + 306, + 599, + 484, + 611 + ], + "type": "text", + "content": "3.2.2 Selecting Exemplars for Counting" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 305, + 617, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 617, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 617, + 545, + 713 + ], + "type": "text", + "content": "Given a set of class-relevant patches and a pre-trained exemplar-based object counter, we aim to select a few exemplars from these patches that are optimal for counting. To do so, we introduce an error prediction network that predicts the counting error of an arbitrary patch when the patch is used as the exemplar. The counting error is calculated from the pre-trained counting model. 
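Before turning to the error predictor, note that the class-relevant selection described in Sec. 3.2.1 reduces to a k-nearest-neighbour query in the ImageNet feature space; a sketch follows, with k = 10 taken from the implementation details in Sec. 4.1 and the helper name being an illustrative assumption.

```python
import torch

def select_class_relevant(patch_feats, prototype, k=10):
    """Pick the k patches whose ImageNet features lie closest to the class
    prototype.  patch_feats: (M, d) features of the M randomly sampled
    patches; prototype: (d,)."""
    dists = (patch_feats - prototype).norm(dim=1)        # d_i = ||f_i - p^y||_2
    return torch.topk(dists, k, largest=False).indices   # indices of the k nearest patches
```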
Specifically, to train this error predictor, given a query image " + }, + { + "bbox": [ + 305, + 617, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\bar{I}" + }, + { + "bbox": [ + 305, + 617, + 545, + 713 + ], + "type": "text", + "content": " and an arbitrary patch" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15551" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "\\bar{B}" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": " cropped from " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "\\bar{I}" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": ", we first use the base counting model to get the image feature map " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "F(\\bar{I})" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": ", similarity map " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "\\bar{S}" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": ", and the final predicted density map " + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "\\bar{D}" + }, + { + "bbox": [ + 47, + 72, + 287, + 120 + ], + "type": "text", + "content": ". The counting error of the base counting model can be written as:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 118, + 129, + 287, + 156 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 129, + 287, + 156 + ], + "spans": [ + { + "bbox": [ + 118, + 129, + 287, + 156 + ], + "type": "interline_equation", + "content": "\\epsilon = \\left| \\sum_ {i, j} \\bar {D} _ {(i, j)} - \\bar {N} ^ {*} \\right|, \\tag {7}", + "image_path": "8e607ba4ac822e6db94a329fed21f1b0dcf242c2a91cd86de87f830115c5bf93.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 159, + 287, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 159, + 287, + 208 + ], + "spans": [ + { + "bbox": [ + 47, + 159, + 287, + 208 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 159, + 287, + 208 + ], + "type": "inline_equation", + "content": "\\bar{N}^*" + }, + { + "bbox": [ + 47, + 159, + 287, + 208 + ], + "type": "text", + "content": " denotes the ground truth object count in image " + }, + { + "bbox": [ + 47, + 159, + 287, + 208 + ], + "type": "inline_equation", + "content": "\\bar{I}" + }, + { + "bbox": [ + 47, + 159, + 287, + 208 + ], + "type": "text", + "content": ". 
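A sketch of the error-predictor side of the pipeline: the patch-conditioned density map and Eq. 7 give the regression target, and, as detailed in the following paragraphs, R is trained with an MSE loss and then used to rank the class-relevant candidates. The tensor shapes (batched, channel-first), the error_predictor callable, and the helper names are illustrative assumptions.

```python
import torch
import torch.nn.functional as F

def counting_error(density_pred, gt_count):
    """Eq. 7: absolute gap between the summed density map obtained with a
    candidate patch as exemplar and the ground-truth count of the image."""
    return (density_pred.sum() - gt_count).abs()

def predictor_loss(error_predictor, feat_map, sim_map, eps):
    """MSE regression of the counting error: R takes the channel-wise
    concatenation of F(I) and S and is trained to match the error eps
    produced by the frozen base counter."""
    pred = error_predictor(torch.cat([feat_map, sim_map], dim=1))
    return F.mse_loss(pred, eps)

@torch.no_grad()
def rank_candidates(error_predictor, feat_map, sim_maps):
    """At test time, score each class-relevant candidate by its predicted
    counting error; patches with the smallest predicted errors are kept."""
    preds = [error_predictor(torch.cat([feat_map, s], dim=1)) for s in sim_maps]
    return torch.stack(preds).flatten().argsort()   # candidate indices, best first
```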
" + }, + { + "bbox": [ + 47, + 159, + 287, + 208 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 47, + 159, + 287, + 208 + ], + "type": "text", + "content": " can be used to measure the goodness of " + }, + { + "bbox": [ + 47, + 159, + 287, + 208 + ], + "type": "inline_equation", + "content": "\\bar{B}" + }, + { + "bbox": [ + 47, + 159, + 287, + 208 + ], + "type": "text", + "content": " as an exemplar for " + }, + { + "bbox": [ + 47, + 159, + 287, + 208 + ], + "type": "inline_equation", + "content": "\\bar{I}" + }, + { + "bbox": [ + 47, + 159, + 287, + 208 + ], + "type": "text", + "content": ", i.e., a small " + }, + { + "bbox": [ + 47, + 159, + 287, + 208 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 47, + 159, + 287, + 208 + ], + "type": "text", + "content": " indicates that " + }, + { + "bbox": [ + 47, + 159, + 287, + 208 + ], + "type": "inline_equation", + "content": "\\bar{B}" + }, + { + "bbox": [ + 47, + 159, + 287, + 208 + ], + "type": "text", + "content": " is a suitable exemplar for counting and vice versa." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 209, + 287, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 209, + 287, + 292 + ], + "spans": [ + { + "bbox": [ + 46, + 209, + 287, + 292 + ], + "type": "text", + "content": "The error predictor " + }, + { + "bbox": [ + 46, + 209, + 287, + 292 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 46, + 209, + 287, + 292 + ], + "type": "text", + "content": " is trained to regress the counting error produced by the base counting model. The input of " + }, + { + "bbox": [ + 46, + 209, + 287, + 292 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 46, + 209, + 287, + 292 + ], + "type": "text", + "content": " is the channel-wise concatenation of the image feature map " + }, + { + "bbox": [ + 46, + 209, + 287, + 292 + ], + "type": "inline_equation", + "content": "F(\\bar{I})" + }, + { + "bbox": [ + 46, + 209, + 287, + 292 + ], + "type": "text", + "content": " and the similarity map " + }, + { + "bbox": [ + 46, + 209, + 287, + 292 + ], + "type": "inline_equation", + "content": "\\tilde{S}" + }, + { + "bbox": [ + 46, + 209, + 287, + 292 + ], + "type": "text", + "content": ". The training objective is the minimization of the mean squared error between the output of the predictor " + }, + { + "bbox": [ + 46, + 209, + 287, + 292 + ], + "type": "inline_equation", + "content": "R(F(\\bar{I}),\\bar{S})" + }, + { + "bbox": [ + 46, + 209, + 287, + 292 + ], + "type": "text", + "content": " and the actual counting error produced by the base counting model " + }, + { + "bbox": [ + 46, + 209, + 287, + 292 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 46, + 209, + 287, + 292 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 293, + 287, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 293, + 287, + 388 + ], + "spans": [ + { + "bbox": [ + 46, + 293, + 287, + 388 + ], + "type": "text", + "content": "After the error predictor is trained, we can use it to select the optimal patches for counting. The candidates for selection here are the class-relevant patches selected by the class prototype in the previous step. For each candidate patch, we use the trained error predictor to infer the counting error when it is being used as the exemplar. 
The final selected patches for counting are the patches that yield the top-" + }, + { + "bbox": [ + 46, + 293, + 287, + 388 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 46, + 293, + 287, + 388 + ], + "type": "text", + "content": " smallest counting errors." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 404, + 253, + 417 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 404, + 253, + 417 + ], + "spans": [ + { + "bbox": [ + 47, + 404, + 253, + 417 + ], + "type": "text", + "content": "3.2.3 Using the Selected Patches as Exemplars" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 423, + 287, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 423, + 287, + 520 + ], + "spans": [ + { + "bbox": [ + 46, + 423, + 287, + 520 + ], + "type": "text", + "content": "Using the error predictor, we predict the error for each candidate patch and select the patches that lead to the smallest counting errors. The selected patches can then be used as exemplars for the base counting model to get the density map and the final count. We also conduct experiments to show that these selected patches can serve as exemplars for other exemplar-based counting models to achieve exemplar-free class-agnostic counting." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 531, + 128, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 531, + 128, + 544 + ], + "spans": [ + { + "bbox": [ + 47, + 531, + 128, + 544 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 551, + 180, + 563 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 551, + 180, + 563 + ], + "spans": [ + { + "bbox": [ + 47, + 551, + 180, + 563 + ], + "type": "text", + "content": "4.1. Implementation Details" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 570, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 287, + 713 + ], + "type": "text", + "content": "Network architecture For the base counting model, we use ResNet-50 as the backbone of the feature extractor, initialized with the weights of a pre-trained ImageNet model. The backbone outputs feature maps of 1024 channels. For each query image, the number of channels is reduced to 256 using an " + }, + { + "bbox": [ + 46, + 570, + 287, + 713 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 46, + 570, + 287, + 713 + ], + "type": "text", + "content": " convolution. For each exemplar, the feature maps are first processed with global average pooling and then linearly mapped to obtain a 256-d feature vector. The counter consists of 5 convolutional and bilinear upsampling layers to regress a density map of the same size as the query image. For the feature generation model, both the encoder and the decoder are two-layer fully-connected" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": "(FC) networks with 4096 hidden units. LeakyReLU and ReLU are the non-linear activation functions in the hidden and output layers, respectively. The dimensions of the latent space and the semantic embeddings are both set to be 512. 
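The two-layer fully-connected encoder and decoder described above might look as follows in PyTorch; the split of the encoder output into mean and log-variance heads and the visual feature dimension d_x are assumptions made for this sketch.

```python
import torch
import torch.nn as nn

class CondEncoder(nn.Module):
    """Two-layer FC encoder for q(z|x, a): 4096 hidden units with LeakyReLU,
    512-d latent space, conditioned on a 512-d semantic embedding."""
    def __init__(self, d_x, d_a=512, d_z=512, hidden=4096):
        super().__init__()
        self.hidden = nn.Sequential(nn.Linear(d_x + d_a, hidden), nn.LeakyReLU())
        self.mu = nn.Linear(hidden, d_z)
        self.logvar = nn.Linear(hidden, d_z)

    def forward(self, x, a):
        h = self.hidden(torch.cat([x, a], dim=-1))
        return self.mu(h), self.logvar(h)

class CondDecoder(nn.Module):
    """Two-layer FC decoder G(z, a): LeakyReLU in the hidden layer,
    ReLU on the output layer."""
    def __init__(self, d_x, d_a=512, d_z=512, hidden=4096):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(d_z + d_a, hidden), nn.LeakyReLU(),
                                 nn.Linear(hidden, d_x), nn.ReLU())

    def forward(self, z, a):
        return self.net(torch.cat([z, a], dim=-1))
```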
For the error predictor, 5 convolutional and bilinear upsampling layers are followed by a linear layer to output the counting error." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 156, + 545, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 156, + 545, + 263 + ], + "spans": [ + { + "bbox": [ + 304, + 156, + 545, + 263 + ], + "type": "text", + "content": "Dataset We use the FSC-147 dataset [34] to train the base counting model and the error predictor. FSC-147 is the first large-scale dataset for class-agnostic counting. It includes 6135 images from 147 categories varying from animals, kitchen utensils, to vehicles. The categories in the training, validation, and test sets do not overlap. The feature generator is trained on the MS-COCO detection dataset. Note that the previous exemplar-free method [33] also uses MS-COCO to pre-train their counter." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 264, + 545, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 264, + 545, + 456 + ], + "spans": [ + { + "bbox": [ + 304, + 264, + 545, + 456 + ], + "type": "text", + "content": "Training details Both the base counting model and the error predictor are trained using the AdamW optimizer with a fixed learning rate of " + }, + { + "bbox": [ + 304, + 264, + 545, + 456 + ], + "type": "inline_equation", + "content": "10^{-5}" + }, + { + "bbox": [ + 304, + 264, + 545, + 456 + ], + "type": "text", + "content": ". The base counting model is trained for 300 epochs with a batch size of 8. We resize the input query image to a fixed height of 384, and the width is adjusted accordingly to preserve the aspect ratio of the original image. Exemplars are resized to " + }, + { + "bbox": [ + 304, + 264, + 545, + 456 + ], + "type": "inline_equation", + "content": "128 \\times 128" + }, + { + "bbox": [ + 304, + 264, + 545, + 456 + ], + "type": "text", + "content": " before being input into the feature extractor. The feature generation model is trained using the Adam optimizer and the learning rate is set to be " + }, + { + "bbox": [ + 304, + 264, + 545, + 456 + ], + "type": "inline_equation", + "content": "10^{-4}" + }, + { + "bbox": [ + 304, + 264, + 545, + 456 + ], + "type": "text", + "content": ". The semantic embeddings are extracted from CLIP [32]. To select the class-relevant patches, we randomly sample 450 boxes of various sizes across the input query image and select 10 patches whose embeddings are the 10-nearest neighbors of the class prototype. The final selected patches are those that yield the top-3 smallest counting errors predicted by the error predictor." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 463, + 419, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 463, + 419, + 475 + ], + "spans": [ + { + "bbox": [ + 306, + 463, + 419, + 475 + ], + "type": "text", + "content": "4.2. Evaluation Metrics" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 481, + 545, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 481, + 545, + 604 + ], + "spans": [ + { + "bbox": [ + 304, + 481, + 545, + 604 + ], + "type": "text", + "content": "We use Mean Average Error (MAE) and Root Mean Squared Error (RMSE) to measure the performance of different object counters. Besides, we follow [31] to report the Normalized Relative Error (NAE) and Squared Relative Error (SRE). 
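The four metrics, whose exact definitions are given next, reduce to a short NumPy helper over the per-image ground-truth and predicted counts; the function name is an illustrative assumption.

```python
import numpy as np

def counting_metrics(y_true, y_pred):
    """MAE, RMSE, NAE and SRE over a set of test images."""
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    err = y_pred - y_true
    mae = np.abs(err).mean()
    rmse = np.sqrt((err ** 2).mean())
    nae = (np.abs(err) / y_true).mean()
    sre = np.sqrt(((err ** 2) / y_true).mean())
    return mae, rmse, nae, sre
```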
In particular, MAE = " + }, + { + "bbox": [ + 304, + 481, + 545, + 604 + ], + "type": "inline_equation", + "content": "\\frac{1}{n}\\sum_{i=1}^{n}|y_i - \\hat{y}_i|" + }, + { + "bbox": [ + 304, + 481, + 545, + 604 + ], + "type": "text", + "content": "; RMSE = " + }, + { + "bbox": [ + 304, + 481, + 545, + 604 + ], + "type": "inline_equation", + "content": "\\sqrt{\\frac{1}{n}\\sum_{i=1}^{n}(y_i - \\hat{y}_i)^2}" + }, + { + "bbox": [ + 304, + 481, + 545, + 604 + ], + "type": "text", + "content": "; NAE = " + }, + { + "bbox": [ + 304, + 481, + 545, + 604 + ], + "type": "inline_equation", + "content": "\\frac{1}{n}\\sum_{i=1}^{n}\\frac{|y_i - \\hat{y}_i|}{y_i}" + }, + { + "bbox": [ + 304, + 481, + 545, + 604 + ], + "type": "text", + "content": "; SRE = " + }, + { + "bbox": [ + 304, + 481, + 545, + 604 + ], + "type": "inline_equation", + "content": "\\sqrt{\\frac{1}{n}\\sum_{i=1}^{n}\\frac{(y_i - \\hat{y}_i)^2}{y_i}}" + }, + { + "bbox": [ + 304, + 481, + 545, + 604 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 304, + 481, + 545, + 604 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 304, + 481, + 545, + 604 + ], + "type": "text", + "content": " is the number of test images, and " + }, + { + "bbox": [ + 304, + 481, + 545, + 604 + ], + "type": "inline_equation", + "content": "y_i" + }, + { + "bbox": [ + 304, + 481, + 545, + 604 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 481, + 545, + 604 + ], + "type": "inline_equation", + "content": "\\hat{y}_i" + }, + { + "bbox": [ + 304, + 481, + 545, + 604 + ], + "type": "text", + "content": " are the ground truth and the predicted number of objects for image " + }, + { + "bbox": [ + 304, + 481, + 545, + 604 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 481, + 545, + 604 + ], + "type": "text", + "content": " respectively." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 611, + 425, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 611, + 425, + 624 + ], + "spans": [ + { + "bbox": [ + 306, + 611, + 425, + 624 + ], + "type": "text", + "content": "4.3. Comparing Methods" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "content": "We compare our method with the previous works on class-agnostic counting. RepRPN-Counter [33] is the only previous class-agnostic counting method that does not require human-annotated exemplars as input. In order to make other exemplar based class-agnostic methods including GMN (General Matching Network [28]), FamNet (Few-shot adaptation and matching Network [34]) and BMNet" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15552" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 117, + 70, + 478, + 234 + ], + "blocks": [ + { + "bbox": [ + 117, + 70, + 478, + 234 + ], + "lines": [ + { + "bbox": [ + 117, + 70, + 478, + 234 + ], + "spans": [ + { + "bbox": [ + 117, + 70, + 478, + 234 + ], + "type": "table", + "html": "
Method | Exemplars | Val MAE | Val RMSE | Val NAE | Val SRE | Test MAE | Test RMSE | Test NAE | Test SRE
GMN [28] | GT | 29.66 | 89.81 | - | - | 26.52 | 124.57 | - | -
GMN [28] | RPN | 40.96 | 108.47 | - | - | 39.72 | 142.81 | - | -
FamNet+ [34] | GT | 23.75 | 69.07 | 0.52 | 4.25 | 22.08 | 99.54 | 0.44 | 6.45
FamNet+ [34] | RPN | 42.85 | 121.59 | 0.75 | 6.94 | 42.70 | 146.08 | 0.74 | 7.14
BMNet [38] | GT | 19.06 | 67.95 | 0.26 | 4.39 | 16.71 | 103.31 | 0.26 | 3.32
BMNet [38] | RPN | 37.26 | 108.54 | 0.42 | 5.43 | 37.22 | 143.13 | 0.41 | 5.31
BMNet+ [38] | GT | 15.74 | 58.53 | 0.27 | 6.57 | 14.62 | 91.83 | 0.25 | 2.74
BMNet+ [38] | RPN | 35.15 | 106.07 | 0.41 | 5.28 | 34.52 | 132.64 | 0.39 | 5.26
RepRPN-Counter [33] | - | 30.40 | 98.73 | - | - | 27.45 | 129.69 | - | -
Ours (Base) | GT | 18.55 | 61.12 | 0.30 | 3.18 | 20.68 | 109.14 | 0.36 | 7.63
Ours (Base) | RPN | 32.19 | 99.21 | 0.38 | 4.80 | 29.25 | 130.65 | 0.35 | 4.35
Ours (Base) | Patch-Selection | 26.93 | 88.63 | 0.36 | 4.26 | 22.09 | 115.17 | 0.34 | 3.74
", + "image_path": "a1d0ad93004fb7f7365ac0106411ed0f96180ca75cccb8b3964276fcb238c793.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 237, + 547, + 270 + ], + "lines": [ + { + "bbox": [ + 46, + 237, + 547, + 270 + ], + "spans": [ + { + "bbox": [ + 46, + 237, + 547, + 270 + ], + "type": "text", + "content": "Table 1. Quantitative comparisons on the FSC-147 dataset. \"GT\" denotes using human-annotated boxes as exemplars. \"RPN\" denotes using the top-3 RPN proposals with the highest objectness scores as exemplars. \"Patch-Selection\" denotes using our selected patches as exemplars." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 283, + 289, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 283, + 289, + 379 + ], + "spans": [ + { + "bbox": [ + 46, + 283, + 289, + 379 + ], + "type": "text", + "content": "(Bilinear Matching Network [38]) work in the exemplar-free setup, we replace the human-provided exemplars with the exemplars generated by a pre-trained object detector. Specifically, we use the RPN of Faster RCNN pre-trained on MS-COCO dataset and select the top-3 proposals with the highest objectness score as the exemplars. We also include the performance of these methods using human-annotated exemplars for a complete comparison." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 392, + 105, + 403 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 392, + 105, + 403 + ], + "spans": [ + { + "bbox": [ + 47, + 392, + 105, + 403 + ], + "type": "text", + "content": "4.4. Results" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 412, + 287, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 412, + 287, + 604 + ], + "spans": [ + { + "bbox": [ + 46, + 412, + 287, + 604 + ], + "type": "text", + "content": "Quantitative results. As shown in Table 1, our proposed method outperforms the previous exemplar-free counting method [33] by a large margin, resulting in a reduction of 10.10 w.r.t. the validation RMSE and 14.52 w.r.t. the test RMSE. We also notice that the performance of all exemplar-based counting methods drops significantly when replacing human-annotated exemplars with RPN generated proposals. The state-of-the-art exemplar-based method BMNet+ [38], for example, shows an 19.90 error increase w.r.t. the test MAE and a 40.81 increase w.r.t. the test RMSE. In comparison, the performance gap is much smaller when using our selected patches as exemplars, as reflected by a 1.41 increase w.r.t. the test MAE and a 6.03 increase w.r.t. the test RMSE. Noticeably, the NAE and the SRE on the test set are even reduced when using our selected patches compared with the human-annotated exemplars." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 605, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 288, + 715 + ], + "type": "text", + "content": "Qualitative analysis. In Figure 4, we present a few input images, the image patches selected by our method, and the corresponding density maps. Our method effectively identifies the patches that are suitable for object counting. The density maps produced by our selected patches are meaningful and close to the density maps produced by human-annotated patches. 
The counting model with random image patches as exemplars, in comparison, fails to output meaningful density maps and infers incorrect object counts." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 281, + 367, + 295 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 281, + 367, + 295 + ], + "spans": [ + { + "bbox": [ + 306, + 281, + 367, + 295 + ], + "type": "text", + "content": "5. Analyses" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 302, + 406, + 315 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 302, + 406, + 315 + ], + "spans": [ + { + "bbox": [ + 306, + 302, + 406, + 315 + ], + "type": "text", + "content": "5.1. Ablation Studies" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 323, + 545, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 323, + 545, + 501 + ], + "spans": [ + { + "bbox": [ + 304, + 323, + 545, + 501 + ], + "type": "text", + "content": "Our proposed patch selection method consists of two steps: the selection of class-relevant patches via a generated class prototype and the selection of the optimal patches via an error predictor. We analyze the contribution of each step quantitatively and qualitatively. Quantitative results are in Table 2. We first evaluate the performance of our baseline, i.e. using 3 randomly sampled patches as exemplars without any selection step. As shown in Table 2, using the class prototype to select class-relevant patches reduces the error rate by 7.19 and 6.07 on the validation and test set of MAE, respectively. Applying the error predictor can improve the baseline performance by 7.22 on the validation MAE and 7.57 on the test MAE. Finally, applying the two components together further boosts performance, achieving 26.93 on the validation MAE and 22.09 on the test MAE." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 503, + 545, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 503, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 304, + 503, + 545, + 635 + ], + "type": "text", + "content": "We provide further qualitative analysis by visualizing the selected patches. As shown in Figure 5, for each input query image, we show 10 class-relevant patches selected using our generated prototype, ranked by their predicted counting error (from low to high). All the 10 selected class-relevant patches exhibit some class specific features. However, not all these patches are suitable to be used as counting exemplars, i.e., some patches only contain parts of the object, and some patches contain some background. By further applying our proposed error predictor, we can identify the most suitable patches with the smallest predicted counting errors." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 646, + 533, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 646, + 533, + 658 + ], + "spans": [ + { + "bbox": [ + 305, + 646, + 533, + 658 + ], + "type": "text", + "content": "5.2. Generalization to Exemplar-based Methods" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 666, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 666, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 666, + 545, + 713 + ], + "type": "text", + "content": "Our proposed method can be considered as a general patch selection method that is applicable to other visual counters to achieve exemplar-free counting. 
To verify that, we use our selected patches as the exemplars for three" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15553" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 72, + 175, + 133 + ], + "blocks": [ + { + "bbox": [ + 109, + 72, + 175, + 133 + ], + "lines": [ + { + "bbox": [ + 109, + 72, + 175, + 133 + ], + "spans": [ + { + "bbox": [ + 109, + 72, + 175, + 133 + ], + "type": "image", + "image_path": "fe4a6ef528c84641fb0e5dd34b44a7a8276d88d070b0e4c619d7316609b7c106.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 193, + 72, + 280, + 132 + ], + "blocks": [ + { + "bbox": [ + 193, + 72, + 280, + 132 + ], + "lines": [ + { + "bbox": [ + 193, + 72, + 280, + 132 + ], + "spans": [ + { + "bbox": [ + 193, + 72, + 280, + 132 + ], + "type": "image", + "image_path": "8ca765e47c0c7daa9642f1120b10640503f129a405c3ea0307f1142e0fc7fcdb.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 291, + 72, + 377, + 133 + ], + "blocks": [ + { + "bbox": [ + 291, + 72, + 377, + 133 + ], + "lines": [ + { + "bbox": [ + 291, + 72, + 377, + 133 + ], + "spans": [ + { + "bbox": [ + 291, + 72, + 377, + 133 + ], + "type": "image", + "image_path": "d1909e34066588d405bc4d78523cb7389e762ed28ac84e5b336bbde8d1ade92a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 388, + 72, + 474, + 133 + ], + "blocks": [ + { + "bbox": [ + 388, + 72, + 474, + 133 + ], + "lines": [ + { + "bbox": [ + 388, + 72, + 474, + 133 + ], + "spans": [ + { + "bbox": [ + 388, + 72, + 474, + 133 + ], + "type": "image", + "image_path": "94bdd630442b3b673505ae494ab3ffa239ebd0023791386d504855f8c24bc4f2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 109, + 137, + 175, + 201 + ], + "blocks": [ + { + "bbox": [ + 109, + 137, + 175, + 201 + ], + "lines": [ + { + "bbox": [ + 109, + 137, + 175, + 201 + ], + "spans": [ + { + "bbox": [ + 109, + 137, + 175, + 201 + ], + "type": "image", + "image_path": "eb3009146f36d56ad021f85e0b4771c8f06a5c28b738f5771aa15c3230cbd957.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 193, + 137, + 280, + 201 + ], + "blocks": [ + { + "bbox": [ + 193, + 137, + 280, + 201 + ], + "lines": [ + { + "bbox": [ + 193, + 137, + 280, + 201 + ], + "spans": [ + { + "bbox": [ + 193, + 137, + 280, + 201 + ], + "type": "image", + "image_path": "f374c2dfeec04c74b949d57f39d8a02f9f0835f1b3f4d6a9028d61dca898e190.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 291, + 137, + 377, + 200 + ], + "blocks": [ + { + "bbox": [ + 291, + 137, + 377, + 200 + ], + "lines": [ + { + "bbox": [ + 291, + 137, + 377, + 200 + ], + "spans": [ + { + "bbox": [ + 291, + 137, + 377, + 200 + ], + "type": "image", + "image_path": 
"8b309d9a73c65e4e982e77d9e88ec2e160d479f751387141841d308fcf13b8b8.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 388, + 137, + 473, + 200 + ], + "blocks": [ + { + "bbox": [ + 388, + 137, + 473, + 200 + ], + "lines": [ + { + "bbox": [ + 388, + 137, + 473, + 200 + ], + "spans": [ + { + "bbox": [ + 388, + 137, + 473, + 200 + ], + "type": "image", + "image_path": "29b47f7e9b6a9d5ea8fdf3d200a38bfe0d9b75ca54f0b71e926e53ca6fb5f1d6.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 109, + 208, + 175, + 258 + ], + "blocks": [ + { + "bbox": [ + 109, + 208, + 175, + 258 + ], + "lines": [ + { + "bbox": [ + 109, + 208, + 175, + 258 + ], + "spans": [ + { + "bbox": [ + 109, + 208, + 175, + 258 + ], + "type": "image", + "image_path": "4bc34c2ca1145c11f19f0a3e9fc03a84e2f29e8d5859ac7cd3b0be5486b693f2.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 194, + 205, + 280, + 263 + ], + "blocks": [ + { + "bbox": [ + 194, + 205, + 280, + 263 + ], + "lines": [ + { + "bbox": [ + 194, + 205, + 280, + 263 + ], + "spans": [ + { + "bbox": [ + 194, + 205, + 280, + 263 + ], + "type": "image", + "image_path": "a44cff7de693c74ae8f4bfd3d996ad58aa1ae282811b5b9f872751ffde832b3b.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 291, + 205, + 377, + 263 + ], + "blocks": [ + { + "bbox": [ + 291, + 205, + 377, + 263 + ], + "lines": [ + { + "bbox": [ + 291, + 205, + 377, + 263 + ], + "spans": [ + { + "bbox": [ + 291, + 205, + 377, + 263 + ], + "type": "image", + "image_path": "531c80e23d0421edb6b176462146e43fb76a7e403dba021f57de93950817aaf1.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 388, + 205, + 473, + 263 + ], + "blocks": [ + { + "bbox": [ + 388, + 205, + 473, + 263 + ], + "lines": [ + { + "bbox": [ + 388, + 205, + 473, + 263 + ], + "spans": [ + { + "bbox": [ + 388, + 205, + 473, + 263 + ], + "type": "image", + "image_path": "6b2b203afd898aae2fa9579837ebd56b1f319d0c70f9cab13d61a3f08683ae32.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 109, + 267, + 173, + 325 + ], + "blocks": [ + { + "bbox": [ + 109, + 267, + 173, + 325 + ], + "lines": [ + { + "bbox": [ + 109, + 267, + 173, + 325 + ], + "spans": [ + { + "bbox": [ + 109, + 267, + 173, + 325 + ], + "type": "image", + "image_path": "330cdba23ac7d59f52bf378aadf8438e3bcadeb2a7bd0b1e8c7b423755924c50.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 340, + 547, + 374 + ], + "lines": [ + { + "bbox": [ + 46, + 340, + 547, + 374 + ], + "spans": [ + { + "bbox": [ + 46, + 340, + 547, + 374 + ], + "type": "text", + "content": "Figure 4. Qualitative results on the FSC-147 dataset. We show the counting exemplars and the corresponding density maps of ground truth boxes, randomly selected patches, and our selected patches respectively. Predicted counting results are shown at the top-right corner. Our method accurately identifies suitable patches for counting and the predicted density maps are close to the ground truth density maps." 
+ } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 194, + 266, + 280, + 324 + ], + "blocks": [ + { + "bbox": [ + 194, + 266, + 280, + 324 + ], + "lines": [ + { + "bbox": [ + 194, + 266, + 280, + 324 + ], + "spans": [ + { + "bbox": [ + 194, + 266, + 280, + 324 + ], + "type": "image", + "image_path": "69dbf44a6ceaf9a2dcb2374ab289efdabb0dfd9300461637ca541da95e0d99b0.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 219, + 327, + 269, + 337 + ], + "lines": [ + { + "bbox": [ + 219, + 327, + 269, + 337 + ], + "spans": [ + { + "bbox": [ + 219, + 327, + 269, + 337 + ], + "type": "text", + "content": "Ground Truth" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 291, + 266, + 377, + 324 + ], + "blocks": [ + { + "bbox": [ + 291, + 266, + 377, + 324 + ], + "lines": [ + { + "bbox": [ + 291, + 266, + 377, + 324 + ], + "spans": [ + { + "bbox": [ + 291, + 266, + 377, + 324 + ], + "type": "image", + "image_path": "9158edf2d16c61e5c3520586e1b488792e3cffbdd9fdd417458ba156bad48009.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 388, + 266, + 473, + 324 + ], + "blocks": [ + { + "bbox": [ + 388, + 266, + 473, + 324 + ], + "lines": [ + { + "bbox": [ + 388, + 266, + 473, + 324 + ], + "spans": [ + { + "bbox": [ + 388, + 266, + 473, + 324 + ], + "type": "image", + "image_path": "289d8be36828979647e6e733900eba80a45585256e711c64af9db544502c13a4.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 422, + 327, + 441, + 337 + ], + "lines": [ + { + "bbox": [ + 422, + 327, + 441, + 337 + ], + "spans": [ + { + "bbox": [ + 422, + 327, + 441, + 337 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 103, + 381, + 491, + 529 + ], + "blocks": [ + { + "bbox": [ + 103, + 381, + 491, + 529 + ], + "lines": [ + { + "bbox": [ + 103, + 381, + 491, + 529 + ], + "spans": [ + { + "bbox": [ + 103, + 381, + 491, + 529 + ], + "type": "image", + "image_path": "2bf6900cbb2c147be150f90b4bc4c32a82b1862130611c1fbf3e2a15615a7e57.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 533, + 546, + 556 + ], + "lines": [ + { + "bbox": [ + 46, + 533, + 546, + 556 + ], + "spans": [ + { + "bbox": [ + 46, + 533, + 546, + 556 + ], + "type": "text", + "content": "Figure 5. Qualitative ablation analysis. All the 10 selected class-relevant patches exhibit some class-specific attributes. They are ranked by the predicted counting errors and the final selected patches with the smallest errors are framed in green." + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "table", + "bbox": [ + 48, + 570, + 288, + 627 + ], + "blocks": [ + { + "bbox": [ + 48, + 570, + 288, + 627 + ], + "lines": [ + { + "bbox": [ + 48, + 570, + 288, + 627 + ], + "spans": [ + { + "bbox": [ + 48, + 570, + 288, + 627 + ], + "type": "table", + "html": "
Prototype | Predictor | Val MAE | Val RMSE | Val NAE | Val SRE | Test MAE | Test RMSE | Test NAE | Test SRE
- | - | 35.20 | 106.70 | 0.61 | 6.68 | 31.37 | 134.98 | 0.52 | 5.92
✓ | - | 28.01 | 88.29 | 0.39 | 4.66 | 25.30 | 113.82 | 0.40 | 4.88
- | ✓ | 27.98 | 88.62 | 0.43 | 4.59 | 23.80 | 128.36 | 0.40 | 4.43
✓ | ✓ | 26.93 | 88.63 | 0.36 | 4.26 | 22.09 | 115.17 | 0.34 | 3.74
", + "image_path": "c1827ed886ca7a02a6055278c94d46bb4b724acc6a035c02b093ab5e21bfc5d1.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "table_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 46, + 635, + 288, + 681 + ], + "lines": [ + { + "bbox": [ + 46, + 635, + 288, + 681 + ], + "spans": [ + { + "bbox": [ + 46, + 635, + 288, + 681 + ], + "type": "text", + "content": "Table 2. Ablation study on each component's contribution to the final results. We show the effectiveness of the two steps of our framework: selecting class-relevant patches via a generated class prototype and selecting optimal patches via an error predictor." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 571, + 547, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 571, + 547, + 704 + ], + "spans": [ + { + "bbox": [ + 304, + 571, + 547, + 704 + ], + "type": "text", + "content": "other different exemplar-based methods: FamNet [34], BMNet and BMNet+ [38]. Figure 6 (a) shows the results on the FSC-147 validation set. The baseline uses three randomly sampled patches as the exemplars for the pre-trained exemplar-based counter. By using the generated class prototype to select class-relevant patches, the error rate is reduced by 5.18, 8.59 and 5.60 on FamNet, BMNet and BMNet+, respectively. In addition, as the error predictor is additionally adopted, the error rate is further reduced by 1.76, 1.00 and 1.08 on FamNet, BMNet and BMNet+, respectively. Similarly, Figure 6 (b) shows the results on the FSC" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "15554" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "text", + "content": "147 test set. Our method achieves consistent performance improvements for all three methods." 
+ } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 58, + 113, + 265, + 225 + ], + "blocks": [ + { + "bbox": [ + 58, + 113, + 265, + 225 + ], + "lines": [ + { + "bbox": [ + 58, + 113, + 265, + 225 + ], + "spans": [ + { + "bbox": [ + 58, + 113, + 265, + 225 + ], + "type": "image", + "image_path": "d2b04911282de7979d4bf2835d8b21d7e9590aa789438c8d8521920c00a25814.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 163, + 232, + 173, + 240 + ], + "lines": [ + { + "bbox": [ + 163, + 232, + 173, + 240 + ], + "spans": [ + { + "bbox": [ + 163, + 232, + 173, + 240 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 58, + 245, + 265, + 357 + ], + "blocks": [ + { + "bbox": [ + 58, + 245, + 265, + 357 + ], + "lines": [ + { + "bbox": [ + 58, + 245, + 265, + 357 + ], + "spans": [ + { + "bbox": [ + 58, + 245, + 265, + 357 + ], + "type": "image", + "image_path": "9871b996fafadca835a82c199766ca8c4307f6aff46fadae7410b9f81c06a48b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 162, + 363, + 173, + 372 + ], + "lines": [ + { + "bbox": [ + 162, + 363, + 173, + 372 + ], + "spans": [ + { + "bbox": [ + 162, + 363, + 173, + 372 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 479, + 203, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 479, + 203, + 491 + ], + "spans": [ + { + "bbox": [ + 47, + 479, + 203, + 491 + ], + "type": "text", + "content": "5.3. Multi-class Object Counting" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 498, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 498, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 498, + 288, + 713 + ], + "type": "text", + "content": "Our method can count instances of a specific class given the class name, which is particularly useful when there are multiple classes in the same image. In this section, we show some visualization results in this multi-class scenario. As seen in Figure 7, our method selects patches according to the given class name and count instances from that specific class in the input image. Correspondingly, the heatmap highlights the image regions that are most relevant to the specified class. Here the heatmaps are obtained by correlating the exemplar feature vector with the image feature map in a pre-trained ImageNet feature space. Note that we mask out the image region where the activation value in the heatmap is below a threshold for counting purpose. We also show the patches selected using another exemplar-free counting method, RepRPN [33]. The class of RepRPN selected patches can not be explicitly specified. It simply selects patches from the class with the highest number of instances in the image according to the repetition score." 
+ } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 310, + 72, + 531, + 184 + ], + "blocks": [ + { + "bbox": [ + 310, + 72, + 531, + 184 + ], + "lines": [ + { + "bbox": [ + 310, + 72, + 531, + 184 + ], + "spans": [ + { + "bbox": [ + 310, + 72, + 531, + 184 + ], + "type": "image", + "image_path": "efee905dc8491de7e8ab72a66f303efe9a7bfd45def19d6566416ca8dafa8c6a.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 310, + 186, + 531, + 297 + ], + "blocks": [ + { + "bbox": [ + 310, + 186, + 531, + 297 + ], + "lines": [ + { + "bbox": [ + 310, + 186, + 531, + 297 + ], + "spans": [ + { + "bbox": [ + 310, + 186, + 531, + 297 + ], + "type": "image", + "image_path": "b2057ff667537b23fb5a12c10c5f78d9a862efe17544dad0c261cde14cdd70eb.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 310, + 301, + 531, + 411 + ], + "blocks": [ + { + "bbox": [ + 46, + 382, + 287, + 460 + ], + "lines": [ + { + "bbox": [ + 46, + 382, + 287, + 460 + ], + "spans": [ + { + "bbox": [ + 46, + 382, + 287, + 460 + ], + "type": "text", + "content": "Figure 6. Using our selected patches as exemplars for other exemplar-based class-agnostic counting methods (FamNet, BMNet and BMNet+) on FSC-147 dataset. Blue bars are the MAEs of using three randomly sampled patches. Orange bars are the MAEs of using the class prototype to select class-relevant patches as exemplars. Green bars are the MAEs of using the class prototype and error predictor to select optimal patches as exemplars." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 310, + 301, + 531, + 411 + ], + "lines": [ + { + "bbox": [ + 310, + 301, + 531, + 411 + ], + "spans": [ + { + "bbox": [ + 310, + 301, + 531, + 411 + ], + "type": "image", + "image_path": "a21535710190b515d0cc1b8add4a36c28d21c85ccb4ca6cf656c6e1b9f73716e.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 427, + 545, + 462 + ], + "lines": [ + { + "bbox": [ + 305, + 427, + 545, + 462 + ], + "spans": [ + { + "bbox": [ + 305, + 427, + 545, + 462 + ], + "type": "text", + "content": "Figure 7. Visualization results of our method in some multi-class examples. Our method selects patches according to the given class name and the corresponding heatmap highlights the relevant areas." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 481, + 378, + 493 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 481, + 378, + 493 + ], + "spans": [ + { + "bbox": [ + 306, + 481, + 378, + 493 + ], + "type": "text", + "content": "6. Conclusion" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 500, + 545, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 500, + 545, + 668 + ], + "spans": [ + { + "bbox": [ + 304, + 500, + 545, + 668 + ], + "type": "text", + "content": "In this paper, we proposed a new task, zero-shot object counting, to count instances of a specific class given only the class name without access to any exemplars. To address this, we developed a simple yet effective method that accurately localizes the optimal patches across the query image that can be used as counting exemplars. 
Specifically, we construct a class prototype in a pre-trained feature space and use the prototype to select patches that contain objects of interest; then we use an error predictor to select those patches with the smallest predicted errors as the final exemplars for counting. Extensive results demonstrate the effectiveness of our method. We also conduct experiments to show that our selected patches can be used for other exemplar-based counting methods to achieve exemplar-free counting." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 673, + 545, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 673, + 545, + 708 + ], + "spans": [ + { + "bbox": [ + 305, + 673, + 545, + 708 + ], + "type": "text", + "content": "Acknowledgements. This research was partially supported by NSF grants IIS-2123920 and IIS-2212046 and the NASA Biodiversity program (Award 80NSSC21K1027)." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15555" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 92, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 53, + 92, + 287, + 125 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 92, + 287, + 125 + ], + "spans": [ + { + "bbox": [ + 53, + 92, + 287, + 125 + ], + "type": "text", + "content": "[1] Shahira Abousamra, Minh Hoai, Dimitris Samaras, and Chao Chen. Localization in the crowd with topological constraints. In AAAI, 2021. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 126, + 286, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 126, + 286, + 148 + ], + "spans": [ + { + "bbox": [ + 53, + 126, + 286, + 148 + ], + "type": "text", + "content": "[2] Martin Arjovsky, Soumith Chintala, and Léon Bottou. Wasserstein gan. In ICML, 2017. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 150, + 286, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 150, + 286, + 182 + ], + "spans": [ + { + "bbox": [ + 53, + 150, + 286, + 182 + ], + "type": "text", + "content": "[3] Carlos Arteta, Victor S. Lempitsky, Julia Alison Noble, and Andrew Zisserman. Interactive object counting. In ECCV, 2014. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 185, + 286, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 185, + 286, + 206 + ], + "spans": [ + { + "bbox": [ + 53, + 185, + 286, + 206 + ], + "type": "text", + "content": "[4] Carlos Arteta, Victor S. Lempitsky, and Andrew Zisserman. Counting in the wild. In ECCV, 2016. 1, 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 209, + 286, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 209, + 286, + 240 + ], + "spans": [ + { + "bbox": [ + 53, + 209, + 286, + 240 + ], + "type": "text", + "content": "[5] Yuval Atzmon and Gal Chechik. 
Adaptive confidence smoothing for generalized zero-shot learning. In CVPR, 2019. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 243, + 286, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 243, + 286, + 275 + ], + "spans": [ + { + "bbox": [ + 53, + 243, + 286, + 275 + ], + "type": "text", + "content": "[6] Ankan Bansal, Karan Sikka, Gaurav Sharma, Rama Chellappa, and Ajay Divakaran. Zero-shot object detection. In ECCV, 2018. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 278, + 286, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 278, + 286, + 320 + ], + "spans": [ + { + "bbox": [ + 53, + 278, + 286, + 320 + ], + "type": "text", + "content": "[7] Antoni B. Chan, Zhang-Sheng John Liang, and Nuno Vasconcelos. Privacy preserving crowd monitoring: Counting people without people models or tracking. In CVPR, 2008. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 323, + 286, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 323, + 286, + 357 + ], + "spans": [ + { + "bbox": [ + 53, + 323, + 286, + 357 + ], + "type": "text", + "content": "[8] Prithvijit Chattopadhyay, Ramakrishna Vedantam, Ramprasaath R. Selvaraju, Dhruv Batra, and Devi Parikh. Counting everyday objects in everyday scenes. CVPR, 2017. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 358, + 286, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 358, + 286, + 401 + ], + "spans": [ + { + "bbox": [ + 53, + 358, + 286, + 401 + ], + "type": "text", + "content": "[9] Long Chen, Hanwang Zhang, Jun Xiao, W. Liu, and Shih-Fu Chang. Zero-shot visual recognition using semantics-preserving adversarial embedding networks. In CVPR, 2018. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 404, + 286, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 404, + 286, + 437 + ], + "spans": [ + { + "bbox": [ + 48, + 404, + 286, + 437 + ], + "type": "text", + "content": "[10] Hisham Cholakkal, Guolei Sun, Fahad Shahbaz Khan, and Ling Shao. Object counting and instance segmentation with image-level supervision. In CVPR, 2019. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 439, + 286, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 439, + 286, + 482 + ], + "spans": [ + { + "bbox": [ + 48, + 439, + 286, + 482 + ], + "type": "text", + "content": "[11] Hisham Cholakkal, Guolei Sun, Salman Hameed Khan, Fahad Shahbaz Khan, Ling Shao, and Luc Van Gool. Towards partial supervision for generic object counting in natural scenes. volume 44, 2022. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 484, + 286, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 484, + 286, + 528 + ], + "spans": [ + { + "bbox": [ + 48, + 484, + 286, + 528 + ], + "type": "text", + "content": "[12] Andrea Frome, Gregory S. Corrado, Jonathon Shlens, Samy Bengio, Jeffrey Dean, Marc'Aurelio Ranzato, and Tomas Mikolov. Devise: A deep visual-semantic embedding model. In NIPS, 2013. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 530, + 286, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 530, + 286, + 563 + ], + "spans": [ + { + "bbox": [ + 48, + 530, + 286, + 563 + ], + "type": "text", + "content": "[13] Shenjian Gong, Shanshan Zhang, Jian Yang, Dengxin Dai, and Bernt Schiele. 
Class-agnostic object counting robust to intraclass diversity. In ECCV, 2022. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 565, + 286, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 565, + 286, + 597 + ], + "spans": [ + { + "bbox": [ + 48, + 565, + 286, + 597 + ], + "type": "text", + "content": "[14] Meng-Ru Hsieh, Yen-Liang Lin, and Winston H. Hsu. Drone-based object counting by spatially regularized regional proposal network. In ICCV, 2017. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 600, + 286, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 600, + 286, + 654 + ], + "spans": [ + { + "bbox": [ + 48, + 600, + 286, + 654 + ], + "type": "text", + "content": "[15] Haroon Idrees, Muhammad Tayyab, Kishan Athrey, Dong Zhang, Somaya Ali Al-Maadeed, Nasir M. Rajpoot, and Mubarak Shah. Composition loss for counting, density map estimation and localization in dense crowds. In ECCV, 2018. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 656, + 286, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 656, + 286, + 677 + ], + "spans": [ + { + "bbox": [ + 48, + 656, + 286, + 677 + ], + "type": "text", + "content": "[16] Dinesh Jayaraman and Kristen Grauman. Zero-shot recognition with unreliable attributes. In NIPS, 2014. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 48, + 680, + 286, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 286, + 712 + ], + "type": "text", + "content": "[17] Christoph H. Lampert, Hannes Nickisch, and Stefan Harmeling. Learning to detect unseen object classes by between-class attribute transfer. CVPR, 2009. 3" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 37, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[18] Issam H. Laradji, Negar Rostamzadeh, Pedro H. O. Pinheiro, David Vázquez, and Mark W. Schmidt. Where are the blobs: Counting by localization with point supervision. In ECCV, 2018. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 118, + 545, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 149 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 149 + ], + "type": "text", + "content": "[19] Hieu Le, Bento Goncalves, Dimitris Samaras, and Heather Lynch. Weakly labeling the antarctic: The penguin colony case. In CVPR Workshops, June 2019. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 150, + 545, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 150, + 545, + 183 + ], + "spans": [ + { + "bbox": [ + 307, + 150, + 545, + 183 + ], + "type": "text", + "content": "[20] Hieu Le and Dimitris Samaras. Physics-based shadow image decomposition for shadow removal. Los Alamitos, CA, USA. IEEE Computer Society. 
3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 184, + 545, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 184, + 545, + 205 + ], + "spans": [ + { + "bbox": [ + 307, + 184, + 545, + 205 + ], + "type": "text", + "content": "[21] Hieu Le and Dimitris Samaras. From shadow segmentation to shadow removal. In ECCV, 2020. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 206, + 545, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 206, + 545, + 237 + ], + "spans": [ + { + "bbox": [ + 307, + 206, + 545, + 237 + ], + "type": "text", + "content": "[22] Hieu Le, Tomas F. Yago Vicente, Vu Nguyen, Minh Hoai, and Dimitris Samaras. A+D Net: Training a shadow detector with adversarial shadow attenuation. In ECCV, 2018. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 239, + 545, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 239, + 545, + 281 + ], + "spans": [ + { + "bbox": [ + 307, + 239, + 545, + 281 + ], + "type": "text", + "content": "[23] Hieu Le, Chen-Ping Yu, Gregory Zelinsky, and Dimitris Samaras. Co-localization with category-consistent features and geodesic distance propagation. In ICCV Workshop, 2017. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 282, + 545, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 282, + 545, + 316 + ], + "spans": [ + { + "bbox": [ + 307, + 282, + 545, + 316 + ], + "type": "text", + "content": "[24] Dongze Lian, Jing Li, Jia Zheng, Weixin Luo, and Shenghua Gao. Density map regression guided detection network for rgb-d crowd counting and localization. CVPR, 2019. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 316, + 545, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 316, + 545, + 348 + ], + "spans": [ + { + "bbox": [ + 307, + 316, + 545, + 348 + ], + "type": "text", + "content": "[25] Chang Liu, Yujie Zhong, Andrew Zisserman, and Weidi Xie. Countr: Transformer-based generalised visual counting. In BMVC, 2022. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 350, + 545, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 350, + 545, + 380 + ], + "spans": [ + { + "bbox": [ + 307, + 350, + 545, + 380 + ], + "type": "text", + "content": "[26] Weizhe Liu, N. Durasov, and P. Fua. Leveraging self-supervision for cross-domain crowd counting. In CVPR, 2022. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 382, + 545, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 382, + 545, + 403 + ], + "spans": [ + { + "bbox": [ + 307, + 382, + 545, + 403 + ], + "type": "text", + "content": "[27] Weizhe Liu, Mathieu Salzmann, and Pascal V. Fua. Context-aware crowd counting. In CVPR, 2019. 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 404, + 545, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 404, + 545, + 426 + ], + "spans": [ + { + "bbox": [ + 307, + 404, + 545, + 426 + ], + "type": "text", + "content": "[28] Erika Lu, Weidi Xie, and Andrew Zisserman. Class-agnostic counting. In ACCV, 2018. 
1, 2, 5, 6" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 427, + 545, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 427, + 545, + 470 + ], + "spans": [ + { + "bbox": [ + 307, + 427, + 545, + 470 + ], + "type": "text", + "content": "[29] Terrell N. Mundhenk, Goran Konjevod, Wesam A. Sakla, and Kofi Boakye. A large contextual dataset for classification, detection and counting of cars with deep learning. In ECCV, 2016. 1, 2" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 472, + 545, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 472, + 545, + 514 + ], + "spans": [ + { + "bbox": [ + 307, + 472, + 545, + 514 + ], + "type": "text", + "content": "[30] Sanath Narayan, Akshita Gupta, Fahad Shahbaz Khan, Cees G. M. Snoek, and Ling Shao. Latent embedding feedback and discriminative features for zero-shot classification. In ECCV, 2020. 3" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 515, + 545, + 547 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 515, + 545, + 547 + ], + "spans": [ + { + "bbox": [ + 307, + 515, + 545, + 547 + ], + "type": "text", + "content": "[31] Thanh Nguyen, Chau Pham, Khoi Nguyen, and Minh Hoai. Few-shot object counting and detection. In ECCV, 2022. 2, 5" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 307, + 548, + 545, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 548, + 545, + 613 + ], + "spans": [ + { + "bbox": [ + 307, + 548, + 545, + 613 + ], + "type": "text", + "content": "[32] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In ICML, 2021. 4, 5" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 307, + 614, + 545, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 614, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 307, + 614, + 545, + 635 + ], + "type": "text", + "content": "[33] Viresh Ranjan and Minh Hoai. Exemplar free class agnostic counting. In ACCV, 2022. 1, 2, 3, 5, 6, 8" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 307, + 636, + 545, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 636, + 545, + 668 + ], + "spans": [ + { + "bbox": [ + 307, + 636, + 545, + 668 + ], + "type": "text", + "content": "[34] Viresh Ranjan, Udbhav Sharma, Thua Nguyen, and Minh Hoai. Learning to count everything. In CVPR, 2021. 1, 2, 3, 5, 6, 7" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 307, + 670, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 670, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 670, + 545, + 712 + ], + "type": "text", + "content": "[35] Mahdi Rezaei and Mahsa Shahidi. Zero-shot learning and its applications from autonomous vehicles to Covid-19 diagnosis: A review. In Intelligence-Based Medicine, volume 3, 2020. 
3" + } + ] + } + ], + "index": 36 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "text", + "content": "15556" + } + ] + } + ], + "index": 38 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 105 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 105 + ], + "type": "text", + "content": "[36] Bernardino Romero-Paredes and Philip H. S. Torr. An embarrassingly simple approach to zero-shot learning. In ICML, 2015. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 107, + 287, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 107, + 287, + 152 + ], + "spans": [ + { + "bbox": [ + 49, + 107, + 287, + 152 + ], + "type": "text", + "content": "[37] Deepak Babu Sam, Abhinav Agarwalla, Jimmy Joseph, Vishwanath A. Sindagi, R. Venkatesh Babu, and Vishal M. Patel. Completely self-supervised crowd counting via distribution matching. In ECCV, 2022. 1, 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 153, + 287, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 153, + 287, + 196 + ], + "spans": [ + { + "bbox": [ + 49, + 153, + 287, + 196 + ], + "type": "text", + "content": "[38] Min Shi, Hao Lu, Chen Feng, Chengxin Liu, and Zhiguo Cao. Represent, compare, and learn: A similarity-aware framework for class-agnostic counting. In CVPR, 2022. 1, 2, 3, 6, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 198, + 286, + 231 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 198, + 286, + 231 + ], + "spans": [ + { + "bbox": [ + 49, + 198, + 286, + 231 + ], + "type": "text", + "content": "[39] Vishwanath A. Sindagi, Rajeev Yasarla, and Vishal M. Patel. Pushing the frontiers of unconstrained crowd counting: New dataset and benchmark method. In ICCV, 2019. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 233, + 286, + 265 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 233, + 286, + 265 + ], + "spans": [ + { + "bbox": [ + 49, + 233, + 286, + 265 + ], + "type": "text", + "content": "[40] Jia Wan, Ziquan Liu, and Antoni B. Chan. A generalized loss function for crowd counting and localization. In CVPR, 2021. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 267, + 286, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 267, + 286, + 300 + ], + "spans": [ + { + "bbox": [ + 49, + 267, + 286, + 300 + ], + "type": "text", + "content": "[41] Boyu Wang, Huidong Liu, Dimitris Samaras, and Minh Hoai Nguyen. Distribution matching for crowd counting. In NeurIPS, 2020. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 302, + 286, + 345 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 302, + 286, + 345 + ], + "spans": [ + { + "bbox": [ + 49, + 302, + 286, + 345 + ], + "type": "text", + "content": "[42] Qi Wang, Junyu Gao, Wei Lin, and Xuelong Li. Nwpu-crowd: A large-scale benchmark for crowd counting and localization. 
IEEE Transactions on Pattern Analysis and Machine Intelligence, 43, 2021. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 347, + 286, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 347, + 286, + 380 + ], + "spans": [ + { + "bbox": [ + 49, + 347, + 286, + 380 + ], + "type": "text", + "content": "[43] Yongqin Xian, Zeynep Akata, Gaurav Sharma, Quynh N. Nguyen, Matthias Hein, and Bernt Schiele. Latent embeddings for zero-shot classification. In CVPR, 2016. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 382, + 286, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 382, + 286, + 435 + ], + "spans": [ + { + "bbox": [ + 49, + 382, + 286, + 435 + ], + "type": "text", + "content": "[44] Yongqin Xian, Christoph H. Lampert, Bernt Schiele, and Zeynep Akata. Zero-shot learning—a comprehensive evaluation of the good, the bad and the ugly. IEEE Transactions on Pattern Analysis and Machine Intelligence, 41, 2019. 3, 4" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 438, + 286, + 471 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 438, + 286, + 471 + ], + "spans": [ + { + "bbox": [ + 49, + 438, + 286, + 471 + ], + "type": "text", + "content": "[45] Yongqin Xian, Saurabh Sharma, Bernt Schiele, and Zeynep Akata. F-vaegan-d2: A feature generating framework for any-shot learning. In CVPR, 2019. 3, 4" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 473, + 286, + 527 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 473, + 286, + 527 + ], + "spans": [ + { + "bbox": [ + 49, + 473, + 286, + 527 + ], + "type": "text", + "content": "[46] Weidi Xie, J. Alison Noble, and Andrew Zisserman. Microscopy cell counting and detection with fully convolutional regression networks. Computer Methods in Biomechanics and Biomedical Engineering: Imaging & Visualization, 6, 2018. 1, 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 49, + 529, + 286, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 529, + 286, + 552 + ], + "spans": [ + { + "bbox": [ + 49, + 529, + 286, + 552 + ], + "type": "text", + "content": "[47] Haipeng Xiong and Angela Yao. Discrete-constrained regression for local counting models. In ECCV, 2022. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 49, + 553, + 286, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 553, + 286, + 574 + ], + "spans": [ + { + "bbox": [ + 49, + 553, + 286, + 574 + ], + "type": "text", + "content": "[48] Jingyi Xu and Hieu Le. Generating representative samples for few-shot classification. In CVPR, 2022. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 49, + 576, + 286, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 576, + 286, + 609 + ], + "spans": [ + { + "bbox": [ + 49, + 576, + 286, + 609 + ], + "type": "text", + "content": "[49] Jingyi Xu, Hieu Le, Mingzhen Huang, ShahRukh Athar, and Dimitris Samaras. Variational feature disentangling for fine-grained few-shot classification. In ICCV, 2021. 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 49, + 611, + 286, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 611, + 286, + 643 + ], + "spans": [ + { + "bbox": [ + 49, + 611, + 286, + 643 + ], + "type": "text", + "content": "[50] Shuo Yang, Hung-Ting Su, Winston H. Hsu, and Wen-Chin Chen. Class-agnostic few-shot object counting. In WACV, 2021. 
2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 49, + 646, + 286, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 646, + 286, + 677 + ], + "spans": [ + { + "bbox": [ + 49, + 646, + 286, + 677 + ], + "type": "text", + "content": "[51] Zhiyuan You, Kai Yang, Wenhan Luo, Xin Lu, Lei Cui, and Xinyi Le. Few-shot object counting with similarity-aware feature enhancement. In WACV, 2023. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 49, + 680, + 286, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 680, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 49, + 680, + 286, + 712 + ], + "type": "text", + "content": "[52] Anran Zhang, Lei Yue, Jiayi Shen, Fan Zhu, Xiantong Zhen, Xianbin Cao, and Ling Shao. Attentional neural fields for crowd counting. In ICCV, 2019. 2" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 209 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "text", + "content": "[53] Cong Zhang, Hongsheng Li, Xiaogang Wang, and Xiaokang Yang. Cross-scene crowd counting via deep convolutional neural networks. In CVPR, 2015. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 107, + 545, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 129 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 129 + ], + "type": "text", + "content": "[54] Li Zhang, Tao Xiang, and Shaogang Gong. Learning a deep embedding model for zero-shot learning. In CVPR, 2017. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 130, + 545, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 130, + 545, + 152 + ], + "spans": [ + { + "bbox": [ + 307, + 130, + 545, + 152 + ], + "type": "text", + "content": "[55] Qi Zhang and Antoni Chan. Calibration-free multi-view crowd counting. In ECCV, 2022. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 153, + 545, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 153, + 545, + 185 + ], + "spans": [ + { + "bbox": [ + 307, + 153, + 545, + 185 + ], + "type": "text", + "content": "[56] Yingying Zhang, Desen Zhou, Siqin Chen, Shenghua Gao, and Yi Ma. Single-image crowd counting via multi-column convolutional neural network. In CVPR, 2016. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 186, + 545, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 186, + 545, + 209 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 545, + 209 + ], + "type": "text", + "content": "[57] Ye Zheng, Jiahong Wu, Yongqiang Qin, Faen Zhang, and Li Cui. Zero-shot instance segmentation. In CVPR, 2021. 
2" + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "text", + "content": "15557" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/Zero-Shot Pose Transfer for Unrigged Stylized 3D Characters/5ada3ef3-b974-4dfe-9b9c-f168fb79ad07_content_list.json b/2023/Zero-Shot Pose Transfer for Unrigged Stylized 3D Characters/5ada3ef3-b974-4dfe-9b9c-f168fb79ad07_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..9c4c1cd167f7d8519a6d158f4b72b280cb8de42d --- /dev/null +++ b/2023/Zero-Shot Pose Transfer for Unrigged Stylized 3D Characters/5ada3ef3-b974-4dfe-9b9c-f168fb79ad07_content_list.json @@ -0,0 +1,1730 @@ +[ + { + "type": "text", + "text": "Zero-shot Pose Transfer for Unrigged Stylized 3D Characters", + "text_level": 1, + "bbox": [ + 174, + 130, + 794, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jiashun Wang $^{1*}$ Xueting Li $^{2}$ Sifei Liu $^{2}$ Shalini De Mello $^{2}$ Orazio Gallo $^{2}$ Xiaolong Wang $^{3}$ Jan Kautz $^{2}$ $^{1}$ Carnegie Mellon University ${}^{2}$ NVIDIA ${}^{3}$ UC San Diego", + "bbox": [ + 225, + 178, + 738, + 236 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/b8b5358168abb25f99559384141748e8150d018ef2d2eebe1ebab6a6c5eb3eae.jpg", + "image_caption": [ + "Figure 1. Our algorithm transfers the pose of a reference avatar (source) to stylized characters. Unlike existing methods, at training time our approach needs only the mesh of the source avatar in rest and desired pose, and the mesh of the stylized character only in rest pose." + ], + "image_footnote": [], + "bbox": [ + 78, + 243, + 897, + 521 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 577, + 313, + 593 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Transferring the pose of a reference avatar to stylized 3D characters of various shapes is a fundamental task in computer graphics. Existing methods either require the stylized characters to be rigged, or they use the stylized character in the desired pose as ground truth at training. We present a zero-shot approach that requires only the widely available deformed non-stylized avatars in training, and deforms stylized characters of significantly different shapes at inference. Classical methods achieve strong generalization by deforming the mesh at the triangle level, but this requires labelled correspondences. We leverage the power of local deformation, but without requiring explicit correspondence labels. We introduce a semi-supervised shape-understanding module to bypass the need for explicit correspondences at test time, and an implicit pose deformation module that deforms individual surface points to match the target pose. Furthermore, to encourage realistic and accurate deformation of", + "bbox": [ + 75, + 611, + 473, + 869 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "stylized characters, we introduce an efficient volume-based test-time training procedure. 
Because it does not need rigging, nor the deformed stylized character at training time, our model generalizes to categories with scarce annotation, such as stylized quadrupeds. Extensive experiments demonstrate the effectiveness of the proposed method compared to the state-of-the-art approaches trained with comparable or more supervision. Our project page is available at https://jiashunwang.github.io/ZPT/", + "bbox": [ + 500, + 578, + 893, + 715 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 500, + 751, + 632, + 767 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Stylized 3D characters, such as those in Fig. 1, are commonly used in animation, movies, and video games. Deforming these characters to mimic natural human or animal poses has been a long-standing task in computer graphics. Different from the 3D models of natural humans and animals, stylized 3D characters are created by professional artists through imagination and exaggeration. As a result, each stylized character has a distinct skeleton, shape, mesh", + "bbox": [ + 496, + 779, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Work done during Jiashun Wang's internship at NVIDIA.", + "bbox": [ + 94, + 886, + 406, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "8704", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "topology, and usually include various accessories, such as a cloak or wings (see Fig. 1). These variations hinder the process of matching the pose of a stylized 3D character to that of a reference avatar, generally making manual rigging a requirement. Unfortunately, rigging is a tedious process that requires manual effort to create the skeleton and skinning weights for each character. Even when provided with manually annotated rigs, transferring poses from a source avatar onto stylized characters is not trivial when the source and target skeletons differ. Automating this procedure is still an open research problem and is the focus of many recent works [2, 4, 24, 52]. Meanwhile, non-stylized 3D humans and animals have been well-studied by numerous prior works [35, 40, 54, 62, 68]. A few methods generously provide readily available annotated datasets [11, 12, 41, 68], or carefully designed parametric models [40, 51, 68]. By taking advantage of these datasets [12, 41], several learning-based methods [7, 14, 35, 62, 67] disentangle and transfer poses between human meshes using neural networks. However, these methods (referred to as \"part-level\" in the following) carry out pose transfer by either globally deforming the whole body mesh [14, 22, 47, 67] or by transforming body parts [35, 48], both of which lead to overfitting on the training human meshes and fail to generalize to stylized characters with significantly different body part shapes. 
Interestingly, classical mesh deformation methods [55, 56] (referred to as \"local\" in the following) can transfer poses between a pair of meshes with significant shape differences by computing and transferring per-triangle transformations through correspondence. Though these methods require manual correspondence annotation between the source and target meshes, they provide a key insight that by transforming individual triangles instead of body parts, the mesh deformation methods are more agnostic to a part's shape and can generalize to meshes with different shapes.", + "bbox": [ + 76, + 90, + 472, + 621 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We marry the benefits of learning-based methods [7, 14, 35, 62, 67] with the classic local deformation approach [55] and present a model for unrigged, stylized character deformation guided by a non-stylized biped or quadruped avatar. Notably, our model only requires easily accessible posed human or animal meshes for training and can be directly applied to deform 3D stylized characters with a significantly different shape at inference. To this end, we implicitly operationalize the key insight from the local deformation method [55] by modeling the shape and pose of a 3D character with a correspondence-aware shape understanding module and an implicit pose deformation module. The shape understanding module learns to predict the part segmentation label (i.e., the coarse-level correspondence) for each surface point, besides representing the shape of a 3D character as a latent shape code. The pose deformation module is conditioned on the shape code and deforms individual surface point guided by a target pose code sampled", + "bbox": [ + 76, + 628, + 472, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "from a prior pose latent space [50]. Furthermore, to encourage realistic deformation and generalize to rare poses, we propose a novel volume-based test-time training procedure that can be efficiently applied to unseen stylized characters.", + "bbox": [ + 496, + 90, + 890, + 151 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "During inference, by mapping biped or quadruped poses from videos, in addition to meshes to the prior pose latent space using existing works [32, 51, 53], we can transfer poses from different modalities onto unrigged 3D stylized characters. Our main contributions are:", + "bbox": [ + 496, + 152, + 893, + 226 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose a solution to a practical and challenging task - learning a model for stylized 3D character deformation with only posed human or animal meshes.", + "- We develop a correspondence-aware shape understanding module, an implicit pose deformation module, and a volume-based test-time training procedure to generalize the proposed model to unseen stylized characters and arbitrary poses in a zero-shot manner.", + "- We carry out extensive experiments on both humans and quadrupeds to show that our method produces more visually pleasing and accurate deformations compared to baselines trained with comparable or more supervision." + ], + "bbox": [ + 517, + 227, + 890, + 422 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 436, + 640, + 452 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Deformation Transfer. Deformation transfer is a longstanding problem in the computer graphics community [3, 6, 8, 9, 55, 65]. 
Sumner et al. [55] apply an affine transformation to each triangle of the mesh to solve an optimization problem that matches the deformation of the source mesh while maintaining the shape of the target mesh. Ben-Chen et al. [9] enclose the source and target shapes with two cages and transfer the Jacobians of the source deformation to the target shape. However, these methods need tedious human efforts to annotate the correspondence between the source and target shapes. More recently, several deep learning methods are developed to solve the deformation transfer task. However, they either require manually providing the correspondence [66] or cannot generalize [14, 22, 67] to stylized characters with different shapes. Gao et al. [22] propose a VAE-GAN based method to leverage the cycle consistency between the source and target shapes. Nonetheless, it can only work on shapes used in training. Wang et al. [62] introduce conditional normalization used in style transfer for 3D deformation transfer. But the method is limited to clothed-humans and cannot handle the large shape variations of stylized characters.", + "bbox": [ + 496, + 462, + 890, + 792 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We argue that these learning-based methods cannot generalize to stylized characters because they rely on encoding their global information (e.g., body or parts), which is different from traditional works that focus on local deformation, e.g., the affine transformation applied to each triangle in [55]. Using a neural network to encode the global information easily leads to overfitting. For example, models", + "bbox": [ + 496, + 795, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "8705", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "trained on human meshes cannot generalize to a stylized humanoid character. At the same time, early works only focus on local information and cannot model global information such as correspondence between the source and target shapes, which is why they all need human effort to annotate the correspondence. Our method tries to learn the correspondence and deform locally at the same time.", + "bbox": [ + 75, + 90, + 470, + 196 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Skeleton-based Pose Transfer. Besides mesh deformation transfer, an alternative way to transfer pose is to utilize skeletons. Motion retargeting is also a common name used for transferring poses from one motion sequence to another. Gleicher et al. [24] propose a space-time constrained solver aiming to satisfy the kinematics-level constraints and to preserve the characters' original identity. Following works [5, 19, 33] try to solve inverse-kinematics or inverse rate control to achieve pose transfer. There are also dynamics-based methods [4, 59] that consider physics during the retargeting process. Recently, learning-based methods [20, 27, 37, 60, 61] train deep neural networks to predict the transformation of the skeleton. Aberman et al. [2] propose a pooling-based method to transfer poses between meshes with different skeletons.", + "bbox": [ + 75, + 209, + 468, + 435 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "All these works highly rely on the skeleton for pose transfer. Other works try to estimate the rigging of the template shape [7, 39, 52, 63, 64] when a skeleton is not available. But if the prediction of the skinning weights fails, the retargeting fails as well. Liao et al. 
[36] propose a model that learns to predict the skinning weights and pose transfer jointly using ground truth skinning weights and paired motion data as supervision, which limits the generalization of this method to categories where annotations are more scarce compared to humans (e.g., quadrupeds). Instead, our method uses posed human or animal meshes for training and deforms stylized characters of different shapes at inference.", + "bbox": [ + 75, + 449, + 468, + 630 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Implicit 3D shape representation. Implicit 3D shape representations have shown great success in reconstructing static shapes [13,16,18,21,23,29,42,43,49] and deformable ones [10,28,34,44-48,58]. DeepSDF [49] proposes to use an MLP to predict the signed distance field (SDF) value of a query point in 3D space, where a shape code is jointly optimized in an auto-decoding manner. Occupancy flow [45] generalizes the Occupancy Networks [42] to learn a temporally and spatially continuous vector field with a NeuralODE [15]. Inspired by parametric models, NPMs [47] disentangles and represents the shape and pose of dynamic humans by learning an implicit shape and pose function, respectively. Different from these implicit shape representation works that focus on reconstructing static or deformable meshes, we further exploit the inherent continuity and locality of implicit functions to deform stylized characters to match a target pose in a zero-shot manner.", + "bbox": [ + 75, + 643, + 470, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 500, + 89, + 589, + 104 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We aim to transfer the pose of a biped or quadruped avatar to an unrigged, stylized 3D character. We tackle this problem by modeling the shape and pose of a 3D character using a correspondence-aware shape understanding module and an implicit pose deformation module, inspired by classical mesh deformation methods [55, 56]. The shape understanding module (Sec. 3.1, Fig. 2) predicts a latent shape code and part segmentation label of a 3D character in rest pose, while the pose deformation module (Sec. 3.2, Fig. 3) deforms the character in the rest pose given the predicted shape code and a target pose code. Moreover, to produce natural deformations and generalize to rare poses unseen at training, we introduce an efficient volume-based test-time training procedure (Sec 3.3) for unseen stylized characters. All three modules, trained only with posed, unclothed human meshes, and unrigged, stylized characters in a rest pose, are directly applied to unseen stylized characters at inference. We explain our method for humans, and describe how we extend it to quadrupeds in Sec. 4.6.", + "bbox": [ + 496, + 114, + 890, + 402 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Correspondence-Aware Shape Understanding", + "text_level": 1, + "bbox": [ + 498, + 410, + 887, + 426 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given a 3D character in rest pose, we propose a shape understanding module to represent its shape information as a latent code, and to predict a body part segmentation label for each surface point.", + "bbox": [ + 496, + 434, + 890, + 494 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To learn a representative shape code, we employ an implicit auto-decoder [47, 49] that reconstructs the 3D character taking the shape code as input. 
During training, we jointly optimize the shape code of each training sample and the decoder. Given an unseen character (i.e., a stylized 3D character) during inference, we obtain its shape code by freezing the decoder and optimizing the shape code to reconstruct the given character. Specifically, as shown in Fig. 2, given the concatenation of a query point $x \\in \\mathbb{R}^3$ and the shape code $s \\in \\mathbb{R}^d$ , we first obtain an embedding $e \\in \\mathbb{R}^d$ via an MLP denoted as $\\mathcal{F}$ . Conditioned on the embedding $e$ , the occupancy $\\hat{o}_x \\in \\mathbb{R}$ of $x$ is then predicted by another MLP denoted as $\\mathcal{O}$ . The occupancy indicates if the query point $x$ is inside or outside the body surface and can be supervised by the ground truth occupancy as:", + "bbox": [ + 496, + 494, + 890, + 720 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathcal {O}} = - \\sum_ {x} \\left(o _ {x} \\cdot \\log \\left(\\hat {o} _ {x}\\right) + \\left(1 - o _ {x}\\right) \\cdot \\log \\left(1 - \\hat {o} _ {x}\\right)\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 509, + 731, + 890, + 762 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $o_x$ is the ground truth occupancy at point $x$ .", + "bbox": [ + 500, + 765, + 831, + 779 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Since our shape code eventually serves as a condition for the pose deformation module, we argue that it should also capture the part correspondence knowledge across different instances, in addition to the shape information (e.g., height, weight, and shape of each body part). This insight has been utilized by early local mesh deformation method [55], which explicitly utilizes correspondence to transfer local transformations between the source and target meshes. Our", + "bbox": [ + 496, + 780, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "8706", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/84b1ba766b54c9ef332c96b1d8eabc7c5bc78378df1476b6d69cda375015e6e6.jpg", + "image_caption": [ + "Figure 2. The shape understanding module (Sec. 3.1). Given a query point and a learnable shape code, we take MLPs to predict the occupancy, part segmentation label and further use an inverse MLP to regress the query point." + ], + "image_footnote": [], + "bbox": [ + 101, + 78, + 851, + 265 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/9368775deb240878960eedeec753f6ad31bb00e7b841c77c983e7bcc2ec94a62.jpg", + "image_caption": [ + "Figure 3. The pose deformation module (Sec. 3.2). Given a query point on the surface, the learned shape code and a target pose code, we use an MLP to predict the offset of the query point." + ], + "image_footnote": [], + "bbox": [ + 81, + 313, + 467, + 433 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "pose deformation process could also benefit from learning part correspondence. Take the various headgear, hats, and horns on the stylized characters's heads in Fig. 1 as an example. If these components can be \"understood\" as extensions of the character's heads by their shape codes, they will move smoothly with the character's heads during pose deformation. Thus, besides mesh reconstruction, we effectively task our shape understanding module with an additional objective: predicting part-level correspondence instantiated as the part segmentation label. 
Specifically, we propose to utilize an MLP $\\mathcal{P}$ to additionally predict a part label $p_x = (p_x^1,\\dots,p_x^K)^T\\in \\mathbb{R}^K$ for each surface point $x$ . Thanks to the densely annotated human mesh dataset, we can also supervise part segmentation learning with ground truth labels via:", + "bbox": [ + 75, + 497, + 468, + 723 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathcal {P}} = \\sum_ {x} (- \\sum_ {k = 1} ^ {K} \\mathbb {1} _ {x} ^ {k} \\log \\left(p _ {x} ^ {k}\\right)), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 171, + 729, + 468, + 771 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $K$ is the total number of body parts, and $\\mathbb{1}_x^k = 1$ if $x$ belongs to the $k^{th}$ part and $\\mathbb{1}_x^k = 0$ otherwise.", + "bbox": [ + 75, + 779, + 468, + 810 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To prepare the shape understanding module for stylized characters during inference, besides unclothed human meshes, we also include unrigged 3D stylized characters in rest pose during training. These characters in rest pose are easily accessible and do not require any annotation. For shape reconstruction, Eq. 1 can be similarly applied to the", + "bbox": [ + 75, + 810, + 468, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "stylized characters. However, as there is no part segmentation annotation for stylized characters, we propose a self-supervised inverse constraint inspired by correspondence learning methods [17,38] to facilitate part segmentation prediction on these characters. Specifically, we reconstruct the query point's coordinates from the concatenation of the shape code $s$ and the embedding $e$ through an MLP $\\mathcal{Q}$ and add an auxiliary objective as:", + "bbox": [ + 496, + 330, + 892, + 450 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathcal {Q}} = \\left| \\left| \\mathcal {Q} (s, e) - x \\right| \\right| ^ {2}. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 616, + 462, + 890, + 479 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Intuitively, for stylized characters without part annotation, the model learned without this objective may converge to a trivial solution where similar embeddings are predicted for points with the same occupancy value, even when they are far away from each other, and belong to different body parts. Tab. 4 further quantitatively verifies the effectiveness of this constraint. Beyond facilitating shape understanding, the predicted part segmentation label is further utilized in the volume-based test-time training module which will be introduced in Sec. 3.3.", + "bbox": [ + 496, + 489, + 892, + 638 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Implicit Pose Deformation Module", + "text_level": 1, + "bbox": [ + 500, + 650, + 802, + 666 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Given the learned shape code and a target pose, the pose deformation module deforms each surface point of the character to match the target pose. In the following, we first describe how we represent a human pose and then introduce the implicit function used for pose deformation.", + "bbox": [ + 496, + 672, + 890, + 748 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Instead of learning a latent pose space from scratch as in [36, 47], we propose to represent a human pose by the corresponding pose code in the latent space of VPoser [51]. 
Our intuition is that VPoser is trained with an abundance of posed humans from the large-scale AMASS dataset [41]. This facilitates faster training and provides robustness to overfitting. Furthermore, human poses can be successfully estimated from different modalities (e.g., videos or meshes), and mapped to the latent space of VPoser by existing methods [32, 51, 53]. By taking advantage of these works, our", + "bbox": [ + 496, + 750, + 892, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "8707", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "model can be applied to transfer poses from various modalities to an unrigged stylized character without any additional effort. A few examples can be found in the supplementary.", + "bbox": [ + 75, + 90, + 468, + 136 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To deform a character to match the given pose, we learn a neural implicit function $\\mathcal{M}$ that takes the sampled pose code $m\\in \\mathbb{R}^{32}$ , the learned shape code, and a query point $x$ around the character's surface as inputs and outputs the offset (denoted as $\\Delta \\hat{x}\\in \\mathbb{R}^3$ ) of $x$ in 3D space. Given the densely annotated human mesh dataset, we directly use the ground truth offset $\\Delta x$ as supervision. The training objective for our pose deformation module is defined as:", + "bbox": [ + 75, + 136, + 468, + 257 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathcal {D}} = \\sum_ {x} \\left| \\left| \\Delta \\hat {x} - \\Delta x \\right| \\right| ^ {2}. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 186, + 268, + 468, + 299 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Essentially, our implicit pose deformation module is similar in spirit to early local mesh deformation methods [55] and has two key advantages compared to the part-level pose transfer methods [22, 36, 62]. First, our implicit pose deformation network is agnostic to mesh topology and resolution. Thus our model can be directly applied to unseen 3D stylized characters with significantly different resolutions and mesh topology compared to the training human meshes during inference. Second, stylized characters often include distinct body part shapes compared to humans. For example, the characters shown in Fig. 1 include big heads or various accessories. Previous part-level methods [36] that learn to predict a bone transformation and skinning weight for each body part usually fail on these unique body parts, since they are different from the corresponding human body parts used for training. In contrast, by learning to deform individual surface point, implicit functions are more agnostic to the overall shape of a body part and thus can generalize better to stylized characters with significantly different body part shapes. Fig. 4 and Fig. 6 show these advantages.", + "bbox": [ + 75, + 309, + 470, + 611 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. Volume-based Test-time Training", + "text_level": 1, + "bbox": [ + 76, + 619, + 369, + 636 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The shape understanding and pose deformation modules discussed above are trained with only posed human meshes and unrigged 3D stylized characters in rest pose. When applied to unseen characters with significantly different shapes, we observe surface distortion introduced by the pose deformation module. 
Moreover, it is challenging for the module to fully capture the long tail of the pose distribution. To resolve these issues, we propose to apply test-time training [57] and fine-tune the pose deformation module on unseen stylized characters.", + "bbox": [ + 75, + 643, + 468, + 794 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To encourage natural pose deformation, we further propose a volume-preserving constraint during test-time training. Our key insight is that preserving the volume of each part in the rest pose mesh during pose deformation results in less distortion [35, 62]. However, it is non-trivial to compute the precise volume of each body part, which can have complex geometry. Instead, we propose to preserve the Eu", + "bbox": [ + 75, + 795, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "ccludean distance between pairs of vertices sampled from the surface of the mesh, as a proxy for constraining the volume. Specifically, given a mesh in rest pose, we randomly sample two points $x_{i}^{c}$ and $x_{j}^{c}$ on the surface within the same part $c$ using the part segmentation prediction from the shape understanding module. We calculate the offset of these two points $\\Delta \\hat{x}_{i}^{c}$ and $\\Delta \\hat{x}_{j}^{c}$ using our pose deformation module and minimize the change in the distance between them by:", + "bbox": [ + 496, + 90, + 890, + 212 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {v} = \\sum_ {c} \\sum_ {i} \\sum_ {j} \\left(\\left| \\left| x _ {i} ^ {c} - x _ {j} ^ {c} \\right| \\right| - \\left| \\left| \\left(x _ {i} ^ {c} + \\Delta \\hat {x} _ {i} ^ {c}\\right) - \\left(x _ {j} ^ {c} + \\Delta \\hat {x} _ {j} ^ {c}\\right) \\right| \\right|\\right) ^ {2}. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 498, + 222, + 893, + 268 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "By sampling a large number of point pairs within a part and minimizing Eq. 5, we can approximately maintain the volume of each body part during pose deformation.", + "bbox": [ + 496, + 265, + 890, + 310 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Furthermore, in order to generalize the pose deformation module to long-tail poses that are rarely seen during training, we propose to utilize the source character in rest pose and its deformed shape as paired training data during test-time training. Specifically, we take the source character in rest pose, its target pose code, and its optimized shape code as inputs and we output the movement $\\Delta \\hat{x}^{dr}$ , where $x^{dr}$ is a query point from the source character. We minimize the L2 distance between the predicted movement $\\Delta \\hat{x}^{dr}$ and the ground truth movement $\\Delta x^{dr}$ ,", + "bbox": [ + 496, + 310, + 890, + 460 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {d r} = \\sum_ {x ^ {d r}} \\left| \\left| \\Delta \\hat {x} ^ {d r} - \\Delta x ^ {d r} \\right| \\right| ^ {2}. \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 594, + 472, + 890, + 503 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Besides the volume-preserving constraint and the reconstruction of the source character, we also employ the edge loss $\\mathcal{L}_e$ used in [25, 36, 62]. 
Overall, the objectives for the test-time training procedure are $\\mathcal{L}_{\\mathcal{T}} = \\lambda_v\\mathcal{L}_v + \\lambda_e\\mathcal{L}_e + \\lambda_{dr}\\mathcal{L}_{dr}$ , where $\\lambda_v, \\lambda_e$ , and $\\lambda_{dr}$ are hyper-parameters balancing the loss weights.", + "bbox": [ + 496, + 507, + 890, + 598 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 500, + 611, + 632, + 628 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Datasets", + "text_level": 1, + "bbox": [ + 500, + 635, + 601, + 650 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To train the shape understanding module, we use 40 human meshes sampled from the SMPL [40] parametric model. We use both the occupancy and part segmentation label of these meshes as supervision (see Sec. 3.1). To generalize the shape understanding module to stylized characters, we further include 600 stylized characters from RigNet [63]. Note that we only use the rest pose mesh (i.e., occupancy label) of the characters in [63] for training. To train our pose deformation module, we construct paired training data by deforming each of the 40 SMPL characters discussed above with 5000 pose codes sampled from the VPoser's [50] latent space. In total, we collect 200,000 training pairs, with each pair including an unclothed human mesh in rest pose and the same human mesh in target pose.", + "bbox": [ + 496, + 659, + 890, + 869 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "After training the shape understanding and pose deformation modules, we test them on the Mixamo [1] dataset,", + "bbox": [ + 498, + 869, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "8708", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "which includes challenging stylized characters, and the MGN [11] dataset, which includes clothed humans. The characters in both datasets have different shapes compared to the unclothed SMPL meshes we used for training, demonstrating the generalization ability of the proposed method. Following [36], we test on 19 stylized characters, with each deformed by 28 motion sequences from the Mixamo dataset. For the MGN dataset, we test on 16 clothed characters, with each deformed by 200 target poses. Both the testing characters and poses are unseen during training.", + "bbox": [ + 75, + 90, + 468, + 241 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For quadrupeds, since there is no dataset including large-scale paired stylized quadrupeds for quantitative evaluation, we split all characters from the SMAL [68] dataset and use the first 34 shapes (i.e., cats, dogs, and horses) for training. We further collect 81 stylized quadrupeds in rest pose from the RigNet [63] to improve generalization of the shape understanding module. Similarly to the human category, we use occupancy and part segmentation supervision for the SMAL shapes and only the occupancy supervision for RigNet meshes. To train the pose deformation module, we deform each of the 34 characters in SMAL by 2000 poses sampled from the latent space of BARC [54], a 3D reconstruction model trained for the dog category. We quantitatively evaluate our model on the hippo meshes from the SMAL dataset, which have larger shape variance compared to the cats, dogs, and horses used for training. We produce the testing data by deforming each hippo mesh with 500 unseen target poses from SMAL [68]. 
We show qualitative pose transfer on stylized quadrupeds in Fig. 1.", + "bbox": [ + 75, + 242, + 470, + 530 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Implementation Details", + "text_level": 1, + "bbox": [ + 76, + 540, + 294, + 556 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We use the ADAM [30] optimizer to train both the shape understanding and pose deformation modules. For the shape understanding module, we use a learning rate of $1e - 4$ for both the decoder and shape code optimization, with a batch size of 64. Given a new character at inference time, we fix the decoder and only optimize the shape code for the new character with the same optimizer and learning rate. For the pose deformation module, we use a learning rate of $3e - 4$ with a batch size of 128. For test-time training, we use a batch size of 1 and a learning rate of $5e - 3$ with the ADAM optimizer. We set $\\lambda_v$ , $\\lambda_e$ , and $\\lambda_{dr}$ (See Sec. 3.3) as 0.05, 0.01, and 1 respectively.", + "bbox": [ + 75, + 564, + 468, + 744 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Metrics and Baselines for Comparison", + "text_level": 1, + "bbox": [ + 76, + 756, + 406, + 772 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Metrics. We use Point-wise Mesh Euclidean Distance (PMD) [36, 62] to evaluate pose transfer error. The PMD metric reveals pose similarity of the predicted deformation compared to its ground truth. However, as shown in Fig. 4, PMD can not fully show the smoothness and realism of the generated results. Thus, we adopt an edge length score (ELS) metric to evaluate the character's smoothness after the deformation. Specifically, we compare each edge's", + "bbox": [ + 75, + 779, + 467, + 900 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/df81a559b2c5a17b2caaba17ccca1bc676cda20344b82987e19d802d405b76ac.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetMetricSPT*(full) [36]NBS [35]SPT [36]Ours
MGN [11]PMD ↓1.621.331.820.99
ELS ↑0.860.700.850.89
Mixamo [1]PMD ↓3.057.045.295.06
ELS ↑0.610.660.590.88
", + "bbox": [ + 506, + 89, + 883, + 155 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1. Quantitative comparison on MGN and Mixamo. Our method achieves the lowest PMD with the highest ELS. We provide the performance of the SPT*(full) method, which uses more supervision than the other methods as a reference. Our method is even better or comparable to it.", + "bbox": [ + 498, + 165, + 890, + 234 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "length in the deformed mesh with the corresponding edge's length in the ground truth mesh. We define the score as", + "bbox": [ + 498, + 244, + 890, + 275 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{| \\mathcal {E} |} \\sum_ {\\{i, j \\} \\sim \\mathcal {E}} 1 - \\left| \\frac {| | \\hat {V} _ {i} - \\hat {V} _ {j} | | _ {2}}{| | V _ {i} - V _ {j} | | _ {2}} - 1 \\right|, \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 580, + 287, + 890, + 330 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\mathcal{E}$ indicates all edges of the mesh, $|\\mathcal{E}|$ is the number of the edges in the mesh. $\\hat{V}_i$ and $\\hat{V}_j$ are the vertices in the deformed mesh. $V_{i}$ and $V_{j}$ are the vertices in the ground truth mesh. For all the evaluation metrics, we scale the template character to be 1 meter tall, following [36].", + "bbox": [ + 496, + 335, + 890, + 411 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Baselines. We compare our method with Neural Blend Shapes (NBS) [35] and Skeleton-free Pose Transfer (SPT) [36]. NBS is a rigging prediction method trained on the SMPL and MGN datasets, which include naked and clothed human meshes with ground truth rigging information. For SPT, we show the results of two versions, one is trained only on the AMASS dataset, named SPT, which has a comparable level of supervision to our method. We also test the SPT*(full) version, which is trained on the AMASS, RigNet and Mixamo datasets, using both stylized characters' skinning weights as supervision and paired stylized characters in rest pose and target pose.", + "bbox": [ + 496, + 412, + 890, + 594 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4. Human-like Character Pose Transfer", + "text_level": 1, + "bbox": [ + 500, + 604, + 821, + 619 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We report the PMD metric on the MGN and Mixamo datasets in Tab. 1. We also include the performance of SPT*(full) for reference. On the MGN dataset which includes clothed humans, our method which is trained with only unclothed humans achieve the best PMD score than all baseline methods, including baselines trained with more supervision (i.e., the NBS [35] learned with clothed humans and the SPT*(full) [36] learned with skinning weight and paired motion data). For the stylized characters, our method outperforms the SPT baseline learned with a comparable amount of supervision and gets competitive results with the NBS [35] and SPT*(full) baseline trained with more supervision. Furthermore, when testing on the more challenging, less human-like characters (e.g., a mouse with a big head in Fig. 1), the baselines produce noticeable artifacts and rough surfaces, which can be observed in the qualitative comparisons in Fig. 4. 
We provide the PMD value for each character in the supplementary.", + "bbox": [ + 496, + 628, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "8709", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/4373cf912884415bb198f2ddc519573b1b1bd0db9d3334d3f0dc36b7681ae064.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 169, + 71, + 243, + 154 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c3dff5e8dad3476f2d059883ef3d74710ea2d082abb446921120c15296e71eaf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 169, + 157, + 243, + 233 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/952d1fa77e29412929bf41fbba9b85a6ff632e47932a3704ef5124044cf10bce.jpg", + "image_caption": [ + "Source" + ], + "image_footnote": [], + "bbox": [ + 171, + 239, + 230, + 310 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b1e6dd9f49d0f714b3af78032fbc4563ca35b398a8d566e0f0b1a4db98344968.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 258, + 73, + 357, + 152 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3a7faa429e1cac3f2a004eb6235ddf01d8e00711b8c40b5dfcc94d7124f2b473.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 261, + 154, + 354, + 233 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/486462331e99d4af63c55bab579dded47eaed63a4861602bab0512fb89cf06fc.jpg", + "image_caption": [ + "Target" + ], + "image_footnote": [], + "bbox": [ + 264, + 238, + 357, + 313 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e81d777fd3fcf00216ef88027d1be0cfe145ddf5d34f3843baeff045094f35f8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 369, + 73, + 472, + 232 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/add374012e2123a228fcefe50ac76f5ab02ddf5d96b4deeff31e6509a1abf23e.jpg", + "image_caption": [ + "NBS [35]" + ], + "image_footnote": [], + "bbox": [ + 388, + 238, + 449, + 313 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/cd03b14f06fc44b5f5948f3046818372a221cbafefcd8bc05645c834e87bffa7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 71, + 578, + 154 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ed7ecd982161013fa5ed72db836e9357462eda131b179bc48db7d1b9db2cb163.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 154, + 576, + 236 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/608f1fbb2f0219e590b4adf949c59768bc908cc850bbaac5cdd0db00d9a53261.jpg", + "image_caption": [ + "SPT [36]" + ], + "image_footnote": [], + "bbox": [ + 508, + 239, + 565, + 311 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0525239d0042cc48abf9e1689fc7ce77d739b320fa767bc0d09b3929d1475690.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 612, + 71, + 669, + 152 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2c977e4e5a8760dfd73cb0657491914acda2ba3239bc9daef811cdeab81b6850.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 612, + 154, + 692, + 234 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ee6cb725ab001d3127ba7fdfc22842d58bbb3a697126f8a3c69c2e3bae3de251.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 619, + 238, + 678, + 313 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": 
"images/238231f66f59fef062c826ccc38980589d86e8a09236c4769eabe7137514659c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 720, + 71, + 797, + 154 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b9a701a555f870eeececddefa3c053b9380b1dad28769b8d15b8461255ead581.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 722, + 154, + 795, + 234 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3e575671bfe48c8a1a7512eb1aaf766a19aa0fafbff6e85a954d9ff09e98021c.jpg", + "image_caption": [ + "GT" + ], + "image_footnote": [], + "bbox": [ + 728, + 238, + 792, + 311 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/28a16782138344dbf2157544077b57705a8741cef1658f2c44d1f82f17586ce9.jpg", + "image_caption": [ + "Figure 5. Part segmentation visualization. NBS makes wrong predictions for hair while SPT may mix the upper legs." + ], + "image_footnote": [], + "bbox": [ + 91, + 386, + 434, + 648 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We show the ELS score comparison of different methods on the MGN and Mixamo datasets in Tab. 1. For both clothed humans and stylized characters, our method can generate more realistic results which are consistent with the target mesh and achieves the best ELS score.", + "bbox": [ + 75, + 700, + 468, + 775 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We visually compare our method and the baseline methods in Fig. 4 on the Mixamo dataset. Although NBS is trained with a clothed-human dataset, when testing on the human-like characters, it still fails on parts that are separate from the body such as the hair and the pants. When using only naked human meshes as supervision, SPT cannot generalize to challenging human-like characters, producing rough mesh surface with spikes.", + "bbox": [ + 75, + 779, + 470, + 902 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/480e4d440ac18475299a55cbd4e001af2d9ae4c3a990eda816afb0c67df4c577.jpg", + "image_caption": [ + "Figure 4. Qualitative comparison on Mixamo. The average PMD of these three results for NBS, SPT, and Ours are 8.16, 6.13, and 5.16 respectively and the average ELS for NBS, SPT, and Ours are 0.65, 0.78, and 0.93 respectively. Our method can successfully transfer the pose to challenging stylized characters (e.g., the mouse with a big head in the second row)." 
+ ], + "image_footnote": [], + "bbox": [ + 506, + 381, + 624, + 434 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8cce0cbc2c5271d9463dbb46bc10c1a1b56108aafeda84a396a145f818b87816.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 508, + 439, + 625, + 479 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/10a7eafaed4ee1a0eb0a58c40916888b9122ff4860b3fc3a70e31137b6426508.jpg", + "image_caption": [ + "SPT [36]" + ], + "image_footnote": [], + "bbox": [ + 511, + 487, + 622, + 537 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/9f2bfc42b2287de21b24f395c4be405319035d4b96ab4529afe1588ede1f50ae.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 633, + 382, + 741, + 428 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/22672dbf3808361df124c6dee0ccc9a194ed70ba2360e598c0d2e4622fe62124.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 633, + 438, + 750, + 479 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/21f7c6cc2c5c91b5743f1aa1e50965fcf337a4ddff7269bbfc7896af3776ffaf.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 633, + 487, + 743, + 536 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/017e7ce6912aaa677ff04da374dba94d3829be6008c0d2a19eacd9b95346d390.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 751, + 382, + 857, + 429 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/4bdb35185ddeab2f0f2d7bc5aaaebc7cbd67ac759c0ab2c00383fd165bab2ad4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 754, + 438, + 872, + 481 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b70da364d0efbbd894e4ad8718211b20b1a9777381f9d1d7a40d2d2bf72e3534.jpg", + "image_caption": [ + "GT", + "Figure 6. Quadrupedal pose transfer visualization. Our method can achieve smooth and accurate pose transfer while SPT fails on the mouth and leg regions." + ], + "image_footnote": [], + "bbox": [ + 754, + 487, + 864, + 535 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/cacbf14f020635b037d362c51c25b3bc2e0add3ea22e75a482015375212a1382.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MetricNBS [35]SPT [36]Ours
Accuracy ↑67.8%75.6%86.9%
", + "bbox": [ + 558, + 621, + 834, + 652 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2. Part prediction accuracy on Mixamo [1]. Our method achieves the best part segmentation accuracy.", + "bbox": [ + 500, + 662, + 890, + 691 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.5. Part Understanding Comparison", + "text_level": 1, + "bbox": [ + 500, + 710, + 787, + 728 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As discussed in Sec. 3.1, part segmentation plays an important role in both shape understanding and pose deformation. Though NBS [35] and SPT [36] do not explicitly predict part segmentation label, they are both skinning weight-based methods and we can derive the part segmentation label from the predicted skinning weights. Specifically, by selecting the maximum weight of each vertex, we can convert the skinning weight prediction to part segmentation labels for the vertices. We compare our part prediction results with those derived from SPT and NBS. We report the part segmentation accuracy on the Mixamo datasets in Tab. 2", + "bbox": [ + 496, + 734, + 893, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "8710", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8c1b2f4ff36dcae4437e98663db3b57ac789d1a88eb4f73a2ca28d0574424ce4.jpg", + "image_caption": [ + "Figure 7. Qualitative comparison for ablation study. Removing the constraint (eq. 1) in shape understanding leads to wrong pose deformation results. The volume preserving loss (eq. 5) helps to maintain the identity, e.g., the thickness of the arms in first row." + ], + "image_footnote": [], + "bbox": [ + 89, + 73, + 441, + 314 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/3678981c2558f768c774c1d992e16cc744308b5218126001fdf01af245c32d3a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MetricSPT [36]OursMetricSPT [36]Ours
PMD ↓10.288.28ELS ↑0.280.86
", + "bbox": [ + 101, + 383, + 442, + 414 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "and visualize the part segmentation results in Fig. 5. Even trained with only part segmentation supervision of human meshes, our method can successfully segment each part for the stylized characters. On the contrary, SPT uses graph convolution network [31] to predict the skinning weights. When training only with human meshes, it often fails to distinguish different parts. As shown in Fig. 5, it mixes up the right and left upper legs, and incorrectly classifies the shoulder as the head. Though NBS is trained with clothed humans, it always classifies human hair as the human body for characters from Mixamo. This is because that NBS uses the MeshCNN [26] as the shape encoder. As a result, it is sensitive to mesh topology and cannot generalize to meshes with disconnected parts (e.g., disconnected hair and head). Tab. 2 further quantitatively demonstrates that our method achieves the best part segmentation accuracy, demonstrating its ability to correctly interpret the shape and part information in stylized characters.", + "bbox": [ + 75, + 458, + 472, + 731 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.6. Quadrupedal Pose Transfer Comparison", + "text_level": 1, + "bbox": [ + 76, + 741, + 426, + 758 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To further show the generalization ability of our method, we conduct experiments on quadrupeds. We report the PMD and ELS score of our method and the SPT [36] in Tab. 3. When testing on hippos with large shape gap from the training meshes, SPT has a hard time generalizing both in terms of pose transfer accuracy and natural deformation. While our method achieves both better qualitative and quantitative results. We visualize the qualitative comparisons in Fig. 6. SPT produces obvious artifacts on the hippo's mouth", + "bbox": [ + 75, + 763, + 468, + 902 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/9c2fc818be70357453cc8987f0da7374f84f7b0d10915ad2a88d0bf0c98689c4.jpg", + "image_caption": [ + "Figure 8. Part prediction on stylized quadrupeds. Our method successfully predicts the parts of unseen stylized quadrupeds." + ], + "image_footnote": [], + "bbox": [ + 517, + 73, + 867, + 143 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/654945bb1d1502ca0e35cb28c00ec9d2eea13ee4e035665264b2cada172ef8b6.jpg", + "table_caption": [ + "Table 3. Comparison on Hippos from SMAL [68]. Our method achieves better pose transfer accuracy with more smooth results." + ], + "table_footnote": [], + "table_body": "
MetricOurs w/o invOurs w/o volumeOurs
PMD ↓1.261.020.99
ELS ↑0.880.880.89
", + "bbox": [ + 542, + 185, + 846, + 228 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 4. Ablation study on inverse MLP and volume preserving loss. The inverse MLP and volume preserving loss helps to improve pose transfer accuracy and produce smooth deformation.", + "bbox": [ + 498, + 234, + 890, + 277 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "and legs, while our method achieves accurate pose transfer and maintains the shape characteristics of the original character at the same time. We provide more results in the supplementary. We also show the part segmentation results on stylized characters by our method in Fig. 8. Even for unique parts such as the hats and antlers, our method correctly assigns them to the head part.", + "bbox": [ + 496, + 285, + 890, + 391 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.7. Ablation Study", + "text_level": 1, + "bbox": [ + 500, + 400, + 651, + 416 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To evaluate the key components of our method, we conduct ablation studies on the MGN dataset by removing the inverse constraint (Eq. 3) in the shape understanding module and the volume-preserving loss (Eq. 5) used during the test-time training produce, we name them as \"ours w/o inv\" and \"ours w/o $v$ \" respectively. We report the PMD and ELS metrics in Tab. 4. The model learned without the inverse constraint or volume-preserving loss has worse PMD and ELS score than our full model, indicating the contribution of these two objectives. We also provide qualitative results in Fig. 7. We use red boxes to point out the artifacts. As shown in Fig. 7, our model trained without the inverse constraint produces less accurate pose transfer results. Moreover, adding the volume-preserving loss helps to maintain the character's local details such as the thickness of the arms.", + "bbox": [ + 496, + 424, + 890, + 664 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 679, + 617, + 694 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we present a model that deforms unrigged, stylized characters guided by a biped or quadruped avatar. Our model is trained with only easily accessible posed human or animal meshes, yet can be applied to unseen stylized characters in a zero-shot manner during inference. To this end, we draw key insights from classic mesh deformation method and develop a correspondence-aware shape understanding module, an implicit pose deformation module and a volume-based test-time training procedure. We carry out extensive experiments on both the biped and quadruped category and show that our method produces more realistic and accurate deformation compared to baselines learned with comparable or more supervision.", + "bbox": [ + 496, + 704, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8711", + "bbox": [ + 482, + 944, + 513, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Mixamo. http://www MIXamo.com/. Accessed on November 09th, 2022. 5, 6, 7", + "[2] Kfir Aberman, Peizhuo Li, Dani Lischinski, Olga Sorkine-Hornung, Daniel Cohen-Or, and Baoquan Chen. Skeleton-aware networks for deep motion retargeting. In ACM Transactions on Graphics (SIGGRAPH), 2020. 
2, 3", + "[3] Noam Aigerman, Kunal Gupta, Vladimir G Kim, Siddhartha Chaudhuri, Jun Saito, and Thibault Groueix. Neural jacobian fields: Learning intrinsic mappings of arbitrary meshes. arXiv preprint arXiv:2205.02904, 2022. 2", + "[4] Mazen Al Borno, Ludovic Righetti, Michael J Black, Scott L Delp, Eugene Fiume, and Javier Romero. Robust physics-based motion retargeting with realistic body shapes. In Computer Graphics Forum. Wiley Online Library, 2018. 2, 3", + "[5] Andreas Aristidou and Joan Lasenby. FABRIK: A fast, iterative solver for the inverse kinematics problem. Graphical Models, 2011. 3", + "[6] Quentin Avril, Donya Ghafourzadeh, Srinivasan Ramachandran, Sahel Fallahdoust, Sarah Ribet, Olivier Dionne, Martin de Lasa, and Eric Paquette. Animation setup transfer for 3D characters. In Computer Graphics Forum, 2016. 2", + "[7] Ilya Baran and Jovan Popovic. Automatic rigging and animation of 3D characters. In ACM Transactions on Graphics (SIGGRAPH), 2007. 2, 3", + "[8] Ilya Baran, Daniel Vlasic, Eitan Grinspun, and Jovan Popovic. Semantic deformation transfer. In ACM Transactions on Graphics (ToG). 2009. 2", + "[9] Mirela Ben-Chen, Ofir Weber, and Craig Gotsman. Spatial deformation transfer. In Proceedings of the 2009 ACM SIGGRAPH/Eurographics Symposium on Computer Animation, 2009. 2", + "[10] Bharat Lal Bhatnagar, Cristian Sminchisescu, Christian Theobalt, and Gerard Pons-Moll. Combining implicit function learning and parametric models for 3D human reconstruction. In European Conference on Computer Vision (ECCV), 2020. 3", + "[11] Bharat Lal Bhatnagar, Garvita Tiwari, Christian Theobalt, and Gerard Pons-Moll. Multi-garment net: Learning to dress 3D people from images. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 6", + "[12] Federica Bogo, Javier Romero, Matthew Loper, and Michael J Black. FAUST: Dataset and evaluation for 3D mesh registration. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2014. 2", + "[13] Rohan Chabra, Jan E Lenssen, Eddy Ilg, Tanner Schmidt, Julian Straub, Steven Lovegrove, and Richard Newcombe. Deep local shapes: Learning local sdf priors for detailed 3D reconstruction. In European Conference on Computer Vision (ECCV), 2020. 3", + "[14] Haoyu Chen, Hao Tang, Henglin Shi, Wei Peng, Nicu Sebe, and Guoying Zhao. Intrinsic-extrinsic preserved gans for unsupervised 3D pose transfer. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[15] Ricky TQ Chen, Yulia Rubanova, Jesse Bettencourt, and David K Duvenaud. Neural ordinary differential equa" + ], + "bbox": [ + 78, + 114, + 467, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "tions. Advances in Neural Information Processing Systems (NeurIPS), 2018. 3", + "[16] Zhiqin Chen and Hao Zhang. Learning implicit fields for generative shape modeling. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 3", + "[17] An-Chieh Cheng, Xueting Li, Min Sun, Ming-Hsuan Yang, and Sifei Liu. Learning 3D dense correspondence via canonical point autoencoder. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 4", + "[18] Julian Chibane, Thiemo Alldieck, and Gerard Pons-Moll. Implicit functions in feature space for 3D shape reconstruction and completion. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 3", + "[19] Kwang-Jin Choi and Hyeong-Seok Ko. Online motion retargeting. Comput. Animat. Virtual Worlds, 2000. 
3", + "[20] Brian Delhaisse, Domingo Esteban, Leonel Rozo, and Darwin Caldwell. Transfer learning of shared latent spaces between robots with similar kinematic structure. In International Joint Conference on Neural Networks (IJCNN), 2017. 3", + "[21] Philipp Erler, Paul Guerrero, Stefan Ohrhallinger, Niloy J Mitra, and Michael Wimmer. Points2surf learning implicit surfaces from point clouds. In European Conference on Computer Vision (ECCV), 2020. 3", + "[22] Lin Gao, Jie Yang, Yi-Ling Qiao, Yu-Kun Lai, Paul L Rosin, Weiwei Xu, and Shihong Xia. Automatic unpaired shape deformation transfer. ACM Transactions on Graphics (ToG), 2018. 2, 5", + "[23] Kyle Genova, Forrester Cole, Daniel Vlasic, Aaron Sarna, William T Freeman, and Thomas Funkhouser. Learning shape templates with structured implicit functions. In IEEE International Conference on Computer Vision (ICCV), 2019. 3", + "[24] Michael Gleicher. Retargetting motion to new characters. In Proceedings of the 25th annual conference on Computer graphics and interactive techniques, 1998. 2, 3", + "[25] Thibault Groueix, Matthew Fisher, Vladimir G Kim, Bryan C Russell, and Mathieu Aubry. 3D-CODED: 3D correspondences by deep deformation. In European Conference on Computer Vision (ECCV), 2018. 5", + "[26] Rana Hanocka, Amir Hertz, Noa Fish, Raja Giryes, Shachar Fleishman, and Daniel Cohen-Or. MeshCNN: a network with an edge. ACM Transactions on Graphics (ToG), 2019. 8", + "[27] Hanyoung Jang, Byungjun Kwon, Moonwon Yu, Seong Uk Kim, and Jongmin Kim. A variational U-Net for motion retargeting. In Comput. Animat. Virtual Worlds, 2020. 3", + "[28] Boyan Jiang, Yinda Zhang, Xingkui Wei, Xiangyang Xue, and Yanwei Fu. Learning compositional representation for 4D captures with neural ode. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 3", + "[29] Chiyu Jiang, Avneesh Sud, Ameesh Makadia, Jingwei Huang, Matthias Nießner, Thomas Funkhouser, et al. Local implicit grid representations for 3D scenes. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 3" + ], + "bbox": [ + 503, + 93, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "8712", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[30] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. International Conference on Learning Representations (ICLR), 2015. 6", + "[31] Thomas N Kipf and Max Welling. Semi-supervised classification with graph convolutional networks. In International Conference on Learning Representations (ICLR), 2017. 8", + "[32] Muhammed Kocabas, Nikos Athanasiou, and Michael J Black. Vibe: Video inference for human body pose and shape estimation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 4", + "[33] Jehee Lee and Sung Yong Shin. A hierarchical approach to interactive motion editing for human-like figures. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, 1999. 3", + "[34] Jiahui Lei and Kostas Daniilidis. CaDeX: Learning canonical deformation coordinate space for dynamic surface representation via neural homeomorphism. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 3", + "[35] Peizhuo Li, Kfir Aberman, Rana Hanocka, Libin Liu, Olga Sorkine-Hornung, and Baoquan Chen. Learning skeletal articulations with neural blend shapes. In ACM Transactions on Graphics (SIGGRAPH), 2021. 
2, 5, 6, 7", + "[36] Zhouyingcheng Liao, Jimei Yang, Jun Saito, Gerard Pons-Moll, and Yang Zhou. Skeleton-free pose transfer for stylized 3D characters. In European Conference on Computer Vision (ECCV), 2022. 3, 4, 5, 6, 7, 8", + "[37] Jongin Lim, Hyung Jin Chang, and Jin Young Choi. PMnet: Learning of disentangled pose and movement for unsupervised motion retargeting. In British Machine Vision Conference (BMVC), 2019. 3", + "[38] Feng Liu and Xiaoming Liu. Learning implicit functions for topology-varying dense 3D shape correspondence. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 4", + "[39] Lijuan Liu, Youyi Zheng, Di Tang, Yi Yuan, Changjie Fan, and Kun Zhou. Neuroskinning: Automatic skin binding for production characters with deep graph networks. ACM Transactions on Graphics (ToG), 2019. 3", + "[40] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J Black. SMPL: A skinned multiperson linear model. ACM Transactions on Graphics (ToG), 2015. 2, 5", + "[41] Naureen Mahmood, Nima Ghorbani, Nikolaus F. Troje, Gerard Pons-Moll, and Michael J. Black. AMASS: Archive of motion capture as surface shapes. In IEEE International Conference on Computer Vision (ICCV), 2019. 2, 4", + "[42] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3D reconstruction in function space. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 3", + "[43] Mateusz Michalkiewicz, Jhony K Pontes, Dominic Jack, Mahsa Baktashmotlagh, and Anders Eriksson. Deep level sets: Implicit surface representations for 3D shape inference. arXiv preprint arXiv:1901.06802, 2019. 3", + "[44] Marko Mihajlovic, Yan Zhang, Michael J Black, and Siyu Tang. LEAP: Learning articulated occupancy of people. In" + ], + "bbox": [ + 78, + 90, + 470, + 901 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 3", + "[45] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Occupancy flow: 4D reconstruction by learning particle dynamics. In IEEE International Conference on Computer Vision (ICCV), 2019. 3", + "[46] Atsuhiro Noguchi, Umar Iqbal, Jonathan Tremblay, Tatsuya Harada, and Orazio Gallo. Watch it move: Unsupervised discovery of 3D joints for re-posing of articulated objects. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 3", + "[47] Pablo Palafox, Aljaž Božić, Justus Thies, Matthias Nießner, and Angela Dai. NPMs: Neural parametric models for 3D deformable shapes. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2, 3, 4", + "[48] Pablo Palafox, Nikolaos Sarafianos, Tony Tung, and Angela Dai. SPAMs: Structured implicit parametric models. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 3", + "[49] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning continuous signed distance functions for shape representation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 3", + "[50] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed AA Osman, Dimitrios Tzionas, and Michael J Black. Expressive body capture: 3D hands, face, and body from a single image. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 
2, 5", + "[51] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed A. A. Osman, Dimitrios Tzionas, and Michael J. Black. Expressive body capture: 3D hands, face, and body from a single image. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 4", + "[52] Martin Poirier and Eric Paquette. Rig retargeting for 3d animation. In Proceedings of the Graphics Interface 2009 Conference, 2009. 2, 3", + "[53] Davis Rempe, Tolga Birdal, Aaron Hertzmann, Jimei Yang, Srinath Sridhar, and Leonidas J Guibas. Humor: 3D human motion model for robust pose estimation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2, 4", + "[54] Nadine Ruegg, Silvia Zuffi, Konrad Schindler, and Michael J Black. BARC: Learning to regress 3D dog shape from images by exploiting breed information. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 6", + "[55] Robert W Sumner and Jovan Popovic. Deformation transfer for triangle meshes. ACM Transactions on Graphics (ToG), 2004. 2, 3, 5", + "[56] Robert W Sumner, Johannes Schmid, and Mark Pauly. Embedded deformation for shape manipulation. In ACM Transactions on Graphics (SIGGRAPH). 2007. 2, 3", + "[57] Yu Sun, Xiaolong Wang, Zhuang Liu, John Miller, Alexei Efros, and Moritz Hardt. Test-time training with self-supervision for generalization under distribution shifts. In International Conference on Machine Learning (ICML), 2020. 5" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "8713", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[58] Ramana Sundararaman, Gautam Pai, and Maks Ovsjanikov. Implicit field supervision for robust non-rigid shape matching. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part III, pages 344-362. Springer, 2022. 3", + "[59] Seyoon Tak and Hyeong-Seok Ko. A physically-based motion retargeting filter. In ACM Transactions on Graphics (ToG), 2005. 3", + "[60] Ruben Villegas, Duygu Ceylan, Aaron Hertzmann, Jimei Yang, and Jun Saito. Contact-aware retargeting of skinned motion. In IEEE International Conference on Computer Vision (ICCV), 2021. 3", + "[61] Ruben Villegas, Jimei Yang, Duygu Ceylan, and Honglak Lee. Neural kinematic networks for unsupervised motion retargeting. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 3", + "[62] Jiashun Wang, Chao Wen, Yanwei Fu, Haitao Lin, Tianyun Zou, Xiangyang Xue, and Yinda Zhang. Neural pose transfer by spatially adaptive instance normalization. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 5, 6", + "[63] Zhan Xu, Yang Zhou, Evangelos Kalogerakis, Chris Landreth, and Karan Singh. RigNet: Neural rigging for articulated characters. In ACM Transactions on Graphics (SIGGRAPH), 2020. 3, 5, 6", + "[64] Zhan Xu, Yang Zhou, Evangelos Kalogerakis, and Karan Singh. Predicting animation skeletons for 3D articulated models via volumetric nets. In International Conference on 3D Vision, 2019. 3", + "[65] Jie Yang, Lin Gao, Yu-Kun Lai, Paul L Rosin, and Shihong Xia. Biharmonic deformation transfer with automatic key point selection. Graphical Models, 2018. 2", + "[66] Wang Yifan, Noam Aigerman, Vladimir G Kim, Siddhartha Chaudhuri, and Olga Sorkine-Hornung. Neural cages for detail-preserving 3D deformations. 
In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2", + "[67] Keyang Zhou, Bharat Lal Bhatnagar, and Gerard Pons-Moll. Unsupervised shape and pose disentanglement for 3D meshes. In European Conference on Computer Vision (ECCV), 2020. 2", + "[68] Silvia Zuffi, Angjoo Kanazawa, David W Jacobs, and Michael J Black. 3D Menagerie: Modeling the 3D shape and pose of animals. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 2, 6, 8" + ], + "bbox": [ + 78, + 90, + 468, + 712 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "8714", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2023/Zero-Shot Pose Transfer for Unrigged Stylized 3D Characters/5ada3ef3-b974-4dfe-9b9c-f168fb79ad07_model.json b/2023/Zero-Shot Pose Transfer for Unrigged Stylized 3D Characters/5ada3ef3-b974-4dfe-9b9c-f168fb79ad07_model.json new file mode 100644 index 0000000000000000000000000000000000000000..0bd1b9f99d41ccfd415d849bfe1b624eb1cc80df --- /dev/null +++ b/2023/Zero-Shot Pose Transfer for Unrigged Stylized 3D Characters/5ada3ef3-b974-4dfe-9b9c-f168fb79ad07_model.json @@ -0,0 +1,2532 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.175, + 0.131, + 0.795, + 0.154 + ], + "angle": 0, + "content": "Zero-shot Pose Transfer for Unrigged Stylized 3D Characters" + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.179, + 0.74, + 0.237 + ], + "angle": 0, + "content": "Jiashun Wang\\(^{1*}\\) Xueting Li\\(^{2}\\) Sifei Liu\\(^{2}\\) Shalini De Mello\\(^{2}\\) Orazio Gallo\\(^{2}\\) Xiaolong Wang\\(^{3}\\) Jan Kautz\\(^{2}\\) \n\\(^{1}\\)Carnegie Mellon University \\({}^{2}\\)NVIDIA \\({}^{3}\\)UC San Diego" + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.244, + 0.898, + 0.522 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.533, + 0.893, + 0.564 + ], + "angle": 0, + "content": "Figure 1. Our algorithm transfers the pose of a reference avatar (source) to stylized characters. Unlike existing methods, at training time our approach needs only the mesh of the source avatar in rest and desired pose, and the mesh of the stylized character only in rest pose." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.578, + 0.314, + 0.594 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.612, + 0.474, + 0.87 + ], + "angle": 0, + "content": "Transferring the pose of a reference avatar to stylized 3D characters of various shapes is a fundamental task in computer graphics. Existing methods either require the stylized characters to be rigged, or they use the stylized character in the desired pose as ground truth at training. We present a zero-shot approach that requires only the widely available deformed non-stylized avatars in training, and deforms stylized characters of significantly different shapes at inference. Classical methods achieve strong generalization by deforming the mesh at the triangle level, but this requires labelled correspondences. 
We leverage the power of local deformation, but without requiring explicit correspondence labels. We introduce a semi-supervised shape-understanding module to bypass the need for explicit correspondences at test time, and an implicit pose deformation module that deforms individual surface points to match the target pose. Furthermore, to encourage realistic and accurate deformation of" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.579, + 0.895, + 0.716 + ], + "angle": 0, + "content": "stylized characters, we introduce an efficient volume-based test-time training procedure. Because it does not need rigging, nor the deformed stylized character at training time, our model generalizes to categories with scarce annotation, such as stylized quadrupeds. Extensive experiments demonstrate the effectiveness of the proposed method compared to the state-of-the-art approaches trained with comparable or more supervision. Our project page is available at https://jiashunwang.github.io/ZPT/" + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.752, + 0.633, + 0.768 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.894, + 0.903 + ], + "angle": 0, + "content": "Stylized 3D characters, such as those in Fig. 1, are commonly used in animation, movies, and video games. Deforming these characters to mimic natural human or animal poses has been a long-standing task in computer graphics. Different from the 3D models of natural humans and animals, stylized 3D characters are created by professional artists through imagination and exaggeration. As a result, each stylized character has a distinct skeleton, shape, mesh" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.407, + 0.901 + ], + "angle": 0, + "content": "*Work done during Jiashun Wang's internship at NVIDIA." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8704" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.473, + 0.622 + ], + "angle": 0, + "content": "topology, and usually include various accessories, such as a cloak or wings (see Fig. 1). These variations hinder the process of matching the pose of a stylized 3D character to that of a reference avatar, generally making manual rigging a requirement. Unfortunately, rigging is a tedious process that requires manual effort to create the skeleton and skinning weights for each character. Even when provided with manually annotated rigs, transferring poses from a source avatar onto stylized characters is not trivial when the source and target skeletons differ. Automating this procedure is still an open research problem and is the focus of many recent works [2, 4, 24, 52]. Meanwhile, non-stylized 3D humans and animals have been well-studied by numerous prior works [35, 40, 54, 62, 68]. A few methods generously provide readily available annotated datasets [11, 12, 41, 68], or carefully designed parametric models [40, 51, 68]. By taking advantage of these datasets [12, 41], several learning-based methods [7, 14, 35, 62, 67] disentangle and transfer poses between human meshes using neural networks. However, these methods (referred to as \"part-level\" in the following) carry out pose transfer by either globally deforming the whole body mesh [14, 22, 47, 67] or by transforming body parts [35, 48], both of which lead to overfitting on the training human meshes and fail to generalize to stylized characters with significantly different body part shapes. 
Interestingly, classical mesh deformation methods [55, 56] (referred to as \"local\" in the following) can transfer poses between a pair of meshes with significant shape differences by computing and transferring per-triangle transformations through correspondence. Though these methods require manual correspondence annotation between the source and target meshes, they provide a key insight that by transforming individual triangles instead of body parts, the mesh deformation methods are more agnostic to a part's shape and can generalize to meshes with different shapes." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.629, + 0.473, + 0.903 + ], + "angle": 0, + "content": "We marry the benefits of learning-based methods [7, 14, 35, 62, 67] with the classic local deformation approach [55] and present a model for unrigged, stylized character deformation guided by a non-stylized biped or quadruped avatar. Notably, our model only requires easily accessible posed human or animal meshes for training and can be directly applied to deform 3D stylized characters with a significantly different shape at inference. To this end, we implicitly operationalize the key insight from the local deformation method [55] by modeling the shape and pose of a 3D character with a correspondence-aware shape understanding module and an implicit pose deformation module. The shape understanding module learns to predict the part segmentation label (i.e., the coarse-level correspondence) for each surface point, besides representing the shape of a 3D character as a latent shape code. The pose deformation module is conditioned on the shape code and deforms individual surface point guided by a target pose code sampled" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.152 + ], + "angle": 0, + "content": "from a prior pose latent space [50]. Furthermore, to encourage realistic deformation and generalize to rare poses, we propose a novel volume-based test-time training procedure that can be efficiently applied to unseen stylized characters." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.153, + 0.894, + 0.227 + ], + "angle": 0, + "content": "During inference, by mapping biped or quadruped poses from videos, in addition to meshes to the prior pose latent space using existing works [32, 51, 53], we can transfer poses from different modalities onto unrigged 3D stylized characters. Our main contributions are:" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.228, + 0.891, + 0.272 + ], + "angle": 0, + "content": "- We propose a solution to a practical and challenging task - learning a model for stylized 3D character deformation with only posed human or animal meshes." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.273, + 0.892, + 0.348 + ], + "angle": 0, + "content": "- We develop a correspondence-aware shape understanding module, an implicit pose deformation module, and a volume-based test-time training procedure to generalize the proposed model to unseen stylized characters and arbitrary poses in a zero-shot manner." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.349, + 0.892, + 0.424 + ], + "angle": 0, + "content": "- We carry out extensive experiments on both humans and quadrupeds to show that our method produces more visually pleasing and accurate deformations compared to baselines trained with comparable or more supervision." 
+ }, + { + "type": "list", + "bbox": [ + 0.518, + 0.228, + 0.892, + 0.424 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.438, + 0.642, + 0.453 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.463, + 0.892, + 0.794 + ], + "angle": 0, + "content": "Deformation Transfer. Deformation transfer is a longstanding problem in the computer graphics community [3, 6, 8, 9, 55, 65]. Sumner et al. [55] apply an affine transformation to each triangle of the mesh to solve an optimization problem that matches the deformation of the source mesh while maintaining the shape of the target mesh. Ben-Chen et al. [9] enclose the source and target shapes with two cages and transfer the Jacobians of the source deformation to the target shape. However, these methods need tedious human efforts to annotate the correspondence between the source and target shapes. More recently, several deep learning methods are developed to solve the deformation transfer task. However, they either require manually providing the correspondence [66] or cannot generalize [14, 22, 67] to stylized characters with different shapes. Gao et al. [22] propose a VAE-GAN based method to leverage the cycle consistency between the source and target shapes. Nonetheless, it can only work on shapes used in training. Wang et al. [62] introduce conditional normalization used in style transfer for 3D deformation transfer. But the method is limited to clothed-humans and cannot handle the large shape variations of stylized characters." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We argue that these learning-based methods cannot generalize to stylized characters because they rely on encoding their global information (e.g., body or parts), which is different from traditional works that focus on local deformation, e.g., the affine transformation applied to each triangle in [55]. Using a neural network to encode the global information easily leads to overfitting. For example, models" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8705" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.471, + 0.198 + ], + "angle": 0, + "content": "trained on human meshes cannot generalize to a stylized humanoid character. At the same time, early works only focus on local information and cannot model global information such as correspondence between the source and target shapes, which is why they all need human effort to annotate the correspondence. Our method tries to learn the correspondence and deform locally at the same time." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.21, + 0.47, + 0.436 + ], + "angle": 0, + "content": "Skeleton-based Pose Transfer. Besides mesh deformation transfer, an alternative way to transfer pose is to utilize skeletons. Motion retargeting is also a common name used for transferring poses from one motion sequence to another. Gleicher et al. [24] propose a space-time constrained solver aiming to satisfy the kinematics-level constraints and to preserve the characters' original identity. Following works [5, 19, 33] try to solve inverse-kinematics or inverse rate control to achieve pose transfer. There are also dynamics-based methods [4, 59] that consider physics during the retargeting process. 
Recently, learning-based methods [20, 27, 37, 60, 61] train deep neural networks to predict the transformation of the skeleton. Aberman et al. [2] propose a pooling-based method to transfer poses between meshes with different skeletons." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.45, + 0.47, + 0.631 + ], + "angle": 0, + "content": "All these works highly rely on the skeleton for pose transfer. Other works try to estimate the rigging of the template shape [7, 39, 52, 63, 64] when a skeleton is not available. But if the prediction of the skinning weights fails, the retargeting fails as well. Liao et al. [36] propose a model that learns to predict the skinning weights and pose transfer jointly using ground truth skinning weights and paired motion data as supervision, which limits the generalization of this method to categories where annotations are more scarce compared to humans (e.g., quadrupeds). Instead, our method uses posed human or animal meshes for training and deforms stylized characters of different shapes at inference." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.645, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Implicit 3D shape representation. Implicit 3D shape representations have shown great success in reconstructing static shapes [13,16,18,21,23,29,42,43,49] and deformable ones [10,28,34,44-48,58]. DeepSDF [49] proposes to use an MLP to predict the signed distance field (SDF) value of a query point in 3D space, where a shape code is jointly optimized in an auto-decoding manner. Occupancy flow [45] generalizes the Occupancy Networks [42] to learn a temporally and spatially continuous vector field with a NeuralODE [15]. Inspired by parametric models, NPMs [47] disentangles and represents the shape and pose of dynamic humans by learning an implicit shape and pose function, respectively. Different from these implicit shape representation works that focus on reconstructing static or deformable meshes, we further exploit the inherent continuity and locality of implicit functions to deform stylized characters to match a target pose in a zero-shot manner." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.09, + 0.591, + 0.105 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.115, + 0.892, + 0.403 + ], + "angle": 0, + "content": "We aim to transfer the pose of a biped or quadruped avatar to an unrigged, stylized 3D character. We tackle this problem by modeling the shape and pose of a 3D character using a correspondence-aware shape understanding module and an implicit pose deformation module, inspired by classical mesh deformation methods [55, 56]. The shape understanding module (Sec. 3.1, Fig. 2) predicts a latent shape code and part segmentation label of a 3D character in rest pose, while the pose deformation module (Sec. 3.2, Fig. 3) deforms the character in the rest pose given the predicted shape code and a target pose code. Moreover, to produce natural deformations and generalize to rare poses unseen at training, we introduce an efficient volume-based test-time training procedure (Sec 3.3) for unseen stylized characters. All three modules, trained only with posed, unclothed human meshes, and unrigged, stylized characters in a rest pose, are directly applied to unseen stylized characters at inference. We explain our method for humans, and describe how we extend it to quadrupeds in Sec. 4.6." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.411, + 0.888, + 0.427 + ], + "angle": 0, + "content": "3.1. 
Correspondence-Aware Shape Understanding" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.435, + 0.892, + 0.495 + ], + "angle": 0, + "content": "Given a 3D character in rest pose, we propose a shape understanding module to represent its shape information as a latent code, and to predict a body part segmentation label for each surface point." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.495, + 0.892, + 0.722 + ], + "angle": 0, + "content": "To learn a representative shape code, we employ an implicit auto-decoder [47, 49] that reconstructs the 3D character taking the shape code as input. During training, we jointly optimize the shape code of each training sample and the decoder. Given an unseen character (i.e., a stylized 3D character) during inference, we obtain its shape code by freezing the decoder and optimizing the shape code to reconstruct the given character. Specifically, as shown in Fig. 2, given the concatenation of a query point \\( x \\in \\mathbb{R}^3 \\) and the shape code \\( s \\in \\mathbb{R}^d \\), we first obtain an embedding \\( e \\in \\mathbb{R}^d \\) via an MLP denoted as \\( \\mathcal{F} \\). Conditioned on the embedding \\( e \\), the occupancy \\( \\hat{o}_x \\in \\mathbb{R} \\) of \\( x \\) is then predicted by another MLP denoted as \\( \\mathcal{O} \\). The occupancy indicates if the query point \\( x \\) is inside or outside the body surface and can be supervised by the ground truth occupancy as:" + }, + { + "type": "equation", + "bbox": [ + 0.51, + 0.732, + 0.892, + 0.763 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathcal {O}} = - \\sum_ {x} \\left(o _ {x} \\cdot \\log \\left(\\hat {o} _ {x}\\right) + \\left(1 - o _ {x}\\right) \\cdot \\log \\left(1 - \\hat {o} _ {x}\\right)\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.766, + 0.833, + 0.78 + ], + "angle": 0, + "content": "where \\(o_x\\) is the ground truth occupancy at point \\(x\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.781, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Since our shape code eventually serves as a condition for the pose deformation module, we argue that it should also capture the part correspondence knowledge across different instances, in addition to the shape information (e.g., height, weight, and shape of each body part). This insight has been utilized by early local mesh deformation method [55], which explicitly utilizes correspondence to transfer local transformations between the source and target meshes. Our" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "8706" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.102, + 0.079, + 0.852, + 0.266 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.276, + 0.893, + 0.304 + ], + "angle": 0, + "content": "Figure 2. The shape understanding module (Sec. 3.1). Given a query point and a learnable shape code, we take MLPs to predict the occupancy, part segmentation label and further use an inverse MLP to regress the query point." + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.314, + 0.468, + 0.434 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.441, + 0.471, + 0.484 + ], + "angle": 0, + "content": "Figure 3. The pose deformation module (Sec. 3.2). Given a query point on the surface, the learned shape code and a target pose code, we use an MLP to predict the offset of the query point." 
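To make the auto-decoding formulation of Sec. 3.1 concrete, below is a minimal PyTorch-style sketch of the occupancy branch (the MLPs \( \mathcal{F} \) and \( \mathcal{O} \), Eq. 1). All names, layer sizes, and the latent dimension are illustrative assumptions for this sketch, not the authors' released implementation.

```python
import torch
import torch.nn as nn

D_CODE = 256      # assumed latent shape-code dimension d
N_MESHES = 640    # assumed number of training meshes, one learnable code each

def mlp(d_in, d_out, d_hidden=256):
    return nn.Sequential(
        nn.Linear(d_in, d_hidden), nn.ReLU(),
        nn.Linear(d_hidden, d_hidden), nn.ReLU(),
        nn.Linear(d_hidden, d_out),
    )

class ShapeAutoDecoder(nn.Module):
    """Auto-decoder: per-mesh shape codes are free parameters optimized jointly with the MLPs."""
    def __init__(self):
        super().__init__()
        self.shape_codes = nn.Embedding(N_MESHES, D_CODE)  # one code s per training mesh
        self.F = mlp(3 + D_CODE, D_CODE)                   # (x, s) -> embedding e
        self.O = mlp(D_CODE, 1)                            # e -> occupancy logit

    def forward(self, mesh_idx, x):
        # mesh_idx: (B,) training-mesh indices; x: (B, N, 3) query points
        s = self.shape_codes(mesh_idx)[:, None].expand(-1, x.shape[1], -1)
        e = self.F(torch.cat([x, s], dim=-1))
        return e, self.O(e).squeeze(-1)

model = ShapeAutoDecoder()
x = torch.rand(4, 1024, 3)                       # sampled query points
occ_gt = (torch.rand(4, 1024) > 0.5).float()     # ground-truth inside/outside labels o_x
_, occ_logit = model(torch.arange(4), x)
loss_O = nn.functional.binary_cross_entropy_with_logits(occ_logit, occ_gt)  # Eq. (1)
```

In the same spirit, the part branch \( \mathcal{P} \) adds a cross-entropy term (Eq. 2) on the embedding for meshes with part labels, and the inverse MLP \( \mathcal{Q} \) regresses the query point back from the code and embedding for unlabeled stylized characters (Eq. 3).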
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.498, + 0.47, + 0.724 + ], + "angle": 0, + "content": "pose deformation process could also benefit from learning part correspondence. Take the various headgear, hats, and horns on the stylized characters's heads in Fig. 1 as an example. If these components can be \"understood\" as extensions of the character's heads by their shape codes, they will move smoothly with the character's heads during pose deformation. Thus, besides mesh reconstruction, we effectively task our shape understanding module with an additional objective: predicting part-level correspondence instantiated as the part segmentation label. Specifically, we propose to utilize an MLP \\(\\mathcal{P}\\) to additionally predict a part label \\(p_x = (p_x^1,\\dots,p_x^K)^T\\in \\mathbb{R}^K\\) for each surface point \\(x\\). Thanks to the densely annotated human mesh dataset, we can also supervise part segmentation learning with ground truth labels via:" + }, + { + "type": "equation", + "bbox": [ + 0.173, + 0.73, + 0.469, + 0.772 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathcal {P}} = \\sum_ {x} (- \\sum_ {k = 1} ^ {K} \\mathbb {1} _ {x} ^ {k} \\log \\left(p _ {x} ^ {k}\\right)), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.469, + 0.811 + ], + "angle": 0, + "content": "where \\(K\\) is the total number of body parts, and \\(\\mathbb{1}_x^k = 1\\) if \\(x\\) belongs to the \\(k^{th}\\) part and \\(\\mathbb{1}_x^k = 0\\) otherwise." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.47, + 0.902 + ], + "angle": 0, + "content": "To prepare the shape understanding module for stylized characters during inference, besides unclothed human meshes, we also include unrigged 3D stylized characters in rest pose during training. These characters in rest pose are easily accessible and do not require any annotation. For shape reconstruction, Eq. 1 can be similarly applied to the" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.331, + 0.893, + 0.452 + ], + "angle": 0, + "content": "stylized characters. However, as there is no part segmentation annotation for stylized characters, we propose a self-supervised inverse constraint inspired by correspondence learning methods [17,38] to facilitate part segmentation prediction on these characters. Specifically, we reconstruct the query point's coordinates from the concatenation of the shape code \\( s \\) and the embedding \\( e \\) through an MLP \\( \\mathcal{Q} \\) and add an auxiliary objective as:" + }, + { + "type": "equation", + "bbox": [ + 0.617, + 0.463, + 0.892, + 0.481 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathcal {Q}} = \\left| \\left| \\mathcal {Q} (s, e) - x \\right| \\right| ^ {2}. \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.49, + 0.893, + 0.64 + ], + "angle": 0, + "content": "Intuitively, for stylized characters without part annotation, the model learned without this objective may converge to a trivial solution where similar embeddings are predicted for points with the same occupancy value, even when they are far away from each other, and belong to different body parts. Tab. 4 further quantitatively verifies the effectiveness of this constraint. Beyond facilitating shape understanding, the predicted part segmentation label is further utilized in the volume-based test-time training module which will be introduced in Sec. 3.3." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.651, + 0.803, + 0.667 + ], + "angle": 0, + "content": "3.2. 
Implicit Pose Deformation Module" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.674, + 0.892, + 0.749 + ], + "angle": 0, + "content": "Given the learned shape code and a target pose, the pose deformation module deforms each surface point of the character to match the target pose. In the following, we first describe how we represent a human pose and then introduce the implicit function used for pose deformation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Instead of learning a latent pose space from scratch as in [36, 47], we propose to represent a human pose by the corresponding pose code in the latent space of VPoser [51]. Our intuition is that VPoser is trained with an abundance of posed humans from the large-scale AMASS dataset [41]. This facilitates faster training and provides robustness to overfitting. Furthermore, human poses can be successfully estimated from different modalities (e.g., videos or meshes), and mapped to the latent space of VPoser by existing methods [32, 51, 53]. By taking advantage of these works, our" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8707" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.137 + ], + "angle": 0, + "content": "model can be applied to transfer poses from various modalities to an unrigged stylized character without any additional effort. A few examples can be found in the supplementary." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.137, + 0.47, + 0.258 + ], + "angle": 0, + "content": "To deform a character to match the given pose, we learn a neural implicit function \\(\\mathcal{M}\\) that takes the sampled pose code \\(m\\in \\mathbb{R}^{32}\\), the learned shape code, and a query point \\(x\\) around the character's surface as inputs and outputs the offset (denoted as \\(\\Delta \\hat{x}\\in \\mathbb{R}^3\\)) of \\(x\\) in 3D space. Given the densely annotated human mesh dataset, we directly use the ground truth offset \\(\\Delta x\\) as supervision. The training objective for our pose deformation module is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.187, + 0.269, + 0.469, + 0.3 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathcal {D}} = \\sum_ {x} \\left| \\left| \\Delta \\hat {x} - \\Delta x \\right| \\right| ^ {2}. \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.31, + 0.471, + 0.612 + ], + "angle": 0, + "content": "Essentially, our implicit pose deformation module is similar in spirit to early local mesh deformation methods [55] and has two key advantages compared to the part-level pose transfer methods [22, 36, 62]. First, our implicit pose deformation network is agnostic to mesh topology and resolution. Thus our model can be directly applied to unseen 3D stylized characters with significantly different resolutions and mesh topology compared to the training human meshes during inference. Second, stylized characters often include distinct body part shapes compared to humans. For example, the characters shown in Fig. 1 include big heads or various accessories. Previous part-level methods [36] that learn to predict a bone transformation and skinning weight for each body part usually fail on these unique body parts, since they are different from the corresponding human body parts used for training. 
In contrast, by learning to deform individual surface point, implicit functions are more agnostic to the overall shape of a body part and thus can generalize better to stylized characters with significantly different body part shapes. Fig. 4 and Fig. 6 show these advantages." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.621, + 0.37, + 0.637 + ], + "angle": 0, + "content": "3.3. Volume-based Test-time Training" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.645, + 0.469, + 0.795 + ], + "angle": 0, + "content": "The shape understanding and pose deformation modules discussed above are trained with only posed human meshes and unrigged 3D stylized characters in rest pose. When applied to unseen characters with significantly different shapes, we observe surface distortion introduced by the pose deformation module. Moreover, it is challenging for the module to fully capture the long tail of the pose distribution. To resolve these issues, we propose to apply test-time training [57] and fine-tune the pose deformation module on unseen stylized characters." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.796, + 0.469, + 0.901 + ], + "angle": 0, + "content": "To encourage natural pose deformation, we further propose a volume-preserving constraint during test-time training. Our key insight is that preserving the volume of each part in the rest pose mesh during pose deformation results in less distortion [35, 62]. However, it is non-trivial to compute the precise volume of each body part, which can have complex geometry. Instead, we propose to preserve the Eu" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.213 + ], + "angle": 0, + "content": "ccludean distance between pairs of vertices sampled from the surface of the mesh, as a proxy for constraining the volume. Specifically, given a mesh in rest pose, we randomly sample two points \\( x_{i}^{c} \\) and \\( x_{j}^{c} \\) on the surface within the same part \\( c \\) using the part segmentation prediction from the shape understanding module. We calculate the offset of these two points \\( \\Delta \\hat{x}_{i}^{c} \\) and \\( \\Delta \\hat{x}_{j}^{c} \\) using our pose deformation module and minimize the change in the distance between them by:" + }, + { + "type": "equation", + "bbox": [ + 0.499, + 0.223, + 0.894, + 0.269 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {v} = \\sum_ {c} \\sum_ {i} \\sum_ {j} \\left(\\left| \\left| x _ {i} ^ {c} - x _ {j} ^ {c} \\right| \\right| - \\left| \\left| \\left(x _ {i} ^ {c} + \\Delta \\hat {x} _ {i} ^ {c}\\right) - \\left(x _ {j} ^ {c} + \\Delta \\hat {x} _ {j} ^ {c}\\right) \\right| \\right|\\right) ^ {2}. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.266, + 0.892, + 0.311 + ], + "angle": 0, + "content": "By sampling a large number of point pairs within a part and minimizing Eq. 5, we can approximately maintain the volume of each body part during pose deformation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.311, + 0.892, + 0.462 + ], + "angle": 0, + "content": "Furthermore, in order to generalize the pose deformation module to long-tail poses that are rarely seen during training, we propose to utilize the source character in rest pose and its deformed shape as paired training data during test-time training. 
Specifically, we take the source character in rest pose, its target pose code, and its optimized shape code as inputs and we output the movement \\(\\Delta \\hat{x}^{dr}\\), where \\(x^{dr}\\) is a query point from the source character. We minimize the L2 distance between the predicted movement \\(\\Delta \\hat{x}^{dr}\\) and the ground truth movement \\(\\Delta x^{dr}\\)," + }, + { + "type": "equation", + "bbox": [ + 0.595, + 0.473, + 0.891, + 0.505 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {d r} = \\sum_ {x ^ {d r}} \\left| \\left| \\Delta \\hat {x} ^ {d r} - \\Delta x ^ {d r} \\right| \\right| ^ {2}. \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.508, + 0.892, + 0.599 + ], + "angle": 0, + "content": "Besides the volume-preserving constraint and the reconstruction of the source character, we also employ the edge loss \\(\\mathcal{L}_e\\) used in [25, 36, 62]. Overall, the objectives for the test-time training procedure are \\(\\mathcal{L}_{\\mathcal{T}} = \\lambda_v\\mathcal{L}_v + \\lambda_e\\mathcal{L}_e + \\lambda_{dr}\\mathcal{L}_{dr}\\), where \\(\\lambda_v, \\lambda_e\\), and \\(\\lambda_{dr}\\) are hyper-parameters balancing the loss weights." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.612, + 0.633, + 0.629 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.636, + 0.602, + 0.651 + ], + "angle": 0, + "content": "4.1. Datasets" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.66, + 0.892, + 0.871 + ], + "angle": 0, + "content": "To train the shape understanding module, we use 40 human meshes sampled from the SMPL [40] parametric model. We use both the occupancy and part segmentation label of these meshes as supervision (see Sec. 3.1). To generalize the shape understanding module to stylized characters, we further include 600 stylized characters from RigNet [63]. Note that we only use the rest pose mesh (i.e., occupancy label) of the characters in [63] for training. To train our pose deformation module, we construct paired training data by deforming each of the 40 SMPL characters discussed above with 5000 pose codes sampled from the VPoser's [50] latent space. In total, we collect 200,000 training pairs, with each pair including an unclothed human mesh in rest pose and the same human mesh in target pose." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "After training the shape understanding and pose deformation modules, we test them on the Mixamo [1] dataset," + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8708" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.242 + ], + "angle": 0, + "content": "which includes challenging stylized characters, and the MGN [11] dataset, which includes clothed humans. The characters in both datasets have different shapes compared to the unclothed SMPL meshes we used for training, demonstrating the generalization ability of the proposed method. Following [36], we test on 19 stylized characters, with each deformed by 28 motion sequences from the Mixamo dataset. For the MGN dataset, we test on 16 clothed characters, with each deformed by 200 target poses. Both the testing characters and poses are unseen during training." 
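As a rough illustration of the volume-preserving proxy in Eq. (5) above, the sketch below samples point pairs inside each predicted body part and penalizes changes in their pairwise distances after adding the offsets \( \Delta \hat{x} \) predicted by the pose deformation module. Tensor shapes and the sampling count are assumptions made only for this sketch.

```python
import torch

def volume_preserving_loss(x_rest, delta_x, part_labels, pairs_per_part=512):
    # x_rest: (N, 3) rest-pose surface samples; delta_x: (N, 3) predicted offsets;
    # part_labels: (N,) part indices from the shape understanding module.
    x_posed = x_rest + delta_x
    loss = x_rest.new_zeros(())
    for c in part_labels.unique():
        idx = (part_labels == c).nonzero(as_tuple=True)[0]
        if idx.numel() < 2:
            continue
        # randomly sample point pairs within the same part c
        i = idx[torch.randint(idx.numel(), (pairs_per_part,))]
        j = idx[torch.randint(idx.numel(), (pairs_per_part,))]
        d_rest = (x_rest[i] - x_rest[j]).norm(dim=-1)
        d_posed = (x_posed[i] - x_posed[j]).norm(dim=-1)
        loss = loss + ((d_rest - d_posed) ** 2).sum()   # Eq. (5)
    return loss

# Overall test-time objective (Sec. 3.3), with the weights reported in Sec. 4.2:
# L_T = 0.05 * L_v + 0.01 * L_e + 1.0 * L_dr
```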
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.243, + 0.471, + 0.531 + ], + "angle": 0, + "content": "For quadrupeds, since there is no dataset including large-scale paired stylized quadrupeds for quantitative evaluation, we split all characters from the SMAL [68] dataset and use the first 34 shapes (i.e., cats, dogs, and horses) for training. We further collect 81 stylized quadrupeds in rest pose from the RigNet [63] to improve generalization of the shape understanding module. Similarly to the human category, we use occupancy and part segmentation supervision for the SMAL shapes and only the occupancy supervision for RigNet meshes. To train the pose deformation module, we deform each of the 34 characters in SMAL by 2000 poses sampled from the latent space of BARC [54], a 3D reconstruction model trained for the dog category. We quantitatively evaluate our model on the hippo meshes from the SMAL dataset, which have larger shape variance compared to the cats, dogs, and horses used for training. We produce the testing data by deforming each hippo mesh with 500 unseen target poses from SMAL [68]. We show qualitative pose transfer on stylized quadrupeds in Fig. 1." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.541, + 0.295, + 0.557 + ], + "angle": 0, + "content": "4.2. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.565, + 0.47, + 0.746 + ], + "angle": 0, + "content": "We use the ADAM [30] optimizer to train both the shape understanding and pose deformation modules. For the shape understanding module, we use a learning rate of \\( 1e - 4 \\) for both the decoder and shape code optimization, with a batch size of 64. Given a new character at inference time, we fix the decoder and only optimize the shape code for the new character with the same optimizer and learning rate. For the pose deformation module, we use a learning rate of \\( 3e - 4 \\) with a batch size of 128. For test-time training, we use a batch size of 1 and a learning rate of \\( 5e - 3 \\) with the ADAM optimizer. We set \\( \\lambda_v \\), \\( \\lambda_e \\), and \\( \\lambda_{dr} \\) (See Sec. 3.3) as 0.05, 0.01, and 1 respectively." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.757, + 0.407, + 0.773 + ], + "angle": 0, + "content": "4.3. Metrics and Baselines for Comparison" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Metrics. We use Point-wise Mesh Euclidean Distance (PMD) [36, 62] to evaluate pose transfer error. The PMD metric reveals pose similarity of the predicted deformation compared to its ground truth. However, as shown in Fig. 4, PMD can not fully show the smoothness and realism of the generated results. Thus, we adopt an edge length score (ELS) metric to evaluate the character's smoothness after the deformation. Specifically, we compare each edge's" + }, + { + "type": "table", + "bbox": [ + 0.508, + 0.09, + 0.885, + 0.156 + ], + "angle": 0, + "content": "
Dataset | Metric | SPT*(full) [36] | NBS [35] | SPT [36] | Ours
MGN [11] | PMD ↓ | 1.62 | 1.33 | 1.82 | 0.99
MGN [11] | ELS ↑ | 0.86 | 0.70 | 0.85 | 0.89
Mixamo [1] | PMD ↓ | 3.05 | 7.04 | 5.29 | 5.06
Mixamo [1] | ELS ↑ | 0.61 | 0.66 | 0.59 | 0.88
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.166, + 0.892, + 0.236 + ], + "angle": 0, + "content": "Table 1. Quantitative comparison on MGN and Mixamo. Our method achieves the lowest PMD with the highest ELS. We provide the performance of the SPT*(full) method, which uses more supervision than the other methods as a reference. Our method is even better or comparable to it." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.246, + 0.892, + 0.276 + ], + "angle": 0, + "content": "length in the deformed mesh with the corresponding edge's length in the ground truth mesh. We define the score as" + }, + { + "type": "equation", + "bbox": [ + 0.581, + 0.289, + 0.892, + 0.331 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{| \\mathcal {E} |} \\sum_ {\\{i, j \\} \\sim \\mathcal {E}} 1 - \\left| \\frac {| | \\hat {V} _ {i} - \\hat {V} _ {j} | | _ {2}}{| | V _ {i} - V _ {j} | | _ {2}} - 1 \\right|, \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.336, + 0.892, + 0.412 + ], + "angle": 0, + "content": "where \\(\\mathcal{E}\\) indicates all edges of the mesh, \\(|\\mathcal{E}|\\) is the number of the edges in the mesh. \\(\\hat{V}_i\\) and \\(\\hat{V}_j\\) are the vertices in the deformed mesh. \\(V_{i}\\) and \\(V_{j}\\) are the vertices in the ground truth mesh. For all the evaluation metrics, we scale the template character to be 1 meter tall, following [36]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.413, + 0.892, + 0.595 + ], + "angle": 0, + "content": "Baselines. We compare our method with Neural Blend Shapes (NBS) [35] and Skeleton-free Pose Transfer (SPT) [36]. NBS is a rigging prediction method trained on the SMPL and MGN datasets, which include naked and clothed human meshes with ground truth rigging information. For SPT, we show the results of two versions, one is trained only on the AMASS dataset, named SPT, which has a comparable level of supervision to our method. We also test the SPT*(full) version, which is trained on the AMASS, RigNet and Mixamo datasets, using both stylized characters' skinning weights as supervision and paired stylized characters in rest pose and target pose." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.606, + 0.822, + 0.62 + ], + "angle": 0, + "content": "4.4. Human-like Character Pose Transfer" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.629, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We report the PMD metric on the MGN and Mixamo datasets in Tab. 1. We also include the performance of SPT*(full) for reference. On the MGN dataset which includes clothed humans, our method which is trained with only unclothed humans achieve the best PMD score than all baseline methods, including baselines trained with more supervision (i.e., the NBS [35] learned with clothed humans and the SPT*(full) [36] learned with skinning weight and paired motion data). For the stylized characters, our method outperforms the SPT baseline learned with a comparable amount of supervision and gets competitive results with the NBS [35] and SPT*(full) baseline trained with more supervision. Furthermore, when testing on the more challenging, less human-like characters (e.g., a mouse with a big head in Fig. 1), the baselines produce noticeable artifacts and rough surfaces, which can be observed in the qualitative comparisons in Fig. 4. We provide the PMD value for each character in the supplementary." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8709" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.171, + 0.072, + 0.245, + 0.155 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.171, + 0.158, + 0.244, + 0.234 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.173, + 0.241, + 0.232, + 0.311 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.179, + 0.319, + 0.226, + 0.332 + ], + "angle": 0, + "content": "Source" + }, + { + "type": "image", + "bbox": [ + 0.259, + 0.074, + 0.359, + 0.153 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.262, + 0.155, + 0.356, + 0.234 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.265, + 0.239, + 0.358, + 0.314 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.287, + 0.318, + 0.332, + 0.332 + ], + "angle": 0, + "content": "Target" + }, + { + "type": "image", + "bbox": [ + 0.37, + 0.074, + 0.473, + 0.233 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.389, + 0.239, + 0.45, + 0.314 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.386, + 0.318, + 0.452, + 0.333 + ], + "angle": 0, + "content": "NBS [35]" + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.073, + 0.579, + 0.155 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.155, + 0.577, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.509, + 0.241, + 0.566, + 0.312 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.506, + 0.318, + 0.568, + 0.333 + ], + "angle": 0, + "content": "SPT [36]" + }, + { + "type": "image", + "bbox": [ + 0.614, + 0.073, + 0.67, + 0.154 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.614, + 0.155, + 0.693, + 0.235 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.62, + 0.239, + 0.679, + 0.314 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.631, + 0.318, + 0.666, + 0.332 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image", + "bbox": [ + 0.721, + 0.073, + 0.799, + 0.155 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.723, + 0.155, + 0.796, + 0.235 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.729, + 0.239, + 0.794, + 0.313 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.747, + 0.318, + 0.773, + 0.332 + ], + "angle": 0, + "content": "GT" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.336, + 0.894, + 0.379 + ], + "angle": 0, + "content": "Figure 4. Qualitative comparison on Mixamo. The average PMD of these three results for NBS, SPT, and Ours are 8.16, 6.13, and 5.16 respectively and the average ELS for NBS, SPT, and Ours are 0.65, 0.78, and 0.93 respectively. Our method can successfully transfer the pose to challenging stylized characters (e.g., the mouse with a big head in the second row)." + }, + { + "type": "image", + "bbox": [ + 0.092, + 0.387, + 0.436, + 0.649 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.651, + 0.47, + 0.68 + ], + "angle": 0, + "content": "Figure 5. Part segmentation visualization. 
NBS makes wrong predictions for hair while SPT may mix the upper legs." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.701, + 0.47, + 0.776 + ], + "angle": 0, + "content": "We show the ELS score comparison of different methods on the MGN and Mixamo datasets in Tab. 1. For both clothed humans and stylized characters, our method can generate more realistic results which are consistent with the target mesh and achieves the best ELS score." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.471, + 0.903 + ], + "angle": 0, + "content": "We visually compare our method and the baseline methods in Fig. 4 on the Mixamo dataset. Although NBS is trained with a clothed-human dataset, when testing on the human-like characters, it still fails on parts that are separate from the body such as the hair and the pants. When using only naked human meshes as supervision, SPT cannot generalize to challenging human-like characters, producing rough mesh surface with spikes." + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.382, + 0.625, + 0.435 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.509, + 0.44, + 0.627, + 0.481 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.488, + 0.624, + 0.538 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.538, + 0.544, + 0.601, + 0.559 + ], + "angle": 0, + "content": "SPT [36]" + }, + { + "type": "image", + "bbox": [ + 0.635, + 0.383, + 0.743, + 0.429 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.634, + 0.439, + 0.75, + 0.481 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.634, + 0.488, + 0.745, + 0.537 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.677, + 0.545, + 0.712, + 0.557 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image", + "bbox": [ + 0.752, + 0.383, + 0.858, + 0.43 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.755, + 0.439, + 0.873, + 0.482 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.755, + 0.488, + 0.866, + 0.536 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.805, + 0.544, + 0.83, + 0.557 + ], + "angle": 0, + "content": "GT" + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.565, + 0.894, + 0.608 + ], + "angle": 0, + "content": "Figure 6. Quadrupedal pose transfer visualization. Our method can achieve smooth and accurate pose transfer while SPT fails on the mouth and leg regions." + }, + { + "type": "table", + "bbox": [ + 0.56, + 0.622, + 0.835, + 0.653 + ], + "angle": 0, + "content": "
Metric | NBS [35] | SPT [36] | Ours
Accuracy ↑ | 67.8% | 75.6% | 86.9%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.663, + 0.892, + 0.692 + ], + "angle": 0, + "content": "Table 2. Part prediction accuracy on Mixamo [1]. Our method achieves the best part segmentation accuracy." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.711, + 0.789, + 0.729 + ], + "angle": 0, + "content": "4.5. Part Understanding Comparison" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.894, + 0.903 + ], + "angle": 0, + "content": "As discussed in Sec. 3.1, part segmentation plays an important role in both shape understanding and pose deformation. Though NBS [35] and SPT [36] do not explicitly predict part segmentation label, they are both skinning weight-based methods and we can derive the part segmentation label from the predicted skinning weights. Specifically, by selecting the maximum weight of each vertex, we can convert the skinning weight prediction to part segmentation labels for the vertices. We compare our part prediction results with those derived from SPT and NBS. We report the part segmentation accuracy on the Mixamo datasets in Tab. 2" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8710" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.091, + 0.074, + 0.442, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.317, + 0.471, + 0.373 + ], + "angle": 0, + "content": "Figure 7. Qualitative comparison for ablation study. Removing the constraint (eq. 1) in shape understanding leads to wrong pose deformation results. The volume preserving loss (eq. 5) helps to maintain the identity, e.g., the thickness of the arms in first row." + }, + { + "type": "table", + "bbox": [ + 0.102, + 0.384, + 0.443, + 0.415 + ], + "angle": 0, + "content": "
Metric | SPT [36] | Ours | Metric | SPT [36] | Ours
PMD ↓ | 10.28 | 8.28 | ELS ↑ | 0.28 | 0.86
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.421, + 0.47, + 0.451 + ], + "angle": 0, + "content": "Table 3. Comparison on Hippos from SMAL [68]. Our method achieves better pose transfer accuracy with more smooth results." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.459, + 0.473, + 0.732 + ], + "angle": 0, + "content": "and visualize the part segmentation results in Fig. 5. Even trained with only part segmentation supervision of human meshes, our method can successfully segment each part for the stylized characters. On the contrary, SPT uses graph convolution network [31] to predict the skinning weights. When training only with human meshes, it often fails to distinguish different parts. As shown in Fig. 5, it mixes up the right and left upper legs, and incorrectly classifies the shoulder as the head. Though NBS is trained with clothed humans, it always classifies human hair as the human body for characters from Mixamo. This is because that NBS uses the MeshCNN [26] as the shape encoder. As a result, it is sensitive to mesh topology and cannot generalize to meshes with disconnected parts (e.g., disconnected hair and head). Tab. 2 further quantitatively demonstrates that our method achieves the best part segmentation accuracy, demonstrating its ability to correctly interpret the shape and part information in stylized characters." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.742, + 0.427, + 0.759 + ], + "angle": 0, + "content": "4.6. Quadrupedal Pose Transfer Comparison" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.47, + 0.903 + ], + "angle": 0, + "content": "To further show the generalization ability of our method, we conduct experiments on quadrupeds. We report the PMD and ELS score of our method and the SPT [36] in Tab. 3. When testing on hippos with large shape gap from the training meshes, SPT has a hard time generalizing both in terms of pose transfer accuracy and natural deformation. While our method achieves both better qualitative and quantitative results. We visualize the qualitative comparisons in Fig. 6. SPT produces obvious artifacts on the hippo's mouth" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.074, + 0.868, + 0.144 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.149, + 0.892, + 0.178 + ], + "angle": 0, + "content": "Figure 8. Part prediction on stylized quadrupeds. Our method successfully predicts the parts of unseen stylized quadrupeds." + }, + { + "type": "table", + "bbox": [ + 0.543, + 0.186, + 0.848, + 0.229 + ], + "angle": 0, + "content": "
Metric | Ours w/o inv | Ours w/o volume | Ours
PMD ↓ | 1.26 | 1.02 | 0.99
ELS ↑ | 0.88 | 0.88 | 0.89
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.235, + 0.892, + 0.278 + ], + "angle": 0, + "content": "Table 4. Ablation study on inverse MLP and volume preserving loss. The inverse MLP and volume preserving loss helps to improve pose transfer accuracy and produce smooth deformation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.286, + 0.892, + 0.392 + ], + "angle": 0, + "content": "and legs, while our method achieves accurate pose transfer and maintains the shape characteristics of the original character at the same time. We provide more results in the supplementary. We also show the part segmentation results on stylized characters by our method in Fig. 8. Even for unique parts such as the hats and antlers, our method correctly assigns them to the head part." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.401, + 0.653, + 0.417 + ], + "angle": 0, + "content": "4.7. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.425, + 0.892, + 0.665 + ], + "angle": 0, + "content": "To evaluate the key components of our method, we conduct ablation studies on the MGN dataset by removing the inverse constraint (Eq. 3) in the shape understanding module and the volume-preserving loss (Eq. 5) used during the test-time training produce, we name them as \"ours w/o inv\" and \"ours w/o \\( v \\)\" respectively. We report the PMD and ELS metrics in Tab. 4. The model learned without the inverse constraint or volume-preserving loss has worse PMD and ELS score than our full model, indicating the contribution of these two objectives. We also provide qualitative results in Fig. 7. We use red boxes to point out the artifacts. As shown in Fig. 7, our model trained without the inverse constraint produces less accurate pose transfer results. Moreover, adding the volume-preserving loss helps to maintain the character's local details such as the thickness of the arms." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.68, + 0.619, + 0.695 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.705, + 0.892, + 0.901 + ], + "angle": 0, + "content": "In this paper, we present a model that deforms unrigged, stylized characters guided by a biped or quadruped avatar. Our model is trained with only easily accessible posed human or animal meshes, yet can be applied to unseen stylized characters in a zero-shot manner during inference. To this end, we draw key insights from classic mesh deformation method and develop a correspondence-aware shape understanding module, an implicit pose deformation module and a volume-based test-time training procedure. We carry out extensive experiments on both the biped and quadruped category and show that our method produces more realistic and accurate deformation compared to baselines learned with comparable or more supervision." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "8711" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.105 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.116, + 0.468, + 0.142 + ], + "angle": 0, + "content": "[1] Mixamo. http://www MIXamo.com/. Accessed on November 09th, 2022. 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.145, + 0.468, + 0.2 + ], + "angle": 0, + "content": "[2] Kfir Aberman, Peizhuo Li, Dani Lischinski, Olga Sorkine-Hornung, Daniel Cohen-Or, and Baoquan Chen. 
Skeleton-aware networks for deep motion retargeting. In ACM Transactions on Graphics (SIGGRAPH), 2020. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.203, + 0.468, + 0.257 + ], + "angle": 0, + "content": "[3] Noam Aigerman, Kunal Gupta, Vladimir G Kim, Siddhartha Chaudhuri, Jun Saito, and Thibault Groueix. Neural jacobian fields: Learning intrinsic mappings of arbitrary meshes. arXiv preprint arXiv:2205.02904, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.26, + 0.468, + 0.315 + ], + "angle": 0, + "content": "[4] Mazen Al Borno, Ludovic Righetti, Michael J Black, Scott L Delp, Eugene Fiume, and Javier Romero. Robust physics-based motion retargeting with realistic body shapes. In Computer Graphics Forum. Wiley Online Library, 2018. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.317, + 0.468, + 0.357 + ], + "angle": 0, + "content": "[5] Andreas Aristidou and Joan Lasenby. FABRIK: A fast, iterative solver for the inverse kinematics problem. Graphical Models, 2011. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.36, + 0.468, + 0.415 + ], + "angle": 0, + "content": "[6] Quentin Avril, Donya Ghafourzadeh, Srinivasan Ramachandran, Sahel Fallahdoust, Sarah Ribet, Olivier Dionne, Martin de Lasa, and Eric Paquette. Animation setup transfer for 3D characters. In Computer Graphics Forum, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.417, + 0.468, + 0.457 + ], + "angle": 0, + "content": "[7] Ilya Baran and Jovan Popovic. Automatic rigging and animation of 3D characters. In ACM Transactions on Graphics (SIGGRAPH), 2007. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.46, + 0.468, + 0.5 + ], + "angle": 0, + "content": "[8] Ilya Baran, Daniel Vlasic, Eitan Grinspun, and Jovan Popovic. Semantic deformation transfer. In ACM Transactions on Graphics (ToG). 2009. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.503, + 0.468, + 0.557 + ], + "angle": 0, + "content": "[9] Mirela Ben-Chen, Ofir Weber, and Craig Gotsman. Spatial deformation transfer. In Proceedings of the 2009 ACM SIGGRAPH/Eurographics Symposium on Computer Animation, 2009. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.561, + 0.468, + 0.629 + ], + "angle": 0, + "content": "[10] Bharat Lal Bhatnagar, Cristian Sminchisescu, Christian Theobalt, and Gerard Pons-Moll. Combining implicit function learning and parametric models for 3D human reconstruction. In European Conference on Computer Vision (ECCV), 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.632, + 0.468, + 0.686 + ], + "angle": 0, + "content": "[11] Bharat Lal Bhatnagar, Garvita Tiwari, Christian Theobalt, and Gerard Pons-Moll. Multi-garment net: Learning to dress 3D people from images. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.688, + 0.468, + 0.743 + ], + "angle": 0, + "content": "[12] Federica Bogo, Javier Romero, Matthew Loper, and Michael J Black. FAUST: Dataset and evaluation for 3D mesh registration. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.746, + 0.468, + 0.813 + ], + "angle": 0, + "content": "[13] Rohan Chabra, Jan E Lenssen, Eddy Ilg, Tanner Schmidt, Julian Straub, Steven Lovegrove, and Richard Newcombe. Deep local shapes: Learning local sdf priors for detailed 3D reconstruction. In European Conference on Computer Vision (ECCV), 2020. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.816, + 0.468, + 0.871 + ], + "angle": 0, + "content": "[14] Haoyu Chen, Hao Tang, Henglin Shi, Wei Peng, Nicu Sebe, and Guoying Zhao. Intrinsic-extrinsic preserved gans for unsupervised 3D pose transfer. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.468, + 0.901 + ], + "angle": 0, + "content": "[15] Ricky TQ Chen, Yulia Rubanova, Jesse Bettencourt, and David K Duvenaud. Neural ordinary differential equa" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.116, + 0.468, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.094, + 0.892, + 0.12 + ], + "angle": 0, + "content": "tions. Advances in Neural Information Processing Systems (NeurIPS), 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.124, + 0.892, + 0.164 + ], + "angle": 0, + "content": "[16] Zhiqin Chen and Hao Zhang. Learning implicit fields for generative shape modeling. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.167, + 0.892, + 0.222 + ], + "angle": 0, + "content": "[17] An-Chieh Cheng, Xueting Li, Min Sun, Ming-Hsuan Yang, and Sifei Liu. Learning 3D dense correspondence via canonical point autoencoder. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.225, + 0.892, + 0.279 + ], + "angle": 0, + "content": "[18] Julian Chibane, Thiemo Alldieck, and Gerard Pons-Moll. Implicit functions in feature space for 3D shape reconstruction and completion. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.282, + 0.892, + 0.31 + ], + "angle": 0, + "content": "[19] Kwang-Jin Choi and Hyeong-Seok Ko. Online motion retargeting. Comput. Animat. Virtual Worlds, 2000. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.312, + 0.892, + 0.379 + ], + "angle": 0, + "content": "[20] Brian Delhaisse, Domingo Esteban, Leonel Rozo, and Darwin Caldwell. Transfer learning of shared latent spaces between robots with similar kinematic structure. In International Joint Conference on Neural Networks (IJCNN), 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.384, + 0.892, + 0.438 + ], + "angle": 0, + "content": "[21] Philipp Erler, Paul Guerrero, Stefan Ohrhallinger, Niloy J Mitra, and Michael Wimmer. Points2surf learning implicit surfaces from point clouds. In European Conference on Computer Vision (ECCV), 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.442, + 0.892, + 0.495 + ], + "angle": 0, + "content": "[22] Lin Gao, Jie Yang, Yi-Ling Qiao, Yu-Kun Lai, Paul L Rosin, Weiwei Xu, and Shihong Xia. Automatic unpaired shape deformation transfer. ACM Transactions on Graphics (ToG), 2018. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.499, + 0.892, + 0.566 + ], + "angle": 0, + "content": "[23] Kyle Genova, Forrester Cole, Daniel Vlasic, Aaron Sarna, William T Freeman, and Thomas Funkhouser. Learning shape templates with structured implicit functions. In IEEE International Conference on Computer Vision (ICCV), 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.571, + 0.892, + 0.612 + ], + "angle": 0, + "content": "[24] Michael Gleicher. Retargetting motion to new characters. 
In Proceedings of the 25th annual conference on Computer graphics and interactive techniques, 1998. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.615, + 0.892, + 0.669 + ], + "angle": 0, + "content": "[25] Thibault Groueix, Matthew Fisher, Vladimir G Kim, Bryan C Russell, and Mathieu Aubry. 3D-CODED: 3D correspondences by deep deformation. In European Conference on Computer Vision (ECCV), 2018. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.673, + 0.892, + 0.726 + ], + "angle": 0, + "content": "[26] Rana Hanocka, Amir Hertz, Noa Fish, Raja Giryes, Shachar Fleishman, and Daniel Cohen-Or. MeshCNN: a network with an edge. ACM Transactions on Graphics (ToG), 2019. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.731, + 0.892, + 0.772 + ], + "angle": 0, + "content": "[27] Hanyoung Jang, Byungjun Kwon, Moonwon Yu, Seong Uk Kim, and Jongmin Kim. A variational U-Net for motion retargeting. In Comput. Animat. Virtual Worlds, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.774, + 0.892, + 0.829 + ], + "angle": 0, + "content": "[28] Boyan Jiang, Yinda Zhang, Xingkui Wei, Xiangyang Xue, and Yanwei Fu. Learning compositional representation for 4D captures with neural ode. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.832, + 0.892, + 0.899 + ], + "angle": 0, + "content": "[29] Chiyu Jiang, Avneesh Sud, Ameesh Makadia, Jingwei Huang, Matthias Nießner, Thomas Funkhouser, et al. Local implicit grid representations for 3D scenes. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 3" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.094, + 0.892, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8712" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.134 + ], + "angle": 0, + "content": "[30] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. International Conference on Learning Representations (ICLR), 2015. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.134, + 0.471, + 0.176 + ], + "angle": 0, + "content": "[31] Thomas N Kipf and Max Welling. Semi-supervised classification with graph convolutional networks. In International Conference on Learning Representations (ICLR), 2017. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.177, + 0.471, + 0.232 + ], + "angle": 0, + "content": "[32] Muhammed Kocabas, Nikos Athanasiou, and Michael J Black. Vibe: Video inference for human body pose and shape estimation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.233, + 0.471, + 0.288 + ], + "angle": 0, + "content": "[33] Jehee Lee and Sung Yong Shin. A hierarchical approach to interactive motion editing for human-like figures. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, 1999. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.289, + 0.471, + 0.355 + ], + "angle": 0, + "content": "[34] Jiahui Lei and Kostas Daniilidis. CaDeX: Learning canonical deformation coordinate space for dynamic surface representation via neural homeomorphism. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.357, + 0.471, + 0.413 + ], + "angle": 0, + "content": "[35] Peizhuo Li, Kfir Aberman, Rana Hanocka, Libin Liu, Olga Sorkine-Hornung, and Baoquan Chen. Learning skeletal articulations with neural blend shapes. In ACM Transactions on Graphics (SIGGRAPH), 2021. 2, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.414, + 0.471, + 0.468 + ], + "angle": 0, + "content": "[36] Zhouyingcheng Liao, Jimei Yang, Jun Saito, Gerard Pons-Moll, and Yang Zhou. Skeleton-free pose transfer for stylized 3D characters. In European Conference on Computer Vision (ECCV), 2022. 3, 4, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.469, + 0.471, + 0.524 + ], + "angle": 0, + "content": "[37] Jongin Lim, Hyung Jin Chang, and Jin Young Choi. PMnet: Learning of disentangled pose and movement for unsupervised motion retargeting. In British Machine Vision Conference (BMVC), 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.525, + 0.471, + 0.579 + ], + "angle": 0, + "content": "[38] Feng Liu and Xiaoming Liu. Learning implicit functions for topology-varying dense 3D shape correspondence. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.58, + 0.471, + 0.636 + ], + "angle": 0, + "content": "[39] Lijuan Liu, Youyi Zheng, Di Tang, Yi Yuan, Changjie Fan, and Kun Zhou. Neuroskinning: Automatic skin binding for production characters with deep graph networks. ACM Transactions on Graphics (ToG), 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.636, + 0.471, + 0.69 + ], + "angle": 0, + "content": "[40] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J Black. SMPL: A skinned multiperson linear model. ACM Transactions on Graphics (ToG), 2015. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.691, + 0.471, + 0.747 + ], + "angle": 0, + "content": "[41] Naureen Mahmood, Nima Ghorbani, Nikolaus F. Troje, Gerard Pons-Moll, and Michael J. Black. AMASS: Archive of motion capture as surface shapes. In IEEE International Conference on Computer Vision (ICCV), 2019. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.748, + 0.471, + 0.816 + ], + "angle": 0, + "content": "[42] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3D reconstruction in function space. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.817, + 0.471, + 0.872 + ], + "angle": 0, + "content": "[43] Mateusz Michalkiewicz, Jhony K Pontes, Dominic Jack, Mahsa Baktashmotlagh, and Anders Eriksson. Deep level sets: Implicit surface representations for 3D shape inference. arXiv preprint arXiv:1901.06802, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.471, + 0.902 + ], + "angle": 0, + "content": "[44] Marko Mihajlovic, Yan Zhang, Michael J Black, and Siyu Tang. LEAP: Learning articulated occupancy of people. In" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.121, + 0.892, + 0.176 + ], + "angle": 0, + "content": "[45] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Occupancy flow: 4D reconstruction by learning particle dynamics. In IEEE International Conference on Computer Vision (ICCV), 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.177, + 0.892, + 0.245 + ], + "angle": 0, + "content": "[46] Atsuhiro Noguchi, Umar Iqbal, Jonathan Tremblay, Tatsuya Harada, and Orazio Gallo. Watch it move: Unsupervised discovery of 3D joints for re-posing of articulated objects. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.246, + 0.892, + 0.301 + ], + "angle": 0, + "content": "[47] Pablo Palafox, Aljaž Božić, Justus Thies, Matthias Nießner, and Angela Dai. NPMs: Neural parametric models for 3D deformable shapes. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.302, + 0.892, + 0.357 + ], + "angle": 0, + "content": "[48] Pablo Palafox, Nikolaos Sarafianos, Tony Tung, and Angela Dai. SPAMs: Structured implicit parametric models. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.358, + 0.892, + 0.427 + ], + "angle": 0, + "content": "[49] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning continuous signed distance functions for shape representation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.428, + 0.892, + 0.497 + ], + "angle": 0, + "content": "[50] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed AA Osman, Dimitrios Tzionas, and Michael J Black. Expressive body capture: 3D hands, face, and body from a single image. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.497, + 0.892, + 0.567 + ], + "angle": 0, + "content": "[51] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed A. A. Osman, Dimitrios Tzionas, and Michael J. Black. Expressive body capture: 3D hands, face, and body from a single image. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.567, + 0.892, + 0.608 + ], + "angle": 0, + "content": "[52] Martin Poirier and Eric Paquette. Rig retargeting for 3d animation. In Proceedings of the Graphics Interface 2009 Conference, 2009. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.609, + 0.892, + 0.677 + ], + "angle": 0, + "content": "[53] Davis Rempe, Tolga Birdal, Aaron Hertzmann, Jimei Yang, Srinath Sridhar, and Leonidas J Guibas. Humor: 3D human motion model for robust pose estimation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.679, + 0.892, + 0.747 + ], + "angle": 0, + "content": "[54] Nadine Ruegg, Silvia Zuffi, Konrad Schindler, and Michael J Black. BARC: Learning to regress 3D dog shape from images by exploiting breed information. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 
2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.748, + 0.892, + 0.789 + ], + "angle": 0, + "content": "[55] Robert W Sumner and Jovan Popovic. Deformation transfer for triangle meshes. ACM Transactions on Graphics (ToG), 2004. 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.79, + 0.892, + 0.831 + ], + "angle": 0, + "content": "[56] Robert W Sumner, Johannes Schmid, and Mark Pauly. Embedded deformation for shape manipulation. In ACM Transactions on Graphics (SIGGRAPH). 2007. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.832, + 0.892, + 0.899 + ], + "angle": 0, + "content": "[57] Yu Sun, Xiaolong Wang, Zhuang Liu, John Miller, Alexei Efros, and Moritz Hardt. Test-time training with self-supervision for generalization under distribution shifts. In International Conference on Machine Learning (ICML), 2020. 5" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8713" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.161 + ], + "angle": 0, + "content": "[58] Ramana Sundararaman, Gautam Pai, and Maks Ovsjanikov. Implicit field supervision for robust non-rigid shape matching. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part III, pages 344-362. Springer, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.163, + 0.47, + 0.204 + ], + "angle": 0, + "content": "[59] Seyoon Tak and Hyeong-Seok Ko. A physically-based motion retargeting filter. In ACM Transactions on Graphics (ToG), 2005. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.207, + 0.47, + 0.261 + ], + "angle": 0, + "content": "[60] Ruben Villegas, Duygu Ceylan, Aaron Hertzmann, Jimei Yang, and Jun Saito. Contact-aware retargeting of skinned motion. In IEEE International Conference on Computer Vision (ICCV), 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.263, + 0.47, + 0.318 + ], + "angle": 0, + "content": "[61] Ruben Villegas, Jimei Yang, Duygu Ceylan, and Honglak Lee. Neural kinematic networks for unsupervised motion retargeting. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.32, + 0.47, + 0.387 + ], + "angle": 0, + "content": "[62] Jiashun Wang, Chao Wen, Yanwei Fu, Haitao Lin, Tianyun Zou, Xiangyang Xue, and Yinda Zhang. Neural pose transfer by spatially adaptive instance normalization. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.39, + 0.47, + 0.444 + ], + "angle": 0, + "content": "[63] Zhan Xu, Yang Zhou, Evangelos Kalogerakis, Chris Landreth, and Karan Singh. RigNet: Neural rigging for articulated characters. In ACM Transactions on Graphics (SIGGRAPH), 2020. 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.446, + 0.47, + 0.5 + ], + "angle": 0, + "content": "[64] Zhan Xu, Yang Zhou, Evangelos Kalogerakis, and Karan Singh. Predicting animation skeletons for 3D articulated models via volumetric nets. In International Conference on 3D Vision, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.503, + 0.47, + 0.544 + ], + "angle": 0, + "content": "[65] Jie Yang, Lin Gao, Yu-Kun Lai, Paul L Rosin, and Shihong Xia. Biharmonic deformation transfer with automatic key point selection. 
Graphical Models, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.546, + 0.47, + 0.6 + ], + "angle": 0, + "content": "[66] Wang Yifan, Noam Aigerman, Vladimir G Kim, Siddhartha Chaudhuri, and Olga Sorkine-Hornung. Neural cages for detail-preserving 3D deformations. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.602, + 0.47, + 0.657 + ], + "angle": 0, + "content": "[67] Keyang Zhou, Bharat Lal Bhatnagar, and Gerard Pons-Moll. Unsupervised shape and pose disentanglement for 3D meshes. In European Conference on Computer Vision (ECCV), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.659, + 0.47, + 0.713 + ], + "angle": 0, + "content": "[68] Silvia Zuffi, Angjoo Kanazawa, David W Jacobs, and Michael J Black. 3D Menagerie: Modeling the 3D shape and pose of animals. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 2, 6, 8" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.713 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.956 + ], + "angle": 0, + "content": "8714" + } + ] +] \ No newline at end of file diff --git a/2023/Zero-Shot Pose Transfer for Unrigged Stylized 3D Characters/5ada3ef3-b974-4dfe-9b9c-f168fb79ad07_origin.pdf b/2023/Zero-Shot Pose Transfer for Unrigged Stylized 3D Characters/5ada3ef3-b974-4dfe-9b9c-f168fb79ad07_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..acefda6ab24fc92a8c6c17c0bf61490c4134f41c --- /dev/null +++ b/2023/Zero-Shot Pose Transfer for Unrigged Stylized 3D Characters/5ada3ef3-b974-4dfe-9b9c-f168fb79ad07_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed87cd7b286643348f0cd948cd3377cbb469ebd6d3460e70bea533dc32a3c8e3 +size 7851807 diff --git a/2023/Zero-Shot Pose Transfer for Unrigged Stylized 3D Characters/full.md b/2023/Zero-Shot Pose Transfer for Unrigged Stylized 3D Characters/full.md new file mode 100644 index 0000000000000000000000000000000000000000..b2414a4ac88750190a2f2eb29b78c59fd3ca10b4 --- /dev/null +++ b/2023/Zero-Shot Pose Transfer for Unrigged Stylized 3D Characters/full.md @@ -0,0 +1,345 @@ +# Zero-shot Pose Transfer for Unrigged Stylized 3D Characters + +Jiashun Wang $^{1*}$ Xueting Li $^{2}$ Sifei Liu $^{2}$ Shalini De Mello $^{2}$ Orazio Gallo $^{2}$ Xiaolong Wang $^{3}$ Jan Kautz $^{2}$ $^{1}$ Carnegie Mellon University ${}^{2}$ NVIDIA ${}^{3}$ UC San Diego + +![](images/b8b5358168abb25f99559384141748e8150d018ef2d2eebe1ebab6a6c5eb3eae.jpg) +Figure 1. Our algorithm transfers the pose of a reference avatar (source) to stylized characters. Unlike existing methods, at training time our approach needs only the mesh of the source avatar in rest and desired pose, and the mesh of the stylized character only in rest pose. + +# Abstract + +Transferring the pose of a reference avatar to stylized 3D characters of various shapes is a fundamental task in computer graphics. Existing methods either require the stylized characters to be rigged, or they use the stylized character in the desired pose as ground truth at training. We present a zero-shot approach that requires only the widely available deformed non-stylized avatars in training, and deforms stylized characters of significantly different shapes at inference. Classical methods achieve strong generalization by deforming the mesh at the triangle level, but this requires labelled correspondences. 
We leverage the power of local deformation, but without requiring explicit correspondence labels. We introduce a semi-supervised shape-understanding module to bypass the need for explicit correspondences at test time, and an implicit pose deformation module that deforms individual surface points to match the target pose. Furthermore, to encourage realistic and accurate deformation of + +stylized characters, we introduce an efficient volume-based test-time training procedure. Because it does not need rigging, nor the deformed stylized character at training time, our model generalizes to categories with scarce annotation, such as stylized quadrupeds. Extensive experiments demonstrate the effectiveness of the proposed method compared to the state-of-the-art approaches trained with comparable or more supervision. Our project page is available at https://jiashunwang.github.io/ZPT/ + +# 1. Introduction + +Stylized 3D characters, such as those in Fig. 1, are commonly used in animation, movies, and video games. Deforming these characters to mimic natural human or animal poses has been a long-standing task in computer graphics. Different from the 3D models of natural humans and animals, stylized 3D characters are created by professional artists through imagination and exaggeration. As a result, each stylized character has a distinct skeleton, shape, mesh + +topology, and usually include various accessories, such as a cloak or wings (see Fig. 1). These variations hinder the process of matching the pose of a stylized 3D character to that of a reference avatar, generally making manual rigging a requirement. Unfortunately, rigging is a tedious process that requires manual effort to create the skeleton and skinning weights for each character. Even when provided with manually annotated rigs, transferring poses from a source avatar onto stylized characters is not trivial when the source and target skeletons differ. Automating this procedure is still an open research problem and is the focus of many recent works [2, 4, 24, 52]. Meanwhile, non-stylized 3D humans and animals have been well-studied by numerous prior works [35, 40, 54, 62, 68]. A few methods generously provide readily available annotated datasets [11, 12, 41, 68], or carefully designed parametric models [40, 51, 68]. By taking advantage of these datasets [12, 41], several learning-based methods [7, 14, 35, 62, 67] disentangle and transfer poses between human meshes using neural networks. However, these methods (referred to as "part-level" in the following) carry out pose transfer by either globally deforming the whole body mesh [14, 22, 47, 67] or by transforming body parts [35, 48], both of which lead to overfitting on the training human meshes and fail to generalize to stylized characters with significantly different body part shapes. Interestingly, classical mesh deformation methods [55, 56] (referred to as "local" in the following) can transfer poses between a pair of meshes with significant shape differences by computing and transferring per-triangle transformations through correspondence. Though these methods require manual correspondence annotation between the source and target meshes, they provide a key insight that by transforming individual triangles instead of body parts, the mesh deformation methods are more agnostic to a part's shape and can generalize to meshes with different shapes. 
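To make this "local" idea concrete, the sketch below (our own illustration, not code from [55]; it assumes NumPy, an `(N, 3)` vertex array, and triangles given as index triplets) shows the per-triangle affine transform that classical deformation transfer extracts and copies across a correspondence. The full method of [55] additionally solves a global least-squares problem so that neighbouring triangles remain stitched together.

```python
import numpy as np

def triangle_frame(v0, v1, v2):
    # Local frame of one triangle: two edge vectors plus a scaled normal,
    # following the "fourth vertex" construction used in deformation transfer.
    e1, e2 = v1 - v0, v2 - v0
    n = np.cross(e1, e2)
    n = n / np.sqrt(np.linalg.norm(n))
    return np.stack([e1, e2, n], axis=1)   # 3x3, columns are the frame vectors

def per_triangle_transform(rest_verts, posed_verts, tri):
    # Affine transform (deformation gradient) mapping the triangle's rest-pose
    # frame to its posed frame; this is the quantity a local method transfers
    # to the corresponding triangle of the target mesh.
    V = triangle_frame(*rest_verts[tri])
    V_hat = triangle_frame(*posed_verts[tri])
    return V_hat @ np.linalg.inv(V)
```

Because each transform is computed from a single triangle, nothing in this step depends on the overall shape of a body part, which is exactly the property the method below aims to retain without manual correspondence labels.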
+ +We marry the benefits of learning-based methods [7, 14, 35, 62, 67] with the classic local deformation approach [55] and present a model for unrigged, stylized character deformation guided by a non-stylized biped or quadruped avatar. Notably, our model only requires easily accessible posed human or animal meshes for training and can be directly applied to deform 3D stylized characters with a significantly different shape at inference. To this end, we implicitly operationalize the key insight from the local deformation method [55] by modeling the shape and pose of a 3D character with a correspondence-aware shape understanding module and an implicit pose deformation module. The shape understanding module learns to predict the part segmentation label (i.e., the coarse-level correspondence) for each surface point, besides representing the shape of a 3D character as a latent shape code. The pose deformation module is conditioned on the shape code and deforms individual surface point guided by a target pose code sampled + +from a prior pose latent space [50]. Furthermore, to encourage realistic deformation and generalize to rare poses, we propose a novel volume-based test-time training procedure that can be efficiently applied to unseen stylized characters. + +During inference, by mapping biped or quadruped poses from videos, in addition to meshes to the prior pose latent space using existing works [32, 51, 53], we can transfer poses from different modalities onto unrigged 3D stylized characters. Our main contributions are: + +- We propose a solution to a practical and challenging task - learning a model for stylized 3D character deformation with only posed human or animal meshes. +- We develop a correspondence-aware shape understanding module, an implicit pose deformation module, and a volume-based test-time training procedure to generalize the proposed model to unseen stylized characters and arbitrary poses in a zero-shot manner. +- We carry out extensive experiments on both humans and quadrupeds to show that our method produces more visually pleasing and accurate deformations compared to baselines trained with comparable or more supervision. + +# 2. Related Work + +Deformation Transfer. Deformation transfer is a longstanding problem in the computer graphics community [3, 6, 8, 9, 55, 65]. Sumner et al. [55] apply an affine transformation to each triangle of the mesh to solve an optimization problem that matches the deformation of the source mesh while maintaining the shape of the target mesh. Ben-Chen et al. [9] enclose the source and target shapes with two cages and transfer the Jacobians of the source deformation to the target shape. However, these methods need tedious human efforts to annotate the correspondence between the source and target shapes. More recently, several deep learning methods are developed to solve the deformation transfer task. However, they either require manually providing the correspondence [66] or cannot generalize [14, 22, 67] to stylized characters with different shapes. Gao et al. [22] propose a VAE-GAN based method to leverage the cycle consistency between the source and target shapes. Nonetheless, it can only work on shapes used in training. Wang et al. [62] introduce conditional normalization used in style transfer for 3D deformation transfer. But the method is limited to clothed-humans and cannot handle the large shape variations of stylized characters. 
+ +We argue that these learning-based methods cannot generalize to stylized characters because they rely on encoding their global information (e.g., body or parts), which is different from traditional works that focus on local deformation, e.g., the affine transformation applied to each triangle in [55]. Using a neural network to encode the global information easily leads to overfitting. For example, models + +trained on human meshes cannot generalize to a stylized humanoid character. At the same time, early works only focus on local information and cannot model global information such as correspondence between the source and target shapes, which is why they all need human effort to annotate the correspondence. Our method tries to learn the correspondence and deform locally at the same time. + +Skeleton-based Pose Transfer. Besides mesh deformation transfer, an alternative way to transfer pose is to utilize skeletons. Motion retargeting is also a common name used for transferring poses from one motion sequence to another. Gleicher et al. [24] propose a space-time constrained solver aiming to satisfy the kinematics-level constraints and to preserve the characters' original identity. Following works [5, 19, 33] try to solve inverse-kinematics or inverse rate control to achieve pose transfer. There are also dynamics-based methods [4, 59] that consider physics during the retargeting process. Recently, learning-based methods [20, 27, 37, 60, 61] train deep neural networks to predict the transformation of the skeleton. Aberman et al. [2] propose a pooling-based method to transfer poses between meshes with different skeletons. + +All these works highly rely on the skeleton for pose transfer. Other works try to estimate the rigging of the template shape [7, 39, 52, 63, 64] when a skeleton is not available. But if the prediction of the skinning weights fails, the retargeting fails as well. Liao et al. [36] propose a model that learns to predict the skinning weights and pose transfer jointly using ground truth skinning weights and paired motion data as supervision, which limits the generalization of this method to categories where annotations are more scarce compared to humans (e.g., quadrupeds). Instead, our method uses posed human or animal meshes for training and deforms stylized characters of different shapes at inference. + +Implicit 3D shape representation. Implicit 3D shape representations have shown great success in reconstructing static shapes [13,16,18,21,23,29,42,43,49] and deformable ones [10,28,34,44-48,58]. DeepSDF [49] proposes to use an MLP to predict the signed distance field (SDF) value of a query point in 3D space, where a shape code is jointly optimized in an auto-decoding manner. Occupancy flow [45] generalizes the Occupancy Networks [42] to learn a temporally and spatially continuous vector field with a NeuralODE [15]. Inspired by parametric models, NPMs [47] disentangles and represents the shape and pose of dynamic humans by learning an implicit shape and pose function, respectively. Different from these implicit shape representation works that focus on reconstructing static or deformable meshes, we further exploit the inherent continuity and locality of implicit functions to deform stylized characters to match a target pose in a zero-shot manner. + +# 3. Method + +We aim to transfer the pose of a biped or quadruped avatar to an unrigged, stylized 3D character. 
We tackle this problem by modeling the shape and pose of a 3D character using a correspondence-aware shape understanding module and an implicit pose deformation module, inspired by classical mesh deformation methods [55, 56]. The shape understanding module (Sec. 3.1, Fig. 2) predicts a latent shape code and part segmentation label of a 3D character in rest pose, while the pose deformation module (Sec. 3.2, Fig. 3) deforms the character in the rest pose given the predicted shape code and a target pose code. Moreover, to produce natural deformations and generalize to rare poses unseen at training, we introduce an efficient volume-based test-time training procedure (Sec 3.3) for unseen stylized characters. All three modules, trained only with posed, unclothed human meshes, and unrigged, stylized characters in a rest pose, are directly applied to unseen stylized characters at inference. We explain our method for humans, and describe how we extend it to quadrupeds in Sec. 4.6. + +# 3.1. Correspondence-Aware Shape Understanding + +Given a 3D character in rest pose, we propose a shape understanding module to represent its shape information as a latent code, and to predict a body part segmentation label for each surface point. + +To learn a representative shape code, we employ an implicit auto-decoder [47, 49] that reconstructs the 3D character taking the shape code as input. During training, we jointly optimize the shape code of each training sample and the decoder. Given an unseen character (i.e., a stylized 3D character) during inference, we obtain its shape code by freezing the decoder and optimizing the shape code to reconstruct the given character. Specifically, as shown in Fig. 2, given the concatenation of a query point $x \in \mathbb{R}^3$ and the shape code $s \in \mathbb{R}^d$ , we first obtain an embedding $e \in \mathbb{R}^d$ via an MLP denoted as $\mathcal{F}$ . Conditioned on the embedding $e$ , the occupancy $\hat{o}_x \in \mathbb{R}$ of $x$ is then predicted by another MLP denoted as $\mathcal{O}$ . The occupancy indicates if the query point $x$ is inside or outside the body surface and can be supervised by the ground truth occupancy as: + +$$ +\mathcal {L} _ {\mathcal {O}} = - \sum_ {x} \left(o _ {x} \cdot \log \left(\hat {o} _ {x}\right) + \left(1 - o _ {x}\right) \cdot \log \left(1 - \hat {o} _ {x}\right)\right), \tag {1} +$$ + +where $o_x$ is the ground truth occupancy at point $x$ . + +Since our shape code eventually serves as a condition for the pose deformation module, we argue that it should also capture the part correspondence knowledge across different instances, in addition to the shape information (e.g., height, weight, and shape of each body part). This insight has been utilized by early local mesh deformation method [55], which explicitly utilizes correspondence to transfer local transformations between the source and target meshes. Our + +![](images/84b1ba766b54c9ef332c96b1d8eabc7c5bc78378df1476b6d69cda375015e6e6.jpg) +Figure 2. The shape understanding module (Sec. 3.1). Given a query point and a learnable shape code, we take MLPs to predict the occupancy, part segmentation label and further use an inverse MLP to regress the query point. + +![](images/9368775deb240878960eedeec753f6ad31bb00e7b841c77c983e7bcc2ec94a62.jpg) +Figure 3. The pose deformation module (Sec. 3.2). Given a query point on the surface, the learned shape code and a target pose code, we use an MLP to predict the offset of the query point. 
+ +pose deformation process could also benefit from learning part correspondence. Take the various headgear, hats, and horns on the stylized characters's heads in Fig. 1 as an example. If these components can be "understood" as extensions of the character's heads by their shape codes, they will move smoothly with the character's heads during pose deformation. Thus, besides mesh reconstruction, we effectively task our shape understanding module with an additional objective: predicting part-level correspondence instantiated as the part segmentation label. Specifically, we propose to utilize an MLP $\mathcal{P}$ to additionally predict a part label $p_x = (p_x^1,\dots,p_x^K)^T\in \mathbb{R}^K$ for each surface point $x$ . Thanks to the densely annotated human mesh dataset, we can also supervise part segmentation learning with ground truth labels via: + +$$ +\mathcal {L} _ {\mathcal {P}} = \sum_ {x} (- \sum_ {k = 1} ^ {K} \mathbb {1} _ {x} ^ {k} \log \left(p _ {x} ^ {k}\right)), \tag {2} +$$ + +where $K$ is the total number of body parts, and $\mathbb{1}_x^k = 1$ if $x$ belongs to the $k^{th}$ part and $\mathbb{1}_x^k = 0$ otherwise. + +To prepare the shape understanding module for stylized characters during inference, besides unclothed human meshes, we also include unrigged 3D stylized characters in rest pose during training. These characters in rest pose are easily accessible and do not require any annotation. For shape reconstruction, Eq. 1 can be similarly applied to the + +stylized characters. However, as there is no part segmentation annotation for stylized characters, we propose a self-supervised inverse constraint inspired by correspondence learning methods [17,38] to facilitate part segmentation prediction on these characters. Specifically, we reconstruct the query point's coordinates from the concatenation of the shape code $s$ and the embedding $e$ through an MLP $\mathcal{Q}$ and add an auxiliary objective as: + +$$ +\mathcal {L} _ {\mathcal {Q}} = \left| \left| \mathcal {Q} (s, e) - x \right| \right| ^ {2}. \tag {3} +$$ + +Intuitively, for stylized characters without part annotation, the model learned without this objective may converge to a trivial solution where similar embeddings are predicted for points with the same occupancy value, even when they are far away from each other, and belong to different body parts. Tab. 4 further quantitatively verifies the effectiveness of this constraint. Beyond facilitating shape understanding, the predicted part segmentation label is further utilized in the volume-based test-time training module which will be introduced in Sec. 3.3. + +# 3.2. Implicit Pose Deformation Module + +Given the learned shape code and a target pose, the pose deformation module deforms each surface point of the character to match the target pose. In the following, we first describe how we represent a human pose and then introduce the implicit function used for pose deformation. + +Instead of learning a latent pose space from scratch as in [36, 47], we propose to represent a human pose by the corresponding pose code in the latent space of VPoser [51]. Our intuition is that VPoser is trained with an abundance of posed humans from the large-scale AMASS dataset [41]. This facilitates faster training and provides robustness to overfitting. Furthermore, human poses can be successfully estimated from different modalities (e.g., videos or meshes), and mapped to the latent space of VPoser by existing methods [32, 51, 53]. 
By taking advantage of these works, our + +model can be applied to transfer poses from various modalities to an unrigged stylized character without any additional effort. A few examples can be found in the supplementary. + +To deform a character to match the given pose, we learn a neural implicit function $\mathcal{M}$ that takes the sampled pose code $m\in \mathbb{R}^{32}$ , the learned shape code, and a query point $x$ around the character's surface as inputs and outputs the offset (denoted as $\Delta \hat{x}\in \mathbb{R}^3$ ) of $x$ in 3D space. Given the densely annotated human mesh dataset, we directly use the ground truth offset $\Delta x$ as supervision. The training objective for our pose deformation module is defined as: + +$$ +\mathcal {L} _ {\mathcal {D}} = \sum_ {x} \left| \left| \Delta \hat {x} - \Delta x \right| \right| ^ {2}. \tag {4} +$$ + +Essentially, our implicit pose deformation module is similar in spirit to early local mesh deformation methods [55] and has two key advantages compared to the part-level pose transfer methods [22, 36, 62]. First, our implicit pose deformation network is agnostic to mesh topology and resolution. Thus our model can be directly applied to unseen 3D stylized characters with significantly different resolutions and mesh topology compared to the training human meshes during inference. Second, stylized characters often include distinct body part shapes compared to humans. For example, the characters shown in Fig. 1 include big heads or various accessories. Previous part-level methods [36] that learn to predict a bone transformation and skinning weight for each body part usually fail on these unique body parts, since they are different from the corresponding human body parts used for training. In contrast, by learning to deform individual surface point, implicit functions are more agnostic to the overall shape of a body part and thus can generalize better to stylized characters with significantly different body part shapes. Fig. 4 and Fig. 6 show these advantages. + +# 3.3. Volume-based Test-time Training + +The shape understanding and pose deformation modules discussed above are trained with only posed human meshes and unrigged 3D stylized characters in rest pose. When applied to unseen characters with significantly different shapes, we observe surface distortion introduced by the pose deformation module. Moreover, it is challenging for the module to fully capture the long tail of the pose distribution. To resolve these issues, we propose to apply test-time training [57] and fine-tune the pose deformation module on unseen stylized characters. + +To encourage natural pose deformation, we further propose a volume-preserving constraint during test-time training. Our key insight is that preserving the volume of each part in the rest pose mesh during pose deformation results in less distortion [35, 62]. However, it is non-trivial to compute the precise volume of each body part, which can have complex geometry. Instead, we propose to preserve the Eu + +ccludean distance between pairs of vertices sampled from the surface of the mesh, as a proxy for constraining the volume. Specifically, given a mesh in rest pose, we randomly sample two points $x_{i}^{c}$ and $x_{j}^{c}$ on the surface within the same part $c$ using the part segmentation prediction from the shape understanding module. 
We calculate the offset of these two points $\Delta \hat{x}_{i}^{c}$ and $\Delta \hat{x}_{j}^{c}$ using our pose deformation module and minimize the change in the distance between them by: + +$$ +\mathcal {L} _ {v} = \sum_ {c} \sum_ {i} \sum_ {j} \left(\left| \left| x _ {i} ^ {c} - x _ {j} ^ {c} \right| \right| - \left| \left| \left(x _ {i} ^ {c} + \Delta \hat {x} _ {i} ^ {c}\right) - \left(x _ {j} ^ {c} + \Delta \hat {x} _ {j} ^ {c}\right) \right| \right|\right) ^ {2}. \tag {5} +$$ + +By sampling a large number of point pairs within a part and minimizing Eq. 5, we can approximately maintain the volume of each body part during pose deformation. + +Furthermore, in order to generalize the pose deformation module to long-tail poses that are rarely seen during training, we propose to utilize the source character in rest pose and its deformed shape as paired training data during test-time training. Specifically, we take the source character in rest pose, its target pose code, and its optimized shape code as inputs and we output the movement $\Delta \hat{x}^{dr}$ , where $x^{dr}$ is a query point from the source character. We minimize the L2 distance between the predicted movement $\Delta \hat{x}^{dr}$ and the ground truth movement $\Delta x^{dr}$ , + +$$ +\mathcal {L} _ {d r} = \sum_ {x ^ {d r}} \left| \left| \Delta \hat {x} ^ {d r} - \Delta x ^ {d r} \right| \right| ^ {2}. \tag {6} +$$ + +Besides the volume-preserving constraint and the reconstruction of the source character, we also employ the edge loss $\mathcal{L}_e$ used in [25, 36, 62]. Overall, the objectives for the test-time training procedure are $\mathcal{L}_{\mathcal{T}} = \lambda_v\mathcal{L}_v + \lambda_e\mathcal{L}_e + \lambda_{dr}\mathcal{L}_{dr}$ , where $\lambda_v, \lambda_e$ , and $\lambda_{dr}$ are hyper-parameters balancing the loss weights. + +# 4. Experiments + +# 4.1. Datasets + +To train the shape understanding module, we use 40 human meshes sampled from the SMPL [40] parametric model. We use both the occupancy and part segmentation label of these meshes as supervision (see Sec. 3.1). To generalize the shape understanding module to stylized characters, we further include 600 stylized characters from RigNet [63]. Note that we only use the rest pose mesh (i.e., occupancy label) of the characters in [63] for training. To train our pose deformation module, we construct paired training data by deforming each of the 40 SMPL characters discussed above with 5000 pose codes sampled from the VPoser's [50] latent space. In total, we collect 200,000 training pairs, with each pair including an unclothed human mesh in rest pose and the same human mesh in target pose. + +After training the shape understanding and pose deformation modules, we test them on the Mixamo [1] dataset, + +which includes challenging stylized characters, and the MGN [11] dataset, which includes clothed humans. The characters in both datasets have different shapes compared to the unclothed SMPL meshes we used for training, demonstrating the generalization ability of the proposed method. Following [36], we test on 19 stylized characters, with each deformed by 28 motion sequences from the Mixamo dataset. For the MGN dataset, we test on 16 clothed characters, with each deformed by 200 target poses. Both the testing characters and poses are unseen during training. 
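As a rough illustration of how such rest/posed training pairs can be assembled, the sketch below samples pose codes from a standard normal prior (VPoser is a VAE, so sampling its 32-D latent space this way is standard) and poses a single SMPL identity. Here `smpl_layer` and `vposer_decode` are stand-in callables for the actual SMPL and VPoser implementations, so every signature should be read as an assumption rather than the libraries' real APIs.

```python
import torch

def build_training_pairs(smpl_layer, vposer_decode, betas, n_poses=5000):
    # Assemble (rest vertices, pose code, posed vertices) tuples for one identity.
    #   smpl_layer(body_pose, betas) -> (V, 3) vertices   (stand-in for SMPL)
    #   vposer_decode(z)             -> body_pose         (stand-in for the VPoser decoder)
    rest_pose = torch.zeros(1, 69)          # zero axis-angle body pose, i.e. the rest pose
    rest_verts = smpl_layer(rest_pose, betas)
    pairs = []
    for _ in range(n_poses):
        z = torch.randn(1, 32)              # sample a pose code from the latent prior
        posed_verts = smpl_layer(vposer_decode(z), betas)
        pairs.append((rest_verts, z, posed_verts))
    return pairs
```

Repeating this for each of the 40 SMPL identities yields the 200,000 training pairs described above; the same recipe with a quadruped body model and pose prior produces the animal training data discussed next.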
+ +For quadrupeds, since there is no dataset including large-scale paired stylized quadrupeds for quantitative evaluation, we split all characters from the SMAL [68] dataset and use the first 34 shapes (i.e., cats, dogs, and horses) for training. We further collect 81 stylized quadrupeds in rest pose from the RigNet [63] to improve generalization of the shape understanding module. Similarly to the human category, we use occupancy and part segmentation supervision for the SMAL shapes and only the occupancy supervision for RigNet meshes. To train the pose deformation module, we deform each of the 34 characters in SMAL by 2000 poses sampled from the latent space of BARC [54], a 3D reconstruction model trained for the dog category. We quantitatively evaluate our model on the hippo meshes from the SMAL dataset, which have larger shape variance compared to the cats, dogs, and horses used for training. We produce the testing data by deforming each hippo mesh with 500 unseen target poses from SMAL [68]. We show qualitative pose transfer on stylized quadrupeds in Fig. 1. + +# 4.2. Implementation Details + +We use the ADAM [30] optimizer to train both the shape understanding and pose deformation modules. For the shape understanding module, we use a learning rate of $1e - 4$ for both the decoder and shape code optimization, with a batch size of 64. Given a new character at inference time, we fix the decoder and only optimize the shape code for the new character with the same optimizer and learning rate. For the pose deformation module, we use a learning rate of $3e - 4$ with a batch size of 128. For test-time training, we use a batch size of 1 and a learning rate of $5e - 3$ with the ADAM optimizer. We set $\lambda_v$ , $\lambda_e$ , and $\lambda_{dr}$ (See Sec. 3.3) as 0.05, 0.01, and 1 respectively. + +# 4.3. Metrics and Baselines for Comparison + +Metrics. We use Point-wise Mesh Euclidean Distance (PMD) [36, 62] to evaluate pose transfer error. The PMD metric reveals pose similarity of the predicted deformation compared to its ground truth. However, as shown in Fig. 4, PMD can not fully show the smoothness and realism of the generated results. Thus, we adopt an edge length score (ELS) metric to evaluate the character's smoothness after the deformation. Specifically, we compare each edge's + +
| Dataset | Metric | SPT*(full) [36] | NBS [35] | SPT [36] | Ours |
| --- | --- | --- | --- | --- | --- |
| MGN [11] | PMD ↓ | 1.62 | 1.33 | 1.82 | 0.99 |
| | ELS ↑ | 0.86 | 0.70 | 0.85 | 0.89 |
| Mixamo [1] | PMD ↓ | 3.05 | 7.04 | 5.29 | 5.06 |
| | ELS ↑ | 0.61 | 0.66 | 0.59 | 0.88 |
+ +Table 1. Quantitative comparison on MGN and Mixamo. Our method achieves the lowest PMD with the highest ELS. We provide the performance of the SPT*(full) method, which uses more supervision than the other methods as a reference. Our method is even better or comparable to it. + +length in the deformed mesh with the corresponding edge's length in the ground truth mesh. We define the score as + +$$ +\frac {1}{| \mathcal {E} |} \sum_ {\{i, j \} \sim \mathcal {E}} 1 - \left| \frac {| | \hat {V} _ {i} - \hat {V} _ {j} | | _ {2}}{| | V _ {i} - V _ {j} | | _ {2}} - 1 \right|, \tag {7} +$$ + +where $\mathcal{E}$ indicates all edges of the mesh, $|\mathcal{E}|$ is the number of the edges in the mesh. $\hat{V}_i$ and $\hat{V}_j$ are the vertices in the deformed mesh. $V_{i}$ and $V_{j}$ are the vertices in the ground truth mesh. For all the evaluation metrics, we scale the template character to be 1 meter tall, following [36]. + +Baselines. We compare our method with Neural Blend Shapes (NBS) [35] and Skeleton-free Pose Transfer (SPT) [36]. NBS is a rigging prediction method trained on the SMPL and MGN datasets, which include naked and clothed human meshes with ground truth rigging information. For SPT, we show the results of two versions, one is trained only on the AMASS dataset, named SPT, which has a comparable level of supervision to our method. We also test the SPT*(full) version, which is trained on the AMASS, RigNet and Mixamo datasets, using both stylized characters' skinning weights as supervision and paired stylized characters in rest pose and target pose. + +# 4.4. Human-like Character Pose Transfer + +We report the PMD metric on the MGN and Mixamo datasets in Tab. 1. We also include the performance of SPT*(full) for reference. On the MGN dataset which includes clothed humans, our method which is trained with only unclothed humans achieve the best PMD score than all baseline methods, including baselines trained with more supervision (i.e., the NBS [35] learned with clothed humans and the SPT*(full) [36] learned with skinning weight and paired motion data). For the stylized characters, our method outperforms the SPT baseline learned with a comparable amount of supervision and gets competitive results with the NBS [35] and SPT*(full) baseline trained with more supervision. Furthermore, when testing on the more challenging, less human-like characters (e.g., a mouse with a big head in Fig. 1), the baselines produce noticeable artifacts and rough surfaces, which can be observed in the qualitative comparisons in Fig. 4. We provide the PMD value for each character in the supplementary. 
+ +![](images/4373cf912884415bb198f2ddc519573b1b1bd0db9d3334d3f0dc36b7681ae064.jpg) + +![](images/c3dff5e8dad3476f2d059883ef3d74710ea2d082abb446921120c15296e71eaf.jpg) + +![](images/952d1fa77e29412929bf41fbba9b85a6ff632e47932a3704ef5124044cf10bce.jpg) +Source + +![](images/b1e6dd9f49d0f714b3af78032fbc4563ca35b398a8d566e0f0b1a4db98344968.jpg) + +![](images/3a7faa429e1cac3f2a004eb6235ddf01d8e00711b8c40b5dfcc94d7124f2b473.jpg) + +![](images/486462331e99d4af63c55bab579dded47eaed63a4861602bab0512fb89cf06fc.jpg) +Target + +![](images/e81d777fd3fcf00216ef88027d1be0cfe145ddf5d34f3843baeff045094f35f8.jpg) + +![](images/add374012e2123a228fcefe50ac76f5ab02ddf5d96b4deeff31e6509a1abf23e.jpg) +NBS [35] + +![](images/cd03b14f06fc44b5f5948f3046818372a221cbafefcd8bc05645c834e87bffa7.jpg) + +![](images/ed7ecd982161013fa5ed72db836e9357462eda131b179bc48db7d1b9db2cb163.jpg) + +![](images/608f1fbb2f0219e590b4adf949c59768bc908cc850bbaac5cdd0db00d9a53261.jpg) +SPT [36] + +![](images/0525239d0042cc48abf9e1689fc7ce77d739b320fa767bc0d09b3929d1475690.jpg) + +![](images/2c977e4e5a8760dfd73cb0657491914acda2ba3239bc9daef811cdeab81b6850.jpg) + +![](images/ee6cb725ab001d3127ba7fdfc22842d58bbb3a697126f8a3c69c2e3bae3de251.jpg) +Ours + +![](images/238231f66f59fef062c826ccc38980589d86e8a09236c4769eabe7137514659c.jpg) + +![](images/b9a701a555f870eeececddefa3c053b9380b1dad28769b8d15b8461255ead581.jpg) + +![](images/3e575671bfe48c8a1a7512eb1aaf766a19aa0fafbff6e85a954d9ff09e98021c.jpg) +GT + +![](images/28a16782138344dbf2157544077b57705a8741cef1658f2c44d1f82f17586ce9.jpg) +Figure 5. Part segmentation visualization. NBS makes wrong predictions for hair while SPT may mix the upper legs. + +We show the ELS score comparison of different methods on the MGN and Mixamo datasets in Tab. 1. For both clothed humans and stylized characters, our method can generate more realistic results which are consistent with the target mesh and achieves the best ELS score. + +We visually compare our method and the baseline methods in Fig. 4 on the Mixamo dataset. Although NBS is trained with a clothed-human dataset, when testing on the human-like characters, it still fails on parts that are separate from the body such as the hair and the pants. When using only naked human meshes as supervision, SPT cannot generalize to challenging human-like characters, producing rough mesh surface with spikes. + +![](images/480e4d440ac18475299a55cbd4e001af2d9ae4c3a990eda816afb0c67df4c577.jpg) +Figure 4. Qualitative comparison on Mixamo. The average PMD of these three results for NBS, SPT, and Ours are 8.16, 6.13, and 5.16 respectively and the average ELS for NBS, SPT, and Ours are 0.65, 0.78, and 0.93 respectively. Our method can successfully transfer the pose to challenging stylized characters (e.g., the mouse with a big head in the second row). + +![](images/8cce0cbc2c5271d9463dbb46bc10c1a1b56108aafeda84a396a145f818b87816.jpg) + +![](images/10a7eafaed4ee1a0eb0a58c40916888b9122ff4860b3fc3a70e31137b6426508.jpg) +SPT [36] + +![](images/9f2bfc42b2287de21b24f395c4be405319035d4b96ab4529afe1588ede1f50ae.jpg) + +![](images/22672dbf3808361df124c6dee0ccc9a194ed70ba2360e598c0d2e4622fe62124.jpg) + +![](images/21f7c6cc2c5c91b5743f1aa1e50965fcf337a4ddff7269bbfc7896af3776ffaf.jpg) +Ours + +![](images/017e7ce6912aaa677ff04da374dba94d3829be6008c0d2a19eacd9b95346d390.jpg) + +![](images/4bdb35185ddeab2f0f2d7bc5aaaebc7cbd67ac759c0ab2c00383fd165bab2ad4.jpg) + +![](images/b70da364d0efbbd894e4ad8718211b20b1a9777381f9d1d7a40d2d2bf72e3534.jpg) +GT +Figure 6. 
Quadrupedal pose transfer visualization. Our method achieves smooth and accurate pose transfer, while SPT fails on the mouth and leg regions.
| Metric | NBS [35] | SPT [36] | Ours |
| --- | --- | --- | --- |
| Accuracy ↑ | 67.8% | 75.6% | 86.9% |
+ +Table 2. Part prediction accuracy on Mixamo [1]. Our method achieves the best part segmentation accuracy. + +# 4.5. Part Understanding Comparison + +As discussed in Sec. 3.1, part segmentation plays an important role in both shape understanding and pose deformation. Though NBS [35] and SPT [36] do not explicitly predict part segmentation label, they are both skinning weight-based methods and we can derive the part segmentation label from the predicted skinning weights. Specifically, by selecting the maximum weight of each vertex, we can convert the skinning weight prediction to part segmentation labels for the vertices. We compare our part prediction results with those derived from SPT and NBS. We report the part segmentation accuracy on the Mixamo datasets in Tab. 2 + +![](images/8c1b2f4ff36dcae4437e98663db3b57ac789d1a88eb4f73a2ca28d0574424ce4.jpg) +Figure 7. Qualitative comparison for ablation study. Removing the constraint (eq. 1) in shape understanding leads to wrong pose deformation results. The volume preserving loss (eq. 5) helps to maintain the identity, e.g., the thickness of the arms in first row. + +
| Metric | SPT [36] | Ours | Metric | SPT [36] | Ours |
| --- | --- | --- | --- | --- | --- |
| PMD ↓ | 10.28 | 8.28 | ELS ↑ | 0.28 | 0.86 |
+ +and visualize the part segmentation results in Fig. 5. Even trained with only part segmentation supervision of human meshes, our method can successfully segment each part for the stylized characters. On the contrary, SPT uses graph convolution network [31] to predict the skinning weights. When training only with human meshes, it often fails to distinguish different parts. As shown in Fig. 5, it mixes up the right and left upper legs, and incorrectly classifies the shoulder as the head. Though NBS is trained with clothed humans, it always classifies human hair as the human body for characters from Mixamo. This is because that NBS uses the MeshCNN [26] as the shape encoder. As a result, it is sensitive to mesh topology and cannot generalize to meshes with disconnected parts (e.g., disconnected hair and head). Tab. 2 further quantitatively demonstrates that our method achieves the best part segmentation accuracy, demonstrating its ability to correctly interpret the shape and part information in stylized characters. + +# 4.6. Quadrupedal Pose Transfer Comparison + +To further show the generalization ability of our method, we conduct experiments on quadrupeds. We report the PMD and ELS score of our method and the SPT [36] in Tab. 3. When testing on hippos with large shape gap from the training meshes, SPT has a hard time generalizing both in terms of pose transfer accuracy and natural deformation. While our method achieves both better qualitative and quantitative results. We visualize the qualitative comparisons in Fig. 6. SPT produces obvious artifacts on the hippo's mouth + +![](images/9c2fc818be70357453cc8987f0da7374f84f7b0d10915ad2a88d0bf0c98689c4.jpg) +Figure 8. Part prediction on stylized quadrupeds. Our method successfully predicts the parts of unseen stylized quadrupeds. + +Table 3. Comparison on Hippos from SMAL [68]. Our method achieves better pose transfer accuracy with more smooth results. + +
| Metric | Ours w/o inv | Ours w/o volume | Ours |
| --- | --- | --- | --- |
| PMD ↓ | 1.26 | 1.02 | 0.99 |
| ELS ↑ | 0.88 | 0.88 | 0.89 |
+ +Table 4. Ablation study on inverse MLP and volume preserving loss. The inverse MLP and volume preserving loss helps to improve pose transfer accuracy and produce smooth deformation. + +and legs, while our method achieves accurate pose transfer and maintains the shape characteristics of the original character at the same time. We provide more results in the supplementary. We also show the part segmentation results on stylized characters by our method in Fig. 8. Even for unique parts such as the hats and antlers, our method correctly assigns them to the head part. + +# 4.7. Ablation Study + +To evaluate the key components of our method, we conduct ablation studies on the MGN dataset by removing the inverse constraint (Eq. 3) in the shape understanding module and the volume-preserving loss (Eq. 5) used during the test-time training produce, we name them as "ours w/o inv" and "ours w/o $v$ " respectively. We report the PMD and ELS metrics in Tab. 4. The model learned without the inverse constraint or volume-preserving loss has worse PMD and ELS score than our full model, indicating the contribution of these two objectives. We also provide qualitative results in Fig. 7. We use red boxes to point out the artifacts. As shown in Fig. 7, our model trained without the inverse constraint produces less accurate pose transfer results. Moreover, adding the volume-preserving loss helps to maintain the character's local details such as the thickness of the arms. + +# 5. Conclusion + +In this paper, we present a model that deforms unrigged, stylized characters guided by a biped or quadruped avatar. Our model is trained with only easily accessible posed human or animal meshes, yet can be applied to unseen stylized characters in a zero-shot manner during inference. To this end, we draw key insights from classic mesh deformation method and develop a correspondence-aware shape understanding module, an implicit pose deformation module and a volume-based test-time training procedure. We carry out extensive experiments on both the biped and quadruped category and show that our method produces more realistic and accurate deformation compared to baselines learned with comparable or more supervision. + +# References + +[1] Mixamo. http://www MIXamo.com/. Accessed on November 09th, 2022. 5, 6, 7 +[2] Kfir Aberman, Peizhuo Li, Dani Lischinski, Olga Sorkine-Hornung, Daniel Cohen-Or, and Baoquan Chen. Skeleton-aware networks for deep motion retargeting. In ACM Transactions on Graphics (SIGGRAPH), 2020. 2, 3 +[3] Noam Aigerman, Kunal Gupta, Vladimir G Kim, Siddhartha Chaudhuri, Jun Saito, and Thibault Groueix. Neural jacobian fields: Learning intrinsic mappings of arbitrary meshes. arXiv preprint arXiv:2205.02904, 2022. 2 +[4] Mazen Al Borno, Ludovic Righetti, Michael J Black, Scott L Delp, Eugene Fiume, and Javier Romero. Robust physics-based motion retargeting with realistic body shapes. In Computer Graphics Forum. Wiley Online Library, 2018. 2, 3 +[5] Andreas Aristidou and Joan Lasenby. FABRIK: A fast, iterative solver for the inverse kinematics problem. Graphical Models, 2011. 3 +[6] Quentin Avril, Donya Ghafourzadeh, Srinivasan Ramachandran, Sahel Fallahdoust, Sarah Ribet, Olivier Dionne, Martin de Lasa, and Eric Paquette. Animation setup transfer for 3D characters. In Computer Graphics Forum, 2016. 2 +[7] Ilya Baran and Jovan Popovic. Automatic rigging and animation of 3D characters. In ACM Transactions on Graphics (SIGGRAPH), 2007. 2, 3 +[8] Ilya Baran, Daniel Vlasic, Eitan Grinspun, and Jovan Popovic. 
Semantic deformation transfer. In ACM Transactions on Graphics (ToG). 2009. 2 +[9] Mirela Ben-Chen, Ofir Weber, and Craig Gotsman. Spatial deformation transfer. In Proceedings of the 2009 ACM SIGGRAPH/Eurographics Symposium on Computer Animation, 2009. 2 +[10] Bharat Lal Bhatnagar, Cristian Sminchisescu, Christian Theobalt, and Gerard Pons-Moll. Combining implicit function learning and parametric models for 3D human reconstruction. In European Conference on Computer Vision (ECCV), 2020. 3 +[11] Bharat Lal Bhatnagar, Garvita Tiwari, Christian Theobalt, and Gerard Pons-Moll. Multi-garment net: Learning to dress 3D people from images. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 6 +[12] Federica Bogo, Javier Romero, Matthew Loper, and Michael J Black. FAUST: Dataset and evaluation for 3D mesh registration. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2014. 2 +[13] Rohan Chabra, Jan E Lenssen, Eddy Ilg, Tanner Schmidt, Julian Straub, Steven Lovegrove, and Richard Newcombe. Deep local shapes: Learning local sdf priors for detailed 3D reconstruction. In European Conference on Computer Vision (ECCV), 2020. 3 +[14] Haoyu Chen, Hao Tang, Henglin Shi, Wei Peng, Nicu Sebe, and Guoying Zhao. Intrinsic-extrinsic preserved gans for unsupervised 3D pose transfer. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[15] Ricky TQ Chen, Yulia Rubanova, Jesse Bettencourt, and David K Duvenaud. Neural ordinary differential equa + +tions. Advances in Neural Information Processing Systems (NeurIPS), 2018. 3 +[16] Zhiqin Chen and Hao Zhang. Learning implicit fields for generative shape modeling. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 3 +[17] An-Chieh Cheng, Xueting Li, Min Sun, Ming-Hsuan Yang, and Sifei Liu. Learning 3D dense correspondence via canonical point autoencoder. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 4 +[18] Julian Chibane, Thiemo Alldieck, and Gerard Pons-Moll. Implicit functions in feature space for 3D shape reconstruction and completion. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 3 +[19] Kwang-Jin Choi and Hyeong-Seok Ko. Online motion retargeting. Comput. Animat. Virtual Worlds, 2000. 3 +[20] Brian Delhaisse, Domingo Esteban, Leonel Rozo, and Darwin Caldwell. Transfer learning of shared latent spaces between robots with similar kinematic structure. In International Joint Conference on Neural Networks (IJCNN), 2017. 3 +[21] Philipp Erler, Paul Guerrero, Stefan Ohrhallinger, Niloy J Mitra, and Michael Wimmer. Points2surf learning implicit surfaces from point clouds. In European Conference on Computer Vision (ECCV), 2020. 3 +[22] Lin Gao, Jie Yang, Yi-Ling Qiao, Yu-Kun Lai, Paul L Rosin, Weiwei Xu, and Shihong Xia. Automatic unpaired shape deformation transfer. ACM Transactions on Graphics (ToG), 2018. 2, 5 +[23] Kyle Genova, Forrester Cole, Daniel Vlasic, Aaron Sarna, William T Freeman, and Thomas Funkhouser. Learning shape templates with structured implicit functions. In IEEE International Conference on Computer Vision (ICCV), 2019. 3 +[24] Michael Gleicher. Retargetting motion to new characters. In Proceedings of the 25th annual conference on Computer graphics and interactive techniques, 1998. 2, 3 +[25] Thibault Groueix, Matthew Fisher, Vladimir G Kim, Bryan C Russell, and Mathieu Aubry. 3D-CODED: 3D correspondences by deep deformation. In European Conference on Computer Vision (ECCV), 2018. 
5 +[26] Rana Hanocka, Amir Hertz, Noa Fish, Raja Giryes, Shachar Fleishman, and Daniel Cohen-Or. MeshCNN: a network with an edge. ACM Transactions on Graphics (ToG), 2019. 8 +[27] Hanyoung Jang, Byungjun Kwon, Moonwon Yu, Seong Uk Kim, and Jongmin Kim. A variational U-Net for motion retargeting. In Comput. Animat. Virtual Worlds, 2020. 3 +[28] Boyan Jiang, Yinda Zhang, Xingkui Wei, Xiangyang Xue, and Yanwei Fu. Learning compositional representation for 4D captures with neural ode. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 3 +[29] Chiyu Jiang, Avneesh Sud, Ameesh Makadia, Jingwei Huang, Matthias Nießner, Thomas Funkhouser, et al. Local implicit grid representations for 3D scenes. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 3 + +[30] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. International Conference on Learning Representations (ICLR), 2015. 6 +[31] Thomas N Kipf and Max Welling. Semi-supervised classification with graph convolutional networks. In International Conference on Learning Representations (ICLR), 2017. 8 +[32] Muhammed Kocabas, Nikos Athanasiou, and Michael J Black. Vibe: Video inference for human body pose and shape estimation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 4 +[33] Jehee Lee and Sung Yong Shin. A hierarchical approach to interactive motion editing for human-like figures. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, 1999. 3 +[34] Jiahui Lei and Kostas Daniilidis. CaDeX: Learning canonical deformation coordinate space for dynamic surface representation via neural homeomorphism. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 3 +[35] Peizhuo Li, Kfir Aberman, Rana Hanocka, Libin Liu, Olga Sorkine-Hornung, and Baoquan Chen. Learning skeletal articulations with neural blend shapes. In ACM Transactions on Graphics (SIGGRAPH), 2021. 2, 5, 6, 7 +[36] Zhouyingcheng Liao, Jimei Yang, Jun Saito, Gerard Pons-Moll, and Yang Zhou. Skeleton-free pose transfer for stylized 3D characters. In European Conference on Computer Vision (ECCV), 2022. 3, 4, 5, 6, 7, 8 +[37] Jongin Lim, Hyung Jin Chang, and Jin Young Choi. PMnet: Learning of disentangled pose and movement for unsupervised motion retargeting. In British Machine Vision Conference (BMVC), 2019. 3 +[38] Feng Liu and Xiaoming Liu. Learning implicit functions for topology-varying dense 3D shape correspondence. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 4 +[39] Lijuan Liu, Youyi Zheng, Di Tang, Yi Yuan, Changjie Fan, and Kun Zhou. Neuroskinning: Automatic skin binding for production characters with deep graph networks. ACM Transactions on Graphics (ToG), 2019. 3 +[40] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J Black. SMPL: A skinned multiperson linear model. ACM Transactions on Graphics (ToG), 2015. 2, 5 +[41] Naureen Mahmood, Nima Ghorbani, Nikolaus F. Troje, Gerard Pons-Moll, and Michael J. Black. AMASS: Archive of motion capture as surface shapes. In IEEE International Conference on Computer Vision (ICCV), 2019. 2, 4 +[42] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3D reconstruction in function space. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 3 +[43] Mateusz Michalkiewicz, Jhony K Pontes, Dominic Jack, Mahsa Baktashmotlagh, and Anders Eriksson. 
Deep level sets: Implicit surface representations for 3D shape inference. arXiv preprint arXiv:1901.06802, 2019. 3 +[44] Marko Mihajlovic, Yan Zhang, Michael J Black, and Siyu Tang. LEAP: Learning articulated occupancy of people. In + +IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 3 +[45] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Occupancy flow: 4D reconstruction by learning particle dynamics. In IEEE International Conference on Computer Vision (ICCV), 2019. 3 +[46] Atsuhiro Noguchi, Umar Iqbal, Jonathan Tremblay, Tatsuya Harada, and Orazio Gallo. Watch it move: Unsupervised discovery of 3D joints for re-posing of articulated objects. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 3 +[47] Pablo Palafox, Aljaž Božić, Justus Thies, Matthias Nießner, and Angela Dai. NPMs: Neural parametric models for 3D deformable shapes. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2, 3, 4 +[48] Pablo Palafox, Nikolaos Sarafianos, Tony Tung, and Angela Dai. SPAMs: Structured implicit parametric models. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 3 +[49] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning continuous signed distance functions for shape representation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 3 +[50] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed AA Osman, Dimitrios Tzionas, and Michael J Black. Expressive body capture: 3D hands, face, and body from a single image. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 5 +[51] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed A. A. Osman, Dimitrios Tzionas, and Michael J. Black. Expressive body capture: 3D hands, face, and body from a single image. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 4 +[52] Martin Poirier and Eric Paquette. Rig retargeting for 3d animation. In Proceedings of the Graphics Interface 2009 Conference, 2009. 2, 3 +[53] Davis Rempe, Tolga Birdal, Aaron Hertzmann, Jimei Yang, Srinath Sridhar, and Leonidas J Guibas. Humor: 3D human motion model for robust pose estimation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2, 4 +[54] Nadine Ruegg, Silvia Zuffi, Konrad Schindler, and Michael J Black. BARC: Learning to regress 3D dog shape from images by exploiting breed information. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 6 +[55] Robert W Sumner and Jovan Popovic. Deformation transfer for triangle meshes. ACM Transactions on Graphics (ToG), 2004. 2, 3, 5 +[56] Robert W Sumner, Johannes Schmid, and Mark Pauly. Embedded deformation for shape manipulation. In ACM Transactions on Graphics (SIGGRAPH). 2007. 2, 3 +[57] Yu Sun, Xiaolong Wang, Zhuang Liu, John Miller, Alexei Efros, and Moritz Hardt. Test-time training with self-supervision for generalization under distribution shifts. In International Conference on Machine Learning (ICML), 2020. 5 + +[58] Ramana Sundararaman, Gautam Pai, and Maks Ovsjanikov. Implicit field supervision for robust non-rigid shape matching. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part III, pages 344-362. Springer, 2022. 3 +[59] Seyoon Tak and Hyeong-Seok Ko. A physically-based motion retargeting filter. 
In ACM Transactions on Graphics (ToG), 2005. 3 +[60] Ruben Villegas, Duygu Ceylan, Aaron Hertzmann, Jimei Yang, and Jun Saito. Contact-aware retargeting of skinned motion. In IEEE International Conference on Computer Vision (ICCV), 2021. 3 +[61] Ruben Villegas, Jimei Yang, Duygu Ceylan, and Honglak Lee. Neural kinematic networks for unsupervised motion retargeting. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 3 +[62] Jiashun Wang, Chao Wen, Yanwei Fu, Haitao Lin, Tianyun Zou, Xiangyang Xue, and Yinda Zhang. Neural pose transfer by spatially adaptive instance normalization. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 5, 6 +[63] Zhan Xu, Yang Zhou, Evangelos Kalogerakis, Chris Landreth, and Karan Singh. RigNet: Neural rigging for articulated characters. In ACM Transactions on Graphics (SIGGRAPH), 2020. 3, 5, 6 +[64] Zhan Xu, Yang Zhou, Evangelos Kalogerakis, and Karan Singh. Predicting animation skeletons for 3D articulated models via volumetric nets. In International Conference on 3D Vision, 2019. 3 +[65] Jie Yang, Lin Gao, Yu-Kun Lai, Paul L Rosin, and Shihong Xia. Biharmonic deformation transfer with automatic key point selection. Graphical Models, 2018. 2 +[66] Wang Yifan, Noam Aigerman, Vladimir G Kim, Siddhartha Chaudhuri, and Olga Sorkine-Hornung. Neural cages for detail-preserving 3D deformations. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2 +[67] Keyang Zhou, Bharat Lal Bhatnagar, and Gerard Pons-Moll. Unsupervised shape and pose disentanglement for 3D meshes. In European Conference on Computer Vision (ECCV), 2020. 2 +[68] Silvia Zuffi, Angjoo Kanazawa, David W Jacobs, and Michael J Black. 3D Menagerie: Modeling the 3D shape and pose of animals. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 
2, 6, 8 \ No newline at end of file diff --git a/2023/Zero-Shot Pose Transfer for Unrigged Stylized 3D Characters/images.zip b/2023/Zero-Shot Pose Transfer for Unrigged Stylized 3D Characters/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..bb83d125531a8b726564d83bcdb23919b90e6eda --- /dev/null +++ b/2023/Zero-Shot Pose Transfer for Unrigged Stylized 3D Characters/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84a192044dd238f9148b173b4a71b3676cfe940d6e383859300abb6ba5f4ebe1 +size 364107 diff --git a/2023/Zero-Shot Pose Transfer for Unrigged Stylized 3D Characters/layout.json b/2023/Zero-Shot Pose Transfer for Unrigged Stylized 3D Characters/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..082e0402aff8c2e052fe50c98bc670a30a837bcd --- /dev/null +++ b/2023/Zero-Shot Pose Transfer for Unrigged Stylized 3D Characters/layout.json @@ -0,0 +1,9250 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 107, + 103, + 486, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 103, + 486, + 121 + ], + "spans": [ + { + "bbox": [ + 107, + 103, + 486, + 121 + ], + "type": "text", + "content": "Zero-shot Pose Transfer for Unrigged Stylized 3D Characters" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 138, + 141, + 452, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 141, + 452, + 187 + ], + "spans": [ + { + "bbox": [ + 138, + 141, + 452, + 187 + ], + "type": "text", + "content": "Jiashun Wang" + }, + { + "bbox": [ + 138, + 141, + 452, + 187 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 138, + 141, + 452, + 187 + ], + "type": "text", + "content": " Xueting Li" + }, + { + "bbox": [ + 138, + 141, + 452, + 187 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 138, + 141, + 452, + 187 + ], + "type": "text", + "content": " Sifei Liu" + }, + { + "bbox": [ + 138, + 141, + 452, + 187 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 138, + 141, + 452, + 187 + ], + "type": "text", + "content": " Shalini De Mello" + }, + { + "bbox": [ + 138, + 141, + 452, + 187 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 138, + 141, + 452, + 187 + ], + "type": "text", + "content": " Orazio Gallo" + }, + { + "bbox": [ + 138, + 141, + 452, + 187 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 138, + 141, + 452, + 187 + ], + "type": "text", + "content": " Xiaolong Wang" + }, + { + "bbox": [ + 138, + 141, + 452, + 187 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 138, + 141, + 452, + 187 + ], + "type": "text", + "content": " Jan Kautz" + }, + { + "bbox": [ + 138, + 141, + 452, + 187 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 138, + 141, + 452, + 187 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 138, + 141, + 452, + 187 + ], + "type": "text", + "content": "Carnegie Mellon University " + }, + { + "bbox": [ + 138, + 141, + 452, + 187 + ], + "type": "inline_equation", + "content": "{}^{2}" + }, + { + "bbox": [ + 138, + 141, + 452, + 187 + ], + "type": "text", + "content": "NVIDIA " + }, + { + "bbox": [ + 138, + 141, + 452, + 187 + ], + "type": "inline_equation", + "content": "{}^{3}" + }, + { + "bbox": [ + 138, + 141, + 452, + 187 + ], + "type": "text", + "content": "UC San Diego" + } + ] + } + ], + "index": 3 + }, + { + "type": 
"image", + "bbox": [ + 48, + 193, + 549, + 413 + ], + "blocks": [ + { + "bbox": [ + 48, + 193, + 549, + 413 + ], + "lines": [ + { + "bbox": [ + 48, + 193, + 549, + 413 + ], + "spans": [ + { + "bbox": [ + 48, + 193, + 549, + 413 + ], + "type": "image", + "image_path": "b8b5358168abb25f99559384141748e8150d018ef2d2eebe1ebab6a6c5eb3eae.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 422, + 546, + 446 + ], + "lines": [ + { + "bbox": [ + 46, + 422, + 546, + 446 + ], + "spans": [ + { + "bbox": [ + 46, + 422, + 546, + 446 + ], + "type": "text", + "content": "Figure 1. Our algorithm transfers the pose of a reference avatar (source) to stylized characters. Unlike existing methods, at training time our approach needs only the mesh of the source avatar in rest and desired pose, and the mesh of the stylized character only in rest pose." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 457, + 192, + 470 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 457, + 192, + 470 + ], + "spans": [ + { + "bbox": [ + 143, + 457, + 192, + 470 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 484, + 290, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 484, + 290, + 689 + ], + "spans": [ + { + "bbox": [ + 46, + 484, + 290, + 689 + ], + "type": "text", + "content": "Transferring the pose of a reference avatar to stylized 3D characters of various shapes is a fundamental task in computer graphics. Existing methods either require the stylized characters to be rigged, or they use the stylized character in the desired pose as ground truth at training. We present a zero-shot approach that requires only the widely available deformed non-stylized avatars in training, and deforms stylized characters of significantly different shapes at inference. Classical methods achieve strong generalization by deforming the mesh at the triangle level, but this requires labelled correspondences. We leverage the power of local deformation, but without requiring explicit correspondence labels. We introduce a semi-supervised shape-understanding module to bypass the need for explicit correspondences at test time, and an implicit pose deformation module that deforms individual surface points to match the target pose. Furthermore, to encourage realistic and accurate deformation of" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 458, + 547, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 458, + 547, + 567 + ], + "spans": [ + { + "bbox": [ + 306, + 458, + 547, + 567 + ], + "type": "text", + "content": "stylized characters, we introduce an efficient volume-based test-time training procedure. Because it does not need rigging, nor the deformed stylized character at training time, our model generalizes to categories with scarce annotation, such as stylized quadrupeds. Extensive experiments demonstrate the effectiveness of the proposed method compared to the state-of-the-art approaches trained with comparable or more supervision. Our project page is available at https://jiashunwang.github.io/ZPT/" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 595, + 387, + 608 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 595, + 387, + 608 + ], + "spans": [ + { + "bbox": [ + 306, + 595, + 387, + 608 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "content": "Stylized 3D characters, such as those in Fig. 1, are commonly used in animation, movies, and video games. Deforming these characters to mimic natural human or animal poses has been a long-standing task in computer graphics. Different from the 3D models of natural humans and animals, stylized 3D characters are created by professional artists through imagination and exaggeration. As a result, each stylized character has a distinct skeleton, shape, mesh" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 702, + 249, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 249, + 713 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 249, + 713 + ], + "type": "text", + "content": "*Work done during Jiashun Wang's internship at NVIDIA." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "8704" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 492 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 492 + ], + "type": "text", + "content": "topology, and usually include various accessories, such as a cloak or wings (see Fig. 1). These variations hinder the process of matching the pose of a stylized 3D character to that of a reference avatar, generally making manual rigging a requirement. Unfortunately, rigging is a tedious process that requires manual effort to create the skeleton and skinning weights for each character. Even when provided with manually annotated rigs, transferring poses from a source avatar onto stylized characters is not trivial when the source and target skeletons differ. Automating this procedure is still an open research problem and is the focus of many recent works [2, 4, 24, 52]. Meanwhile, non-stylized 3D humans and animals have been well-studied by numerous prior works [35, 40, 54, 62, 68]. A few methods generously provide readily available annotated datasets [11, 12, 41, 68], or carefully designed parametric models [40, 51, 68]. 
By taking advantage of these datasets [12, 41], several learning-based methods [7, 14, 35, 62, 67] disentangle and transfer poses between human meshes using neural networks. However, these methods (referred to as \"part-level\" in the following) carry out pose transfer by either globally deforming the whole body mesh [14, 22, 47, 67] or by transforming body parts [35, 48], both of which lead to overfitting on the training human meshes and fail to generalize to stylized characters with significantly different body part shapes. Interestingly, classical mesh deformation methods [55, 56] (referred to as \"local\" in the following) can transfer poses between a pair of meshes with significant shape differences by computing and transferring per-triangle transformations through correspondence. Though these methods require manual correspondence annotation between the source and target meshes, they provide a key insight that by transforming individual triangles instead of body parts, the mesh deformation methods are more agnostic to a part's shape and can generalize to meshes with different shapes." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 498, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 498, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 498, + 289, + 715 + ], + "type": "text", + "content": "We marry the benefits of learning-based methods [7, 14, 35, 62, 67] with the classic local deformation approach [55] and present a model for unrigged, stylized character deformation guided by a non-stylized biped or quadruped avatar. Notably, our model only requires easily accessible posed human or animal meshes for training and can be directly applied to deform 3D stylized characters with a significantly different shape at inference. To this end, we implicitly operationalize the key insight from the local deformation method [55] by modeling the shape and pose of a 3D character with a correspondence-aware shape understanding module and an implicit pose deformation module. The shape understanding module learns to predict the part segmentation label (i.e., the coarse-level correspondence) for each surface point, besides representing the shape of a 3D character as a latent shape code. The pose deformation module is conditioned on the shape code and deforms individual surface point guided by a target pose code sampled" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 304, + 72, + 545, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 120 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 120 + ], + "type": "text", + "content": "from a prior pose latent space [50]. Furthermore, to encourage realistic deformation and generalize to rare poses, we propose a novel volume-based test-time training procedure that can be efficiently applied to unseen stylized characters." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 304, + 121, + 547, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 121, + 547, + 179 + ], + "spans": [ + { + "bbox": [ + 304, + 121, + 547, + 179 + ], + "type": "text", + "content": "During inference, by mapping biped or quadruped poses from videos, in addition to meshes to the prior pose latent space using existing works [32, 51, 53], we can transfer poses from different modalities onto unrigged 3D stylized characters. 
Our main contributions are:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 317, + 180, + 545, + 335 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 317, + 180, + 545, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 180, + 545, + 215 + ], + "spans": [ + { + "bbox": [ + 317, + 180, + 545, + 215 + ], + "type": "text", + "content": "- We propose a solution to a practical and challenging task - learning a model for stylized 3D character deformation with only posed human or animal meshes." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 317, + 216, + 545, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 216, + 545, + 275 + ], + "spans": [ + { + "bbox": [ + 317, + 216, + 545, + 275 + ], + "type": "text", + "content": "- We develop a correspondence-aware shape understanding module, an implicit pose deformation module, and a volume-based test-time training procedure to generalize the proposed model to unseen stylized characters and arbitrary poses in a zero-shot manner." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 317, + 276, + 545, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 276, + 545, + 335 + ], + "spans": [ + { + "bbox": [ + 317, + 276, + 545, + 335 + ], + "type": "text", + "content": "- We carry out extensive experiments on both humans and quadrupeds to show that our method produces more visually pleasing and accurate deformations compared to baselines trained with comparable or more supervision." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 346, + 392, + 358 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 346, + 392, + 358 + ], + "spans": [ + { + "bbox": [ + 306, + 346, + 392, + 358 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 366, + 545, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 366, + 545, + 628 + ], + "spans": [ + { + "bbox": [ + 304, + 366, + 545, + 628 + ], + "type": "text", + "content": "Deformation Transfer. Deformation transfer is a longstanding problem in the computer graphics community [3, 6, 8, 9, 55, 65]. Sumner et al. [55] apply an affine transformation to each triangle of the mesh to solve an optimization problem that matches the deformation of the source mesh while maintaining the shape of the target mesh. Ben-Chen et al. [9] enclose the source and target shapes with two cages and transfer the Jacobians of the source deformation to the target shape. However, these methods need tedious human efforts to annotate the correspondence between the source and target shapes. More recently, several deep learning methods are developed to solve the deformation transfer task. However, they either require manually providing the correspondence [66] or cannot generalize [14, 22, 67] to stylized characters with different shapes. Gao et al. [22] propose a VAE-GAN based method to leverage the cycle consistency between the source and target shapes. Nonetheless, it can only work on shapes used in training. Wang et al. [62] introduce conditional normalization used in style transfer for 3D deformation transfer. But the method is limited to clothed-humans and cannot handle the large shape variations of stylized characters." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "content": "We argue that these learning-based methods cannot generalize to stylized characters because they rely on encoding their global information (e.g., body or parts), which is different from traditional works that focus on local deformation, e.g., the affine transformation applied to each triangle in [55]. Using a neural network to encode the global information easily leads to overfitting. For example, models" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "8705" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "type": "text", + "content": "trained on human meshes cannot generalize to a stylized humanoid character. At the same time, early works only focus on local information and cannot model global information such as correspondence between the source and target shapes, which is why they all need human effort to annotate the correspondence. Our method tries to learn the correspondence and deform locally at the same time." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 166, + 287, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 166, + 287, + 345 + ], + "spans": [ + { + "bbox": [ + 46, + 166, + 287, + 345 + ], + "type": "text", + "content": "Skeleton-based Pose Transfer. Besides mesh deformation transfer, an alternative way to transfer pose is to utilize skeletons. Motion retargeting is also a common name used for transferring poses from one motion sequence to another. Gleicher et al. [24] propose a space-time constrained solver aiming to satisfy the kinematics-level constraints and to preserve the characters' original identity. Following works [5, 19, 33] try to solve inverse-kinematics or inverse rate control to achieve pose transfer. There are also dynamics-based methods [4, 59] that consider physics during the retargeting process. Recently, learning-based methods [20, 27, 37, 60, 61] train deep neural networks to predict the transformation of the skeleton. Aberman et al. [2] propose a pooling-based method to transfer poses between meshes with different skeletons." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 356, + 287, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 356, + 287, + 499 + ], + "spans": [ + { + "bbox": [ + 46, + 356, + 287, + 499 + ], + "type": "text", + "content": "All these works highly rely on the skeleton for pose transfer. Other works try to estimate the rigging of the template shape [7, 39, 52, 63, 64] when a skeleton is not available. But if the prediction of the skinning weights fails, the retargeting fails as well. Liao et al. 
[36] propose a model that learns to predict the skinning weights and pose transfer jointly using ground truth skinning weights and paired motion data as supervision, which limits the generalization of this method to categories where annotations are more scarce compared to humans (e.g., quadrupeds). Instead, our method uses posed human or animal meshes for training and deforms stylized characters of different shapes at inference." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 510, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 510, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 510, + 288, + 713 + ], + "type": "text", + "content": "Implicit 3D shape representation. Implicit 3D shape representations have shown great success in reconstructing static shapes [13,16,18,21,23,29,42,43,49] and deformable ones [10,28,34,44-48,58]. DeepSDF [49] proposes to use an MLP to predict the signed distance field (SDF) value of a query point in 3D space, where a shape code is jointly optimized in an auto-decoding manner. Occupancy flow [45] generalizes the Occupancy Networks [42] to learn a temporally and spatially continuous vector field with a NeuralODE [15]. Inspired by parametric models, NPMs [47] disentangles and represents the shape and pose of dynamic humans by learning an implicit shape and pose function, respectively. Different from these implicit shape representation works that focus on reconstructing static or deformable meshes, we further exploit the inherent continuity and locality of implicit functions to deform stylized characters to match a target pose in a zero-shot manner." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 306, + 71, + 361, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 71, + 361, + 83 + ], + "spans": [ + { + "bbox": [ + 306, + 71, + 361, + 83 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 91, + 545, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 91, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 304, + 91, + 545, + 319 + ], + "type": "text", + "content": "We aim to transfer the pose of a biped or quadruped avatar to an unrigged, stylized 3D character. We tackle this problem by modeling the shape and pose of a 3D character using a correspondence-aware shape understanding module and an implicit pose deformation module, inspired by classical mesh deformation methods [55, 56]. The shape understanding module (Sec. 3.1, Fig. 2) predicts a latent shape code and part segmentation label of a 3D character in rest pose, while the pose deformation module (Sec. 3.2, Fig. 3) deforms the character in the rest pose given the predicted shape code and a target pose code. Moreover, to produce natural deformations and generalize to rare poses unseen at training, we introduce an efficient volume-based test-time training procedure (Sec 3.3) for unseen stylized characters. All three modules, trained only with posed, unclothed human meshes, and unrigged, stylized characters in a rest pose, are directly applied to unseen stylized characters at inference. We explain our method for humans, and describe how we extend it to quadrupeds in Sec. 4.6." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 325, + 543, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 325, + 543, + 338 + ], + "spans": [ + { + "bbox": [ + 305, + 325, + 543, + 338 + ], + "type": "text", + "content": "3.1. Correspondence-Aware Shape Understanding" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 344, + 545, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 344, + 545, + 392 + ], + "spans": [ + { + "bbox": [ + 304, + 344, + 545, + 392 + ], + "type": "text", + "content": "Given a 3D character in rest pose, we propose a shape understanding module to represent its shape information as a latent code, and to predict a body part segmentation label for each surface point." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 392, + 545, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 392, + 545, + 571 + ], + "spans": [ + { + "bbox": [ + 304, + 392, + 545, + 571 + ], + "type": "text", + "content": "To learn a representative shape code, we employ an implicit auto-decoder [47, 49] that reconstructs the 3D character taking the shape code as input. During training, we jointly optimize the shape code of each training sample and the decoder. Given an unseen character (i.e., a stylized 3D character) during inference, we obtain its shape code by freezing the decoder and optimizing the shape code to reconstruct the given character. Specifically, as shown in Fig. 2, given the concatenation of a query point " + }, + { + "bbox": [ + 304, + 392, + 545, + 571 + ], + "type": "inline_equation", + "content": "x \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 304, + 392, + 545, + 571 + ], + "type": "text", + "content": " and the shape code " + }, + { + "bbox": [ + 304, + 392, + 545, + 571 + ], + "type": "inline_equation", + "content": "s \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 304, + 392, + 545, + 571 + ], + "type": "text", + "content": ", we first obtain an embedding " + }, + { + "bbox": [ + 304, + 392, + 545, + 571 + ], + "type": "inline_equation", + "content": "e \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 304, + 392, + 545, + 571 + ], + "type": "text", + "content": " via an MLP denoted as " + }, + { + "bbox": [ + 304, + 392, + 545, + 571 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 304, + 392, + 545, + 571 + ], + "type": "text", + "content": ". Conditioned on the embedding " + }, + { + "bbox": [ + 304, + 392, + 545, + 571 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 304, + 392, + 545, + 571 + ], + "type": "text", + "content": ", the occupancy " + }, + { + "bbox": [ + 304, + 392, + 545, + 571 + ], + "type": "inline_equation", + "content": "\\hat{o}_x \\in \\mathbb{R}" + }, + { + "bbox": [ + 304, + 392, + 545, + 571 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 304, + 392, + 545, + 571 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 304, + 392, + 545, + 571 + ], + "type": "text", + "content": " is then predicted by another MLP denoted as " + }, + { + "bbox": [ + 304, + 392, + 545, + 571 + ], + "type": "inline_equation", + "content": "\\mathcal{O}" + }, + { + "bbox": [ + 304, + 392, + 545, + 571 + ], + "type": "text", + "content": ". 
The occupancy indicates if the query point " + }, + { + "bbox": [ + 304, + 392, + 545, + 571 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 304, + 392, + 545, + 571 + ], + "type": "text", + "content": " is inside or outside the body surface and can be supervised by the ground truth occupancy as:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 312, + 579, + 545, + 604 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 579, + 545, + 604 + ], + "spans": [ + { + "bbox": [ + 312, + 579, + 545, + 604 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathcal {O}} = - \\sum_ {x} \\left(o _ {x} \\cdot \\log \\left(\\hat {o} _ {x}\\right) + \\left(1 - o _ {x}\\right) \\cdot \\log \\left(1 - \\hat {o} _ {x}\\right)\\right), \\tag {1}", + "image_path": "df0b9c08d7023baae52f140402fc0f05491e5f0ae77d0f799b4dc4352f8c9911.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 606, + 509, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 606, + 509, + 617 + ], + "spans": [ + { + "bbox": [ + 306, + 606, + 509, + 617 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 306, + 606, + 509, + 617 + ], + "type": "inline_equation", + "content": "o_x" + }, + { + "bbox": [ + 306, + 606, + 509, + 617 + ], + "type": "text", + "content": " is the ground truth occupancy at point " + }, + { + "bbox": [ + 306, + 606, + 509, + 617 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 306, + 606, + 509, + 617 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 618, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 618, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 618, + 545, + 713 + ], + "type": "text", + "content": "Since our shape code eventually serves as a condition for the pose deformation module, we argue that it should also capture the part correspondence knowledge across different instances, in addition to the shape information (e.g., height, weight, and shape of each body part). This insight has been utilized by early local mesh deformation method [55], which explicitly utilizes correspondence to transfer local transformations between the source and target meshes. Our" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "8706" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 62, + 62, + 521, + 210 + ], + "blocks": [ + { + "bbox": [ + 62, + 62, + 521, + 210 + ], + "lines": [ + { + "bbox": [ + 62, + 62, + 521, + 210 + ], + "spans": [ + { + "bbox": [ + 62, + 62, + 521, + 210 + ], + "type": "image", + "image_path": "84b1ba766b54c9ef332c96b1d8eabc7c5bc78378df1476b6d69cda375015e6e6.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 218, + 546, + 240 + ], + "lines": [ + { + "bbox": [ + 46, + 218, + 546, + 240 + ], + "spans": [ + { + "bbox": [ + 46, + 218, + 546, + 240 + ], + "type": "text", + "content": "Figure 2. The shape understanding module (Sec. 3.1). 
Given a query point and a learnable shape code, we take MLPs to predict the occupancy, part segmentation label and further use an inverse MLP to regress the query point." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 50, + 248, + 286, + 343 + ], + "blocks": [ + { + "bbox": [ + 50, + 248, + 286, + 343 + ], + "lines": [ + { + "bbox": [ + 50, + 248, + 286, + 343 + ], + "spans": [ + { + "bbox": [ + 50, + 248, + 286, + 343 + ], + "type": "image", + "image_path": "9368775deb240878960eedeec753f6ad31bb00e7b841c77c983e7bcc2ec94a62.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 349, + 288, + 383 + ], + "lines": [ + { + "bbox": [ + 46, + 349, + 288, + 383 + ], + "spans": [ + { + "bbox": [ + 46, + 349, + 288, + 383 + ], + "type": "text", + "content": "Figure 3. The pose deformation module (Sec. 3.2). Given a query point on the surface, the learned shape code and a target pose code, we use an MLP to predict the offset of the query point." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 394, + 287, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 394, + 287, + 573 + ], + "spans": [ + { + "bbox": [ + 46, + 394, + 287, + 573 + ], + "type": "text", + "content": "pose deformation process could also benefit from learning part correspondence. Take the various headgear, hats, and horns on the stylized characters's heads in Fig. 1 as an example. If these components can be \"understood\" as extensions of the character's heads by their shape codes, they will move smoothly with the character's heads during pose deformation. Thus, besides mesh reconstruction, we effectively task our shape understanding module with an additional objective: predicting part-level correspondence instantiated as the part segmentation label. Specifically, we propose to utilize an MLP " + }, + { + "bbox": [ + 46, + 394, + 287, + 573 + ], + "type": "inline_equation", + "content": "\\mathcal{P}" + }, + { + "bbox": [ + 46, + 394, + 287, + 573 + ], + "type": "text", + "content": " to additionally predict a part label " + }, + { + "bbox": [ + 46, + 394, + 287, + 573 + ], + "type": "inline_equation", + "content": "p_x = (p_x^1,\\dots,p_x^K)^T\\in \\mathbb{R}^K" + }, + { + "bbox": [ + 46, + 394, + 287, + 573 + ], + "type": "text", + "content": " for each surface point " + }, + { + "bbox": [ + 46, + 394, + 287, + 573 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 394, + 287, + 573 + ], + "type": "text", + "content": ". 
Thanks to the densely annotated human mesh dataset, we can also supervise part segmentation learning with ground truth labels via:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 578, + 287, + 611 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 578, + 287, + 611 + ], + "spans": [ + { + "bbox": [ + 105, + 578, + 287, + 611 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathcal {P}} = \\sum_ {x} (- \\sum_ {k = 1} ^ {K} \\mathbb {1} _ {x} ^ {k} \\log \\left(p _ {x} ^ {k}\\right)), \\tag {2}", + "image_path": "e1718a50d489bf292cb18c69c5d9b50d1645c0306507f7d740b783efbc5c92ab.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 617, + 287, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 287, + 642 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 287, + 642 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 617, + 287, + 642 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 46, + 617, + 287, + 642 + ], + "type": "text", + "content": " is the total number of body parts, and " + }, + { + "bbox": [ + 46, + 617, + 287, + 642 + ], + "type": "inline_equation", + "content": "\\mathbb{1}_x^k = 1" + }, + { + "bbox": [ + 46, + 617, + 287, + 642 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 46, + 617, + 287, + 642 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 617, + 287, + 642 + ], + "type": "text", + "content": " belongs to the " + }, + { + "bbox": [ + 46, + 617, + 287, + 642 + ], + "type": "inline_equation", + "content": "k^{th}" + }, + { + "bbox": [ + 46, + 617, + 287, + 642 + ], + "type": "text", + "content": " part and " + }, + { + "bbox": [ + 46, + 617, + 287, + 642 + ], + "type": "inline_equation", + "content": "\\mathbb{1}_x^k = 0" + }, + { + "bbox": [ + 46, + 617, + 287, + 642 + ], + "type": "text", + "content": " otherwise." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "type": "text", + "content": "To prepare the shape understanding module for stylized characters during inference, besides unclothed human meshes, we also include unrigged 3D stylized characters in rest pose during training. These characters in rest pose are easily accessible and do not require any annotation. For shape reconstruction, Eq. 1 can be similarly applied to the" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 262, + 546, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 262, + 546, + 357 + ], + "spans": [ + { + "bbox": [ + 304, + 262, + 546, + 357 + ], + "type": "text", + "content": "stylized characters. However, as there is no part segmentation annotation for stylized characters, we propose a self-supervised inverse constraint inspired by correspondence learning methods [17,38] to facilitate part segmentation prediction on these characters. 
Specifically, we reconstruct the query point's coordinates from the concatenation of the shape code " + }, + { + "bbox": [ + 304, + 262, + 546, + 357 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 304, + 262, + 546, + 357 + ], + "type": "text", + "content": " and the embedding " + }, + { + "bbox": [ + 304, + 262, + 546, + 357 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 304, + 262, + 546, + 357 + ], + "type": "text", + "content": " through an MLP " + }, + { + "bbox": [ + 304, + 262, + 546, + 357 + ], + "type": "inline_equation", + "content": "\\mathcal{Q}" + }, + { + "bbox": [ + 304, + 262, + 546, + 357 + ], + "type": "text", + "content": " and add an auxiliary objective as:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 377, + 366, + 545, + 380 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 377, + 366, + 545, + 380 + ], + "spans": [ + { + "bbox": [ + 377, + 366, + 545, + 380 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathcal {Q}} = \\left| \\left| \\mathcal {Q} (s, e) - x \\right| \\right| ^ {2}. \\tag {3}", + "image_path": "4b599052aeb6ba5e99195549ba85b796ccd1b5286230ab69d37d62642f3227e1.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 388, + 546, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 388, + 546, + 506 + ], + "spans": [ + { + "bbox": [ + 304, + 388, + 546, + 506 + ], + "type": "text", + "content": "Intuitively, for stylized characters without part annotation, the model learned without this objective may converge to a trivial solution where similar embeddings are predicted for points with the same occupancy value, even when they are far away from each other, and belong to different body parts. Tab. 4 further quantitatively verifies the effectiveness of this constraint. Beyond facilitating shape understanding, the predicted part segmentation label is further utilized in the volume-based test-time training module which will be introduced in Sec. 3.3." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 515, + 491, + 528 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 515, + 491, + 528 + ], + "spans": [ + { + "bbox": [ + 306, + 515, + 491, + 528 + ], + "type": "text", + "content": "3.2. Implicit Pose Deformation Module" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 533, + 545, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 533, + 545, + 593 + ], + "spans": [ + { + "bbox": [ + 304, + 533, + 545, + 593 + ], + "type": "text", + "content": "Given the learned shape code and a target pose, the pose deformation module deforms each surface point of the character to match the target pose. In the following, we first describe how we represent a human pose and then introduce the implicit function used for pose deformation." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 594, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 546, + 713 + ], + "type": "text", + "content": "Instead of learning a latent pose space from scratch as in [36, 47], we propose to represent a human pose by the corresponding pose code in the latent space of VPoser [51]. Our intuition is that VPoser is trained with an abundance of posed humans from the large-scale AMASS dataset [41]. 
This facilitates faster training and provides robustness to overfitting. Furthermore, human poses can be successfully estimated from different modalities (e.g., videos or meshes), and mapped to the latent space of VPoser by existing methods [32, 51, 53]. By taking advantage of these works, our" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "8707" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 108 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 108 + ], + "type": "text", + "content": "model can be applied to transfer poses from various modalities to an unrigged stylized character without any additional effort. A few examples can be found in the supplementary." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 108, + 287, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 108, + 287, + 204 + ], + "spans": [ + { + "bbox": [ + 46, + 108, + 287, + 204 + ], + "type": "text", + "content": "To deform a character to match the given pose, we learn a neural implicit function " + }, + { + "bbox": [ + 46, + 108, + 287, + 204 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 46, + 108, + 287, + 204 + ], + "type": "text", + "content": " that takes the sampled pose code " + }, + { + "bbox": [ + 46, + 108, + 287, + 204 + ], + "type": "inline_equation", + "content": "m\\in \\mathbb{R}^{32}" + }, + { + "bbox": [ + 46, + 108, + 287, + 204 + ], + "type": "text", + "content": ", the learned shape code, and a query point " + }, + { + "bbox": [ + 46, + 108, + 287, + 204 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 108, + 287, + 204 + ], + "type": "text", + "content": " around the character's surface as inputs and outputs the offset (denoted as " + }, + { + "bbox": [ + 46, + 108, + 287, + 204 + ], + "type": "inline_equation", + "content": "\\Delta \\hat{x}\\in \\mathbb{R}^3" + }, + { + "bbox": [ + 46, + 108, + 287, + 204 + ], + "type": "text", + "content": ") of " + }, + { + "bbox": [ + 46, + 108, + 287, + 204 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 108, + 287, + 204 + ], + "type": "text", + "content": " in 3D space. Given the densely annotated human mesh dataset, we directly use the ground truth offset " + }, + { + "bbox": [ + 46, + 108, + 287, + 204 + ], + "type": "inline_equation", + "content": "\\Delta x" + }, + { + "bbox": [ + 46, + 108, + 287, + 204 + ], + "type": "text", + "content": " as supervision. The training objective for our pose deformation module is defined as:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 114, + 213, + 287, + 237 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 213, + 287, + 237 + ], + "spans": [ + { + "bbox": [ + 114, + 213, + 287, + 237 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathcal {D}} = \\sum_ {x} \\left| \\left| \\Delta \\hat {x} - \\Delta x \\right| \\right| ^ {2}. 
\\tag {4}", + "image_path": "5495ea3d15927f39a6c8c655d62043d116ba86d847dda422f1e6081f5597981b.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 245, + 288, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 245, + 288, + 484 + ], + "spans": [ + { + "bbox": [ + 46, + 245, + 288, + 484 + ], + "type": "text", + "content": "Essentially, our implicit pose deformation module is similar in spirit to early local mesh deformation methods [55] and has two key advantages compared to the part-level pose transfer methods [22, 36, 62]. First, our implicit pose deformation network is agnostic to mesh topology and resolution. Thus our model can be directly applied to unseen 3D stylized characters with significantly different resolutions and mesh topology compared to the training human meshes during inference. Second, stylized characters often include distinct body part shapes compared to humans. For example, the characters shown in Fig. 1 include big heads or various accessories. Previous part-level methods [36] that learn to predict a bone transformation and skinning weight for each body part usually fail on these unique body parts, since they are different from the corresponding human body parts used for training. In contrast, by learning to deform individual surface point, implicit functions are more agnostic to the overall shape of a body part and thus can generalize better to stylized characters with significantly different body part shapes. Fig. 4 and Fig. 6 show these advantages." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 491, + 226, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 491, + 226, + 504 + ], + "spans": [ + { + "bbox": [ + 47, + 491, + 226, + 504 + ], + "type": "text", + "content": "3.3. Volume-based Test-time Training" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 510, + 287, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 510, + 287, + 629 + ], + "spans": [ + { + "bbox": [ + 46, + 510, + 287, + 629 + ], + "type": "text", + "content": "The shape understanding and pose deformation modules discussed above are trained with only posed human meshes and unrigged 3D stylized characters in rest pose. When applied to unseen characters with significantly different shapes, we observe surface distortion introduced by the pose deformation module. Moreover, it is challenging for the module to fully capture the long tail of the pose distribution. To resolve these issues, we propose to apply test-time training [57] and fine-tune the pose deformation module on unseen stylized characters." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 630, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 630, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 630, + 287, + 713 + ], + "type": "text", + "content": "To encourage natural pose deformation, we further propose a volume-preserving constraint during test-time training. Our key insight is that preserving the volume of each part in the rest pose mesh during pose deformation results in less distortion [35, 62]. However, it is non-trivial to compute the precise volume of each body part, which can have complex geometry. 
Instead, we propose to preserve the Eu" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 72, + 545, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 168 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 168 + ], + "type": "text", + "content": "ccludean distance between pairs of vertices sampled from the surface of the mesh, as a proxy for constraining the volume. Specifically, given a mesh in rest pose, we randomly sample two points " + }, + { + "bbox": [ + 304, + 72, + 545, + 168 + ], + "type": "inline_equation", + "content": "x_{i}^{c}" + }, + { + "bbox": [ + 304, + 72, + 545, + 168 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 72, + 545, + 168 + ], + "type": "inline_equation", + "content": "x_{j}^{c}" + }, + { + "bbox": [ + 304, + 72, + 545, + 168 + ], + "type": "text", + "content": " on the surface within the same part " + }, + { + "bbox": [ + 304, + 72, + 545, + 168 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 304, + 72, + 545, + 168 + ], + "type": "text", + "content": " using the part segmentation prediction from the shape understanding module. We calculate the offset of these two points " + }, + { + "bbox": [ + 304, + 72, + 545, + 168 + ], + "type": "inline_equation", + "content": "\\Delta \\hat{x}_{i}^{c}" + }, + { + "bbox": [ + 304, + 72, + 545, + 168 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 72, + 545, + 168 + ], + "type": "inline_equation", + "content": "\\Delta \\hat{x}_{j}^{c}" + }, + { + "bbox": [ + 304, + 72, + 545, + 168 + ], + "type": "text", + "content": " using our pose deformation module and minimize the change in the distance between them by:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 176, + 547, + 213 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 176, + 547, + 213 + ], + "spans": [ + { + "bbox": [ + 305, + 176, + 547, + 213 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {v} = \\sum_ {c} \\sum_ {i} \\sum_ {j} \\left(\\left| \\left| x _ {i} ^ {c} - x _ {j} ^ {c} \\right| \\right| - \\left| \\left| \\left(x _ {i} ^ {c} + \\Delta \\hat {x} _ {i} ^ {c}\\right) - \\left(x _ {j} ^ {c} + \\Delta \\hat {x} _ {j} ^ {c}\\right) \\right| \\right|\\right) ^ {2}. \\tag {5}", + "image_path": "6b0ef719190d2019f616e4f1a533d40d874b90c2041fea3db3b9e7c38a2aae39.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 210, + 545, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 210, + 545, + 246 + ], + "spans": [ + { + "bbox": [ + 304, + 210, + 545, + 246 + ], + "type": "text", + "content": "By sampling a large number of point pairs within a part and minimizing Eq. 5, we can approximately maintain the volume of each body part during pose deformation." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 246, + 545, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 246, + 545, + 365 + ], + "spans": [ + { + "bbox": [ + 304, + 246, + 545, + 365 + ], + "type": "text", + "content": "Furthermore, in order to generalize the pose deformation module to long-tail poses that are rarely seen during training, we propose to utilize the source character in rest pose and its deformed shape as paired training data during test-time training. 
Specifically, we take the source character in rest pose, its target pose code, and its optimized shape code as inputs and we output the movement " + }, + { + "bbox": [ + 304, + 246, + 545, + 365 + ], + "type": "inline_equation", + "content": "\\Delta \\hat{x}^{dr}" + }, + { + "bbox": [ + 304, + 246, + 545, + 365 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 246, + 545, + 365 + ], + "type": "inline_equation", + "content": "x^{dr}" + }, + { + "bbox": [ + 304, + 246, + 545, + 365 + ], + "type": "text", + "content": " is a query point from the source character. We minimize the L2 distance between the predicted movement " + }, + { + "bbox": [ + 304, + 246, + 545, + 365 + ], + "type": "inline_equation", + "content": "\\Delta \\hat{x}^{dr}" + }, + { + "bbox": [ + 304, + 246, + 545, + 365 + ], + "type": "text", + "content": " and the ground truth movement " + }, + { + "bbox": [ + 304, + 246, + 545, + 365 + ], + "type": "inline_equation", + "content": "\\Delta x^{dr}" + }, + { + "bbox": [ + 304, + 246, + 545, + 365 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 364, + 374, + 545, + 399 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 364, + 374, + 545, + 399 + ], + "spans": [ + { + "bbox": [ + 364, + 374, + 545, + 399 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {d r} = \\sum_ {x ^ {d r}} \\left| \\left| \\Delta \\hat {x} ^ {d r} - \\Delta x ^ {d r} \\right| \\right| ^ {2}. \\tag {6}", + "image_path": "932f76d1bd9073687c9a30d09a00970251c41f4700d8c3c4ebbd937a516d661e.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 402, + 545, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 402, + 545, + 474 + ], + "spans": [ + { + "bbox": [ + 304, + 402, + 545, + 474 + ], + "type": "text", + "content": "Besides the volume-preserving constraint and the reconstruction of the source character, we also employ the edge loss " + }, + { + "bbox": [ + 304, + 402, + 545, + 474 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_e" + }, + { + "bbox": [ + 304, + 402, + 545, + 474 + ], + "type": "text", + "content": " used in [25, 36, 62]. Overall, the objectives for the test-time training procedure are " + }, + { + "bbox": [ + 304, + 402, + 545, + 474 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathcal{T}} = \\lambda_v\\mathcal{L}_v + \\lambda_e\\mathcal{L}_e + \\lambda_{dr}\\mathcal{L}_{dr}" + }, + { + "bbox": [ + 304, + 402, + 545, + 474 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 402, + 545, + 474 + ], + "type": "inline_equation", + "content": "\\lambda_v, \\lambda_e" + }, + { + "bbox": [ + 304, + 402, + 545, + 474 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 402, + 545, + 474 + ], + "type": "inline_equation", + "content": "\\lambda_{dr}" + }, + { + "bbox": [ + 304, + 402, + 545, + 474 + ], + "type": "text", + "content": " are hyper-parameters balancing the loss weights." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 484, + 387, + 498 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 484, + 387, + 498 + ], + "spans": [ + { + "bbox": [ + 306, + 484, + 387, + 498 + ], + "type": "text", + "content": "4. 
Experiments" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 503, + 368, + 515 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 503, + 368, + 515 + ], + "spans": [ + { + "bbox": [ + 306, + 503, + 368, + 515 + ], + "type": "text", + "content": "4.1. Datasets" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 522, + 545, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 522, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 304, + 522, + 545, + 689 + ], + "type": "text", + "content": "To train the shape understanding module, we use 40 human meshes sampled from the SMPL [40] parametric model. We use both the occupancy and part segmentation label of these meshes as supervision (see Sec. 3.1). To generalize the shape understanding module to stylized characters, we further include 600 stylized characters from RigNet [63]. Note that we only use the rest pose mesh (i.e., occupancy label) of the characters in [63] for training. To train our pose deformation module, we construct paired training data by deforming each of the 40 SMPL characters discussed above with 5000 pose codes sampled from the VPoser's [50] latent space. In total, we collect 200,000 training pairs, with each pair including an unclothed human mesh in rest pose and the same human mesh in target pose." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": "After training the shape understanding and pose deformation modules, we test them on the Mixamo [1] dataset," + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "8708" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 191 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 191 + ], + "type": "text", + "content": "which includes challenging stylized characters, and the MGN [11] dataset, which includes clothed humans. The characters in both datasets have different shapes compared to the unclothed SMPL meshes we used for training, demonstrating the generalization ability of the proposed method. Following [36], we test on 19 stylized characters, with each deformed by 28 motion sequences from the Mixamo dataset. For the MGN dataset, we test on 16 clothed characters, with each deformed by 200 target poses. Both the testing characters and poses are unseen during training." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 192, + 288, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 192, + 288, + 420 + ], + "spans": [ + { + "bbox": [ + 46, + 192, + 288, + 420 + ], + "type": "text", + "content": "For quadrupeds, since there is no dataset including large-scale paired stylized quadrupeds for quantitative evaluation, we split all characters from the SMAL [68] dataset and use the first 34 shapes (i.e., cats, dogs, and horses) for training. 
We further collect 81 stylized quadrupeds in rest pose from the RigNet [63] to improve generalization of the shape understanding module. Similarly to the human category, we use occupancy and part segmentation supervision for the SMAL shapes and only the occupancy supervision for RigNet meshes. To train the pose deformation module, we deform each of the 34 characters in SMAL by 2000 poses sampled from the latent space of BARC [54], a 3D reconstruction model trained for the dog category. We quantitatively evaluate our model on the hippo meshes from the SMAL dataset, which have larger shape variance compared to the cats, dogs, and horses used for training. We produce the testing data by deforming each hippo mesh with 500 unseen target poses from SMAL [68]. We show qualitative pose transfer on stylized quadrupeds in Fig. 1." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 428, + 180, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 428, + 180, + 441 + ], + "spans": [ + { + "bbox": [ + 47, + 428, + 180, + 441 + ], + "type": "text", + "content": "4.2. Implementation Details" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 447, + 287, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 447, + 287, + 590 + ], + "spans": [ + { + "bbox": [ + 46, + 447, + 287, + 590 + ], + "type": "text", + "content": "We use the ADAM [30] optimizer to train both the shape understanding and pose deformation modules. For the shape understanding module, we use a learning rate of " + }, + { + "bbox": [ + 46, + 447, + 287, + 590 + ], + "type": "inline_equation", + "content": "1e - 4" + }, + { + "bbox": [ + 46, + 447, + 287, + 590 + ], + "type": "text", + "content": " for both the decoder and shape code optimization, with a batch size of 64. Given a new character at inference time, we fix the decoder and only optimize the shape code for the new character with the same optimizer and learning rate. For the pose deformation module, we use a learning rate of " + }, + { + "bbox": [ + 46, + 447, + 287, + 590 + ], + "type": "inline_equation", + "content": "3e - 4" + }, + { + "bbox": [ + 46, + 447, + 287, + 590 + ], + "type": "text", + "content": " with a batch size of 128. For test-time training, we use a batch size of 1 and a learning rate of " + }, + { + "bbox": [ + 46, + 447, + 287, + 590 + ], + "type": "inline_equation", + "content": "5e - 3" + }, + { + "bbox": [ + 46, + 447, + 287, + 590 + ], + "type": "text", + "content": " with the ADAM optimizer. We set " + }, + { + "bbox": [ + 46, + 447, + 287, + 590 + ], + "type": "inline_equation", + "content": "\\lambda_v" + }, + { + "bbox": [ + 46, + 447, + 287, + 590 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 447, + 287, + 590 + ], + "type": "inline_equation", + "content": "\\lambda_e" + }, + { + "bbox": [ + 46, + 447, + 287, + 590 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 447, + 287, + 590 + ], + "type": "inline_equation", + "content": "\\lambda_{dr}" + }, + { + "bbox": [ + 46, + 447, + 287, + 590 + ], + "type": "text", + "content": " (See Sec. 3.3) as 0.05, 0.01, and 1 respectively." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 599, + 249, + 612 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 599, + 249, + 612 + ], + "spans": [ + { + "bbox": [ + 47, + 599, + 249, + 612 + ], + "type": "text", + "content": "4.3. 
Metrics and Baselines for Comparison" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 617, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 286, + 713 + ], + "type": "text", + "content": "Metrics. We use Point-wise Mesh Euclidean Distance (PMD) [36, 62] to evaluate pose transfer error. The PMD metric reveals pose similarity of the predicted deformation compared to its ground truth. However, as shown in Fig. 4, PMD can not fully show the smoothness and realism of the generated results. Thus, we adopt an edge length score (ELS) metric to evaluate the character's smoothness after the deformation. Specifically, we compare each edge's" + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 310, + 71, + 541, + 123 + ], + "blocks": [ + { + "bbox": [ + 310, + 71, + 541, + 123 + ], + "lines": [ + { + "bbox": [ + 310, + 71, + 541, + 123 + ], + "spans": [ + { + "bbox": [ + 310, + 71, + 541, + 123 + ], + "type": "table", + "html": "
DatasetMetricSPT*(full) [36]NBS [35]SPT [36]Ours
MGN [11]PMD ↓1.621.331.820.99
ELS ↑0.860.700.850.89
Mixamo [1]PMD ↓3.057.045.295.06
ELS ↑0.610.660.590.88
", + "image_path": "df81a559b2c5a17b2caaba17ccca1bc676cda20344b82987e19d802d405b76ac.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 131, + 545, + 186 + ], + "lines": [ + { + "bbox": [ + 305, + 131, + 545, + 186 + ], + "spans": [ + { + "bbox": [ + 305, + 131, + 545, + 186 + ], + "type": "text", + "content": "Table 1. Quantitative comparison on MGN and Mixamo. Our method achieves the lowest PMD with the highest ELS. We provide the performance of the SPT*(full) method, which uses more supervision than the other methods as a reference. Our method is even better or comparable to it." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 305, + 194, + 545, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 194, + 545, + 218 + ], + "spans": [ + { + "bbox": [ + 305, + 194, + 545, + 218 + ], + "type": "text", + "content": "length in the deformed mesh with the corresponding edge's length in the ground truth mesh. We define the score as" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 355, + 228, + 545, + 262 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 355, + 228, + 545, + 262 + ], + "spans": [ + { + "bbox": [ + 355, + 228, + 545, + 262 + ], + "type": "interline_equation", + "content": "\\frac {1}{| \\mathcal {E} |} \\sum_ {\\{i, j \\} \\sim \\mathcal {E}} 1 - \\left| \\frac {| | \\hat {V} _ {i} - \\hat {V} _ {j} | | _ {2}}{| | V _ {i} - V _ {j} | | _ {2}} - 1 \\right|, \\tag {7}", + "image_path": "76b1cec14a24a6b47f0ce6d42a4d5263bfdf8eecf0b98f448fbc5e52d0d27185.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 266, + 545, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 266, + 545, + 326 + ], + "spans": [ + { + "bbox": [ + 304, + 266, + 545, + 326 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 266, + 545, + 326 + ], + "type": "inline_equation", + "content": "\\mathcal{E}" + }, + { + "bbox": [ + 304, + 266, + 545, + 326 + ], + "type": "text", + "content": " indicates all edges of the mesh, " + }, + { + "bbox": [ + 304, + 266, + 545, + 326 + ], + "type": "inline_equation", + "content": "|\\mathcal{E}|" + }, + { + "bbox": [ + 304, + 266, + 545, + 326 + ], + "type": "text", + "content": " is the number of the edges in the mesh. " + }, + { + "bbox": [ + 304, + 266, + 545, + 326 + ], + "type": "inline_equation", + "content": "\\hat{V}_i" + }, + { + "bbox": [ + 304, + 266, + 545, + 326 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 266, + 545, + 326 + ], + "type": "inline_equation", + "content": "\\hat{V}_j" + }, + { + "bbox": [ + 304, + 266, + 545, + 326 + ], + "type": "text", + "content": " are the vertices in the deformed mesh. " + }, + { + "bbox": [ + 304, + 266, + 545, + 326 + ], + "type": "inline_equation", + "content": "V_{i}" + }, + { + "bbox": [ + 304, + 266, + 545, + 326 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 266, + 545, + 326 + ], + "type": "inline_equation", + "content": "V_{j}" + }, + { + "bbox": [ + 304, + 266, + 545, + 326 + ], + "type": "text", + "content": " are the vertices in the ground truth mesh. For all the evaluation metrics, we scale the template character to be 1 meter tall, following [36]." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 327, + 545, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 327, + 545, + 471 + ], + "spans": [ + { + "bbox": [ + 304, + 327, + 545, + 471 + ], + "type": "text", + "content": "Baselines. We compare our method with Neural Blend Shapes (NBS) [35] and Skeleton-free Pose Transfer (SPT) [36]. NBS is a rigging prediction method trained on the SMPL and MGN datasets, which include naked and clothed human meshes with ground truth rigging information. For SPT, we show the results of two versions: one, named SPT, is trained only on the AMASS dataset and has a level of supervision comparable to our method. We also test the SPT*(full) version, which is trained on the AMASS, RigNet and Mixamo datasets, using both stylized characters' skinning weights as supervision and paired stylized characters in rest pose and target pose." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 479, + 503, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 479, + 503, + 491 + ], + "spans": [ + { + "bbox": [ + 306, + 479, + 503, + 491 + ], + "type": "text", + "content": "4.4. Human-like Character Pose Transfer" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": "We report the PMD metric on the MGN and Mixamo datasets in Tab. 1. We also include the performance of SPT*(full) for reference. On the MGN dataset, which includes clothed humans, our method, which is trained with only unclothed humans, achieves a lower PMD than all baseline methods, including baselines trained with more supervision (i.e., the NBS [35] learned with clothed humans and the SPT*(full) [36] learned with skinning weights and paired motion data). For the stylized characters, our method outperforms the SPT baseline learned with a comparable amount of supervision and achieves results competitive with the NBS [35] and SPT*(full) baselines trained with more supervision. Furthermore, when testing on the more challenging, less human-like characters (e.g., a mouse with a big head in Fig. 1), the baselines produce noticeable artifacts and rough surfaces, which can be observed in the qualitative comparisons in Fig. 4. We provide the PMD value for each character in the supplementary."
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "8709" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 104, + 57, + 149, + 122 + ], + "blocks": [ + { + "bbox": [ + 104, + 57, + 149, + 122 + ], + "lines": [ + { + "bbox": [ + 104, + 57, + 149, + 122 + ], + "spans": [ + { + "bbox": [ + 104, + 57, + 149, + 122 + ], + "type": "image", + "image_path": "4373cf912884415bb198f2ddc519573b1b1bd0db9d3334d3f0dc36b7681ae064.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 104, + 125, + 149, + 185 + ], + "blocks": [ + { + "bbox": [ + 104, + 125, + 149, + 185 + ], + "lines": [ + { + "bbox": [ + 104, + 125, + 149, + 185 + ], + "spans": [ + { + "bbox": [ + 104, + 125, + 149, + 185 + ], + "type": "image", + "image_path": "c3dff5e8dad3476f2d059883ef3d74710ea2d082abb446921120c15296e71eaf.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 105, + 190, + 141, + 246 + ], + "blocks": [ + { + "bbox": [ + 105, + 190, + 141, + 246 + ], + "lines": [ + { + "bbox": [ + 105, + 190, + 141, + 246 + ], + "spans": [ + { + "bbox": [ + 105, + 190, + 141, + 246 + ], + "type": "image", + "image_path": "952d1fa77e29412929bf41fbba9b85a6ff632e47932a3704ef5124044cf10bce.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 109, + 252, + 138, + 262 + ], + "lines": [ + { + "bbox": [ + 109, + 252, + 138, + 262 + ], + "spans": [ + { + "bbox": [ + 109, + 252, + 138, + 262 + ], + "type": "text", + "content": "Source" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 158, + 58, + 219, + 121 + ], + "blocks": [ + { + "bbox": [ + 158, + 58, + 219, + 121 + ], + "lines": [ + { + "bbox": [ + 158, + 58, + 219, + 121 + ], + "spans": [ + { + "bbox": [ + 158, + 58, + 219, + 121 + ], + "type": "image", + "image_path": "b1e6dd9f49d0f714b3af78032fbc4563ca35b398a8d566e0f0b1a4db98344968.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 160, + 122, + 217, + 185 + ], + "blocks": [ + { + "bbox": [ + 160, + 122, + 217, + 185 + ], + "lines": [ + { + "bbox": [ + 160, + 122, + 217, + 185 + ], + "spans": [ + { + "bbox": [ + 160, + 122, + 217, + 185 + ], + "type": "image", + "image_path": "3a7faa429e1cac3f2a004eb6235ddf01d8e00711b8c40b5dfcc94d7124f2b473.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 162, + 189, + 219, + 248 + ], + "blocks": [ + { + "bbox": [ + 162, + 189, + 219, + 248 + ], + "lines": [ + { + "bbox": [ + 162, + 189, + 219, + 248 + ], + "spans": [ + { + "bbox": [ + 162, + 189, + 219, + 248 + ], + "type": "image", + "image_path": "486462331e99d4af63c55bab579dded47eaed63a4861602bab0512fb89cf06fc.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 175, + 251, + 203, + 262 + ], + "lines": [ + { + "bbox": [ + 175, + 251, + 203, + 262 + ], + "spans": [ + { + "bbox": [ + 175, + 251, + 
203, + 262 + ], + "type": "text", + "content": "Target" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 226, + 58, + 289, + 184 + ], + "blocks": [ + { + "bbox": [ + 226, + 58, + 289, + 184 + ], + "lines": [ + { + "bbox": [ + 226, + 58, + 289, + 184 + ], + "spans": [ + { + "bbox": [ + 226, + 58, + 289, + 184 + ], + "type": "image", + "image_path": "e81d777fd3fcf00216ef88027d1be0cfe145ddf5d34f3843baeff045094f35f8.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 238, + 189, + 275, + 248 + ], + "blocks": [ + { + "bbox": [ + 238, + 189, + 275, + 248 + ], + "lines": [ + { + "bbox": [ + 238, + 189, + 275, + 248 + ], + "spans": [ + { + "bbox": [ + 238, + 189, + 275, + 248 + ], + "type": "image", + "image_path": "add374012e2123a228fcefe50ac76f5ab02ddf5d96b4deeff31e6509a1abf23e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 236, + 251, + 276, + 263 + ], + "lines": [ + { + "bbox": [ + 236, + 251, + 276, + 263 + ], + "spans": [ + { + "bbox": [ + 236, + 251, + 276, + 263 + ], + "type": "text", + "content": "NBS [35]" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 304, + 57, + 354, + 122 + ], + "blocks": [ + { + "bbox": [ + 304, + 57, + 354, + 122 + ], + "lines": [ + { + "bbox": [ + 304, + 57, + 354, + 122 + ], + "spans": [ + { + "bbox": [ + 304, + 57, + 354, + 122 + ], + "type": "image", + "image_path": "cd03b14f06fc44b5f5948f3046818372a221cbafefcd8bc05645c834e87bffa7.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 304, + 122, + 353, + 187 + ], + "blocks": [ + { + "bbox": [ + 304, + 122, + 353, + 187 + ], + "lines": [ + { + "bbox": [ + 304, + 122, + 353, + 187 + ], + "spans": [ + { + "bbox": [ + 304, + 122, + 353, + 187 + ], + "type": "image", + "image_path": "ed7ecd982161013fa5ed72db836e9357462eda131b179bc48db7d1b9db2cb163.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 311, + 190, + 346, + 247 + ], + "blocks": [ + { + "bbox": [ + 311, + 190, + 346, + 247 + ], + "lines": [ + { + "bbox": [ + 311, + 190, + 346, + 247 + ], + "spans": [ + { + "bbox": [ + 311, + 190, + 346, + 247 + ], + "type": "image", + "image_path": "608f1fbb2f0219e590b4adf949c59768bc908cc850bbaac5cdd0db00d9a53261.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 251, + 347, + 263 + ], + "lines": [ + { + "bbox": [ + 309, + 251, + 347, + 263 + ], + "spans": [ + { + "bbox": [ + 309, + 251, + 347, + 263 + ], + "type": "text", + "content": "SPT [36]" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 375, + 57, + 410, + 121 + ], + "blocks": [ + { + "bbox": [ + 375, + 57, + 410, + 121 + ], + "lines": [ + { + "bbox": [ + 375, + 57, + 410, + 121 + ], + "spans": [ + { + "bbox": [ + 375, + 57, + 410, + 121 + ], + "type": "image", + "image_path": "0525239d0042cc48abf9e1689fc7ce77d739b320fa767bc0d09b3929d1475690.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 375, + 122, + 424, + 186 + ], + "blocks": [ + { + "bbox": [ + 375, + 122, + 
424, + 186 + ], + "lines": [ + { + "bbox": [ + 375, + 122, + 424, + 186 + ], + "spans": [ + { + "bbox": [ + 375, + 122, + 424, + 186 + ], + "type": "image", + "image_path": "2c977e4e5a8760dfd73cb0657491914acda2ba3239bc9daef811cdeab81b6850.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 379, + 189, + 415, + 248 + ], + "blocks": [ + { + "bbox": [ + 379, + 189, + 415, + 248 + ], + "lines": [ + { + "bbox": [ + 379, + 189, + 415, + 248 + ], + "spans": [ + { + "bbox": [ + 379, + 189, + 415, + 248 + ], + "type": "image", + "image_path": "ee6cb725ab001d3127ba7fdfc22842d58bbb3a697126f8a3c69c2e3bae3de251.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 386, + 251, + 407, + 262 + ], + "lines": [ + { + "bbox": [ + 386, + 251, + 407, + 262 + ], + "spans": [ + { + "bbox": [ + 386, + 251, + 407, + 262 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 441, + 57, + 488, + 122 + ], + "blocks": [ + { + "bbox": [ + 441, + 57, + 488, + 122 + ], + "lines": [ + { + "bbox": [ + 441, + 57, + 488, + 122 + ], + "spans": [ + { + "bbox": [ + 441, + 57, + 488, + 122 + ], + "type": "image", + "image_path": "238231f66f59fef062c826ccc38980589d86e8a09236c4769eabe7137514659c.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 442, + 122, + 487, + 186 + ], + "blocks": [ + { + "bbox": [ + 442, + 122, + 487, + 186 + ], + "lines": [ + { + "bbox": [ + 442, + 122, + 487, + 186 + ], + "spans": [ + { + "bbox": [ + 442, + 122, + 487, + 186 + ], + "type": "image", + "image_path": "b9a701a555f870eeececddefa3c053b9380b1dad28769b8d15b8461255ead581.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 446, + 189, + 485, + 247 + ], + "blocks": [ + { + "bbox": [ + 446, + 189, + 485, + 247 + ], + "lines": [ + { + "bbox": [ + 446, + 189, + 485, + 247 + ], + "spans": [ + { + "bbox": [ + 446, + 189, + 485, + 247 + ], + "type": "image", + "image_path": "3e575671bfe48c8a1a7512eb1aaf766a19aa0fafbff6e85a954d9ff09e98021c.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 457, + 251, + 473, + 262 + ], + "lines": [ + { + "bbox": [ + 457, + 251, + 473, + 262 + ], + "spans": [ + { + "bbox": [ + 457, + 251, + 473, + 262 + ], + "type": "text", + "content": "GT" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 56, + 306, + 266, + 514 + ], + "blocks": [ + { + "bbox": [ + 56, + 306, + 266, + 514 + ], + "lines": [ + { + "bbox": [ + 56, + 306, + 266, + 514 + ], + "spans": [ + { + "bbox": [ + 56, + 306, + 266, + 514 + ], + "type": "image", + "image_path": "28a16782138344dbf2157544077b57705a8741cef1658f2c44d1f82f17586ce9.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 515, + 287, + 538 + ], + "lines": [ + { + "bbox": [ + 46, + 515, + 287, + 538 + ], + "spans": [ + { + "bbox": [ + 46, + 515, + 287, + 538 + ], + "type": "text", + "content": "Figure 5. Part segmentation visualization. NBS makes wrong predictions for hair while SPT may mix the upper legs." 
+ } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "bbox": [ + 46, + 555, + 287, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 555, + 287, + 614 + ], + "spans": [ + { + "bbox": [ + 46, + 555, + 287, + 614 + ], + "type": "text", + "content": "We show the ELS score comparison of different methods on the MGN and Mixamo datasets in Tab. 1. For both clothed humans and stylized characters, our method can generate more realistic results which are consistent with the target mesh and achieves the best ELS score." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "text", + "content": "We visually compare our method and the baseline methods in Fig. 4 on the Mixamo dataset. Although NBS is trained with a clothed-human dataset, when testing on the human-like characters, it still fails on parts that are separate from the body such as the hair and the pants. When using only naked human meshes as supervision, SPT cannot generalize to challenging human-like characters, producing rough mesh surface with spikes." + } + ] + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 310, + 302, + 382, + 344 + ], + "blocks": [ + { + "bbox": [ + 46, + 266, + 547, + 300 + ], + "lines": [ + { + "bbox": [ + 46, + 266, + 547, + 300 + ], + "spans": [ + { + "bbox": [ + 46, + 266, + 547, + 300 + ], + "type": "text", + "content": "Figure 4. Qualitative comparison on Mixamo. The average PMD of these three results for NBS, SPT, and Ours are 8.16, 6.13, and 5.16 respectively and the average ELS for NBS, SPT, and Ours are 0.65, 0.78, and 0.93 respectively. Our method can successfully transfer the pose to challenging stylized characters (e.g., the mouse with a big head in the second row)." 
+ } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 310, + 302, + 382, + 344 + ], + "lines": [ + { + "bbox": [ + 310, + 302, + 382, + 344 + ], + "spans": [ + { + "bbox": [ + 310, + 302, + 382, + 344 + ], + "type": "image", + "image_path": "480e4d440ac18475299a55cbd4e001af2d9ae4c3a990eda816afb0c67df4c577.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 311, + 348, + 383, + 380 + ], + "blocks": [ + { + "bbox": [ + 311, + 348, + 383, + 380 + ], + "lines": [ + { + "bbox": [ + 311, + 348, + 383, + 380 + ], + "spans": [ + { + "bbox": [ + 311, + 348, + 383, + 380 + ], + "type": "image", + "image_path": "8cce0cbc2c5271d9463dbb46bc10c1a1b56108aafeda84a396a145f818b87816.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 313, + 386, + 381, + 426 + ], + "blocks": [ + { + "bbox": [ + 313, + 386, + 381, + 426 + ], + "lines": [ + { + "bbox": [ + 313, + 386, + 381, + 426 + ], + "spans": [ + { + "bbox": [ + 313, + 386, + 381, + 426 + ], + "type": "image", + "image_path": "10a7eafaed4ee1a0eb0a58c40916888b9122ff4860b3fc3a70e31137b6426508.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 329, + 430, + 367, + 442 + ], + "lines": [ + { + "bbox": [ + 329, + 430, + 367, + 442 + ], + "spans": [ + { + "bbox": [ + 329, + 430, + 367, + 442 + ], + "type": "text", + "content": "SPT [36]" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 388, + 303, + 454, + 339 + ], + "blocks": [ + { + "bbox": [ + 388, + 303, + 454, + 339 + ], + "lines": [ + { + "bbox": [ + 388, + 303, + 454, + 339 + ], + "spans": [ + { + "bbox": [ + 388, + 303, + 454, + 339 + ], + "type": "image", + "image_path": "9f2bfc42b2287de21b24f395c4be405319035d4b96ab4529afe1588ede1f50ae.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 388, + 347, + 459, + 380 + ], + "blocks": [ + { + "bbox": [ + 388, + 347, + 459, + 380 + ], + "lines": [ + { + "bbox": [ + 388, + 347, + 459, + 380 + ], + "spans": [ + { + "bbox": [ + 388, + 347, + 459, + 380 + ], + "type": "image", + "image_path": "22672dbf3808361df124c6dee0ccc9a194ed70ba2360e598c0d2e4622fe62124.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 388, + 386, + 455, + 425 + ], + "blocks": [ + { + "bbox": [ + 388, + 386, + 455, + 425 + ], + "lines": [ + { + "bbox": [ + 388, + 386, + 455, + 425 + ], + "spans": [ + { + "bbox": [ + 388, + 386, + 455, + 425 + ], + "type": "image", + "image_path": "21f7c6cc2c5c91b5743f1aa1e50965fcf337a4ddff7269bbfc7896af3776ffaf.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 414, + 431, + 435, + 441 + ], + "lines": [ + { + "bbox": [ + 414, + 431, + 435, + 441 + ], + "spans": [ + { + "bbox": [ + 414, + 431, + 435, + 441 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_caption" + } + ], + "index": 34 + }, + { + "type": "image", + "bbox": [ + 460, + 303, + 525, + 340 + ], + "blocks": [ + { + "bbox": [ + 460, + 303, + 525, + 340 + ], + "lines": [ + { + "bbox": [ + 460, + 303, + 525, + 340 + ], + "spans": [ + { + "bbox": [ + 460, + 303, + 525, + 340 + ], + "type": 
"image", + "image_path": "017e7ce6912aaa677ff04da374dba94d3829be6008c0d2a19eacd9b95346d390.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + } + ], + "index": 36 + }, + { + "type": "image", + "bbox": [ + 462, + 347, + 534, + 381 + ], + "blocks": [ + { + "bbox": [ + 462, + 347, + 534, + 381 + ], + "lines": [ + { + "bbox": [ + 462, + 347, + 534, + 381 + ], + "spans": [ + { + "bbox": [ + 462, + 347, + 534, + 381 + ], + "type": "image", + "image_path": "4bdb35185ddeab2f0f2d7bc5aaaebc7cbd67ac759c0ab2c00383fd165bab2ad4.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 462, + 386, + 529, + 424 + ], + "blocks": [ + { + "bbox": [ + 462, + 386, + 529, + 424 + ], + "lines": [ + { + "bbox": [ + 462, + 386, + 529, + 424 + ], + "spans": [ + { + "bbox": [ + 462, + 386, + 529, + 424 + ], + "type": "image", + "image_path": "b70da364d0efbbd894e4ad8718211b20b1a9777381f9d1d7a40d2d2bf72e3534.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 492, + 430, + 507, + 441 + ], + "lines": [ + { + "bbox": [ + 492, + 430, + 507, + 441 + ], + "spans": [ + { + "bbox": [ + 492, + 430, + 507, + 441 + ], + "type": "text", + "content": "GT" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 305, + 447, + 547, + 481 + ], + "lines": [ + { + "bbox": [ + 305, + 447, + 547, + 481 + ], + "spans": [ + { + "bbox": [ + 305, + 447, + 547, + 481 + ], + "type": "text", + "content": "Figure 6. Quadrupedal pose transfer visualization. Our method can achieve smooth and accurate pose transfer while SPT fails on the mouth and leg regions." + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_caption" + } + ], + "index": 38 + }, + { + "type": "table", + "bbox": [ + 342, + 492, + 511, + 517 + ], + "blocks": [ + { + "bbox": [ + 342, + 492, + 511, + 517 + ], + "lines": [ + { + "bbox": [ + 342, + 492, + 511, + 517 + ], + "spans": [ + { + "bbox": [ + 342, + 492, + 511, + 517 + ], + "type": "table", + "html": "
MetricNBS [35]SPT [36]Ours
Accuracy ↑67.8%75.6%86.9%
", + "image_path": "cacbf14f020635b037d362c51c25b3bc2e0add3ea22e75a482015375212a1382.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "table_body" + } + ], + "index": 41 + }, + { + "bbox": [ + 306, + 525, + 545, + 548 + ], + "lines": [ + { + "bbox": [ + 306, + 525, + 545, + 548 + ], + "spans": [ + { + "bbox": [ + 306, + 525, + 545, + 548 + ], + "type": "text", + "content": "Table 2. Part prediction accuracy on Mixamo [1]. Our method achieves the best part segmentation accuracy." + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 306, + 563, + 482, + 577 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 563, + 482, + 577 + ], + "spans": [ + { + "bbox": [ + 306, + 563, + 482, + 577 + ], + "type": "text", + "content": "4.5. Part Understanding Comparison" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 304, + 582, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 547, + 715 + ], + "type": "text", + "content": "As discussed in Sec. 3.1, part segmentation plays an important role in both shape understanding and pose deformation. Though NBS [35] and SPT [36] do not explicitly predict part segmentation label, they are both skinning weight-based methods and we can derive the part segmentation label from the predicted skinning weights. Specifically, by selecting the maximum weight of each vertex, we can convert the skinning weight prediction to part segmentation labels for the vertices. We compare our part prediction results with those derived from SPT and NBS. We report the part segmentation accuracy on the Mixamo datasets in Tab. 2" + } + ] + } + ], + "index": 44 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "8710" + } + ] + } + ], + "index": 45 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 55, + 58, + 270, + 249 + ], + "blocks": [ + { + "bbox": [ + 55, + 58, + 270, + 249 + ], + "lines": [ + { + "bbox": [ + 55, + 58, + 270, + 249 + ], + "spans": [ + { + "bbox": [ + 55, + 58, + 270, + 249 + ], + "type": "image", + "image_path": "8c1b2f4ff36dcae4437e98663db3b57ac789d1a88eb4f73a2ca28d0574424ce4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 251, + 288, + 295 + ], + "lines": [ + { + "bbox": [ + 46, + 251, + 288, + 295 + ], + "spans": [ + { + "bbox": [ + 46, + 251, + 288, + 295 + ], + "type": "text", + "content": "Figure 7. Qualitative comparison for ablation study. Removing the constraint (eq. 1) in shape understanding leads to wrong pose deformation results. The volume preserving loss (eq. 5) helps to maintain the identity, e.g., the thickness of the arms in first row." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 62, + 304, + 271, + 328 + ], + "blocks": [ + { + "bbox": [ + 62, + 304, + 271, + 328 + ], + "lines": [ + { + "bbox": [ + 62, + 304, + 271, + 328 + ], + "spans": [ + { + "bbox": [ + 62, + 304, + 271, + 328 + ], + "type": "table", + "html": "
MetricSPT [36]OursMetricSPT [36]Ours
PMD ↓10.288.28ELS ↑0.280.86
", + "image_path": "3678981c2558f768c774c1d992e16cc744308b5218126001fdf01af245c32d3a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 363, + 289, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 363, + 289, + 579 + ], + "spans": [ + { + "bbox": [ + 46, + 363, + 289, + 579 + ], + "type": "text", + "content": "and visualize the part segmentation results in Fig. 5. Even trained with only part segmentation supervision of human meshes, our method can successfully segment each part for the stylized characters. On the contrary, SPT uses graph convolution network [31] to predict the skinning weights. When training only with human meshes, it often fails to distinguish different parts. As shown in Fig. 5, it mixes up the right and left upper legs, and incorrectly classifies the shoulder as the head. Though NBS is trained with clothed humans, it always classifies human hair as the human body for characters from Mixamo. This is because that NBS uses the MeshCNN [26] as the shape encoder. As a result, it is sensitive to mesh topology and cannot generalize to meshes with disconnected parts (e.g., disconnected hair and head). Tab. 2 further quantitatively demonstrates that our method achieves the best part segmentation accuracy, demonstrating its ability to correctly interpret the shape and part information in stylized characters." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 587, + 261, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 587, + 261, + 601 + ], + "spans": [ + { + "bbox": [ + 47, + 587, + 261, + 601 + ], + "type": "text", + "content": "4.6. Quadrupedal Pose Transfer Comparison" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 605, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 715 + ], + "type": "text", + "content": "To further show the generalization ability of our method, we conduct experiments on quadrupeds. We report the PMD and ELS score of our method and the SPT [36] in Tab. 3. When testing on hippos with large shape gap from the training meshes, SPT has a hard time generalizing both in terms of pose transfer accuracy and natural deformation. While our method achieves both better qualitative and quantitative results. We visualize the qualitative comparisons in Fig. 6. SPT produces obvious artifacts on the hippo's mouth" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 317, + 58, + 531, + 114 + ], + "blocks": [ + { + "bbox": [ + 317, + 58, + 531, + 114 + ], + "lines": [ + { + "bbox": [ + 317, + 58, + 531, + 114 + ], + "spans": [ + { + "bbox": [ + 317, + 58, + 531, + 114 + ], + "type": "image", + "image_path": "9c2fc818be70357453cc8987f0da7374f84f7b0d10915ad2a88d0bf0c98689c4.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 118, + 545, + 140 + ], + "lines": [ + { + "bbox": [ + 306, + 118, + 545, + 140 + ], + "spans": [ + { + "bbox": [ + 306, + 118, + 545, + 140 + ], + "type": "text", + "content": "Figure 8. Part prediction on stylized quadrupeds. Our method successfully predicts the parts of unseen stylized quadrupeds." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 332, + 147, + 518, + 181 + ], + "blocks": [ + { + "bbox": [ + 46, + 333, + 287, + 357 + ], + "lines": [ + { + "bbox": [ + 46, + 333, + 287, + 357 + ], + "spans": [ + { + "bbox": [ + 46, + 333, + 287, + 357 + ], + "type": "text", + "content": "Table 3. Comparison on Hippos from SMAL [68]. Our method achieves better pose transfer accuracy with smoother results." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 332, + 147, + 518, + 181 + ], + "lines": [ + { + "bbox": [ + 332, + 147, + 518, + 181 + ], + "spans": [ + { + "bbox": [ + 332, + 147, + 518, + 181 + ], + "type": "table", + "html": "<table><tr><td>
MetricOurs w/o invOurs w/o volumeOurs
PMD ↓1.261.020.99
ELS ↑0.880.880.89
", + "image_path": "654945bb1d1502ca0e35cb28c00ec9d2eea13ee4e035665264b2cada172ef8b6.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 186, + 545, + 220 + ], + "lines": [ + { + "bbox": [ + 305, + 186, + 545, + 220 + ], + "spans": [ + { + "bbox": [ + 305, + 186, + 545, + 220 + ], + "type": "text", + "content": "Table 4. Ablation study on inverse MLP and volume preserving loss. The inverse MLP and volume preserving loss helps to improve pose transfer accuracy and produce smooth deformation." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 226, + 545, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 226, + 545, + 310 + ], + "spans": [ + { + "bbox": [ + 304, + 226, + 545, + 310 + ], + "type": "text", + "content": "and legs, while our method achieves accurate pose transfer and maintains the shape characteristics of the original character at the same time. We provide more results in the supplementary. We also show the part segmentation results on stylized characters by our method in Fig. 8. Even for unique parts such as the hats and antlers, our method correctly assigns them to the head part." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 317, + 399, + 330 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 317, + 399, + 330 + ], + "spans": [ + { + "bbox": [ + 306, + 317, + 399, + 330 + ], + "type": "text", + "content": "4.7. Ablation Study" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 336, + 545, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 336, + 545, + 526 + ], + "spans": [ + { + "bbox": [ + 304, + 336, + 545, + 526 + ], + "type": "text", + "content": "To evaluate the key components of our method, we conduct ablation studies on the MGN dataset by removing the inverse constraint (Eq. 3) in the shape understanding module and the volume-preserving loss (Eq. 5) used during the test-time training produce, we name them as \"ours w/o inv\" and \"ours w/o " + }, + { + "bbox": [ + 304, + 336, + 545, + 526 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 304, + 336, + 545, + 526 + ], + "type": "text", + "content": "\" respectively. We report the PMD and ELS metrics in Tab. 4. The model learned without the inverse constraint or volume-preserving loss has worse PMD and ELS score than our full model, indicating the contribution of these two objectives. We also provide qualitative results in Fig. 7. We use red boxes to point out the artifacts. As shown in Fig. 7, our model trained without the inverse constraint produces less accurate pose transfer results. Moreover, adding the volume-preserving loss helps to maintain the character's local details such as the thickness of the arms." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 538, + 378, + 550 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 538, + 378, + 550 + ], + "spans": [ + { + "bbox": [ + 306, + 538, + 378, + 550 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 558, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 558, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 558, + 545, + 713 + ], + "type": "text", + "content": "In this paper, we present a model that deforms unrigged, stylized characters guided by a biped or quadruped avatar. 
Our model is trained with only easily accessible posed human or animal meshes, yet can be applied to unseen stylized characters in a zero-shot manner during inference. To this end, we draw key insights from classic mesh deformation methods and develop a correspondence-aware shape understanding module, an implicit pose deformation module and a volume-based test-time training procedure. We carry out extensive experiments on both the biped and quadruped categories and show that our method produces more realistic and accurate deformation compared to baselines learned with comparable or more supervision." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "text", + "content": "8711" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 286, + 713 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 53, + 91, + 286, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 286, + 112 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 286, + 112 + ], + "type": "text", + "content": "[1] Mixamo. http://www.mixamo.com/. Accessed on November 09th, 2022. 5, 6, 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 114, + 286, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 114, + 286, + 158 + ], + "spans": [ + { + "bbox": [ + 53, + 114, + 286, + 158 + ], + "type": "text", + "content": "[2] Kfir Aberman, Peizhuo Li, Dani Lischinski, Olga Sorkine-Hornung, Daniel Cohen-Or, and Baoquan Chen. Skeleton-aware networks for deep motion retargeting. In ACM Transactions on Graphics (SIGGRAPH), 2020. 2, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 160, + 286, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 160, + 286, + 203 + ], + "spans": [ + { + "bbox": [ + 53, + 160, + 286, + 203 + ], + "type": "text", + "content": "[3] Noam Aigerman, Kunal Gupta, Vladimir G Kim, Siddhartha Chaudhuri, Jun Saito, and Thibault Groueix. Neural jacobian fields: Learning intrinsic mappings of arbitrary meshes. arXiv preprint arXiv:2205.02904, 2022. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 205, + 286, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 205, + 286, + 249 + ], + "spans": [ + { + "bbox": [ + 53, + 205, + 286, + 249 + ], + "type": "text", + "content": "[4] Mazen Al Borno, Ludovic Righetti, Michael J Black, Scott L Delp, Eugene Fiume, and Javier Romero. Robust physics-based motion retargeting with realistic body shapes. In Computer Graphics Forum. Wiley Online Library, 2018. 2, 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 251, + 286, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 251, + 286, + 282 + ], + "spans": [ + { + "bbox": [ + 53, + 251, + 286, + 282 + ], + "type": "text", + "content": "[5] Andreas Aristidou and Joan Lasenby. 
FABRIK: A fast, iterative solver for the inverse kinematics problem. Graphical Models, 2011. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 285, + 286, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 285, + 286, + 328 + ], + "spans": [ + { + "bbox": [ + 53, + 285, + 286, + 328 + ], + "type": "text", + "content": "[6] Quentin Avril, Donya Ghafourzadeh, Srinivasan Ramachandran, Sahel Fallahdoust, Sarah Ribet, Olivier Dionne, Martin de Lasa, and Eric Paquette. Animation setup transfer for 3D characters. In Computer Graphics Forum, 2016. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 330, + 286, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 330, + 286, + 361 + ], + "spans": [ + { + "bbox": [ + 53, + 330, + 286, + 361 + ], + "type": "text", + "content": "[7] Ilya Baran and Jovan Popovic. Automatic rigging and animation of 3D characters. In ACM Transactions on Graphics (SIGGRAPH), 2007. 2, 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 364, + 286, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 364, + 286, + 396 + ], + "spans": [ + { + "bbox": [ + 53, + 364, + 286, + 396 + ], + "type": "text", + "content": "[8] Ilya Baran, Daniel Vlasic, Eitan Grinspun, and Jovan Popovic. Semantic deformation transfer. In ACM Transactions on Graphics (ToG). 2009. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 398, + 286, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 398, + 286, + 441 + ], + "spans": [ + { + "bbox": [ + 53, + 398, + 286, + 441 + ], + "type": "text", + "content": "[9] Mirela Ben-Chen, Ofir Weber, and Craig Gotsman. Spatial deformation transfer. In Proceedings of the 2009 ACM SIGGRAPH/Eurographics Symposium on Computer Animation, 2009. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 444, + 286, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 444, + 286, + 498 + ], + "spans": [ + { + "bbox": [ + 48, + 444, + 286, + 498 + ], + "type": "text", + "content": "[10] Bharat Lal Bhatnagar, Cristian Sminchisescu, Christian Theobalt, and Gerard Pons-Moll. Combining implicit function learning and parametric models for 3D human reconstruction. In European Conference on Computer Vision (ECCV), 2020. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 500, + 286, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 500, + 286, + 543 + ], + "spans": [ + { + "bbox": [ + 48, + 500, + 286, + 543 + ], + "type": "text", + "content": "[11] Bharat Lal Bhatnagar, Garvita Tiwari, Christian Theobalt, and Gerard Pons-Moll. Multi-garment net: Learning to dress 3D people from images. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 544, + 286, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 544, + 286, + 588 + ], + "spans": [ + { + "bbox": [ + 48, + 544, + 286, + 588 + ], + "type": "text", + "content": "[12] Federica Bogo, Javier Romero, Matthew Loper, and Michael J Black. FAUST: Dataset and evaluation for 3D mesh registration. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2014. 
2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 590, + 286, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 286, + 643 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 286, + 643 + ], + "type": "text", + "content": "[13] Rohan Chabra, Jan E Lenssen, Eddy Ilg, Tanner Schmidt, Julian Straub, Steven Lovegrove, and Richard Newcombe. Deep local shapes: Learning local sdf priors for detailed 3D reconstruction. In European Conference on Computer Vision (ECCV), 2020. 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 646, + 286, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 646, + 286, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 646, + 286, + 689 + ], + "type": "text", + "content": "[14] Haoyu Chen, Hao Tang, Henglin Shi, Wei Peng, Nicu Sebe, and Guoying Zhao. Intrinsic-extrinsic preserved gans for unsupervised 3D pose transfer. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 691, + 286, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 286, + 713 + ], + "type": "text", + "content": "[15] Ricky TQ Chen, Yulia Rubanova, Jesse Bettencourt, and David K Duvenaud. Neural ordinary differential equa" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 74, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 327, + 74, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 74, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 327, + 74, + 545, + 95 + ], + "type": "text", + "content": "tions. Advances in Neural Information Processing Systems (NeurIPS), 2018. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 98, + 545, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 98, + 545, + 129 + ], + "spans": [ + { + "bbox": [ + 308, + 98, + 545, + 129 + ], + "type": "text", + "content": "[16] Zhiqin Chen and Hao Zhang. Learning implicit fields for generative shape modeling. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 132, + 545, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 132, + 545, + 175 + ], + "spans": [ + { + "bbox": [ + 308, + 132, + 545, + 175 + ], + "type": "text", + "content": "[17] An-Chieh Cheng, Xueting Li, Min Sun, Ming-Hsuan Yang, and Sifei Liu. Learning 3D dense correspondence via canonical point autoencoder. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 4" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 178, + 545, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 178, + 545, + 220 + ], + "spans": [ + { + "bbox": [ + 308, + 178, + 545, + 220 + ], + "type": "text", + "content": "[18] Julian Chibane, Thiemo Alldieck, and Gerard Pons-Moll. Implicit functions in feature space for 3D shape reconstruction and completion. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 
3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 223, + 545, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 223, + 545, + 245 + ], + "spans": [ + { + "bbox": [ + 308, + 223, + 545, + 245 + ], + "type": "text", + "content": "[19] Kwang-Jin Choi and Hyeong-Seok Ko. Online motion retargeting. Comput. Animat. Virtual Worlds, 2000. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 247, + 545, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 247, + 545, + 300 + ], + "spans": [ + { + "bbox": [ + 308, + 247, + 545, + 300 + ], + "type": "text", + "content": "[20] Brian Delhaisse, Domingo Esteban, Leonel Rozo, and Darwin Caldwell. Transfer learning of shared latent spaces between robots with similar kinematic structure. In International Joint Conference on Neural Networks (IJCNN), 2017. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 304, + 545, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 304, + 545, + 346 + ], + "spans": [ + { + "bbox": [ + 308, + 304, + 545, + 346 + ], + "type": "text", + "content": "[21] Philipp Erler, Paul Guerrero, Stefan Ohrhallinger, Niloy J Mitra, and Michael Wimmer. Points2surf learning implicit surfaces from point clouds. In European Conference on Computer Vision (ECCV), 2020. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 350, + 545, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 350, + 545, + 392 + ], + "spans": [ + { + "bbox": [ + 308, + 350, + 545, + 392 + ], + "type": "text", + "content": "[22] Lin Gao, Jie Yang, Yi-Ling Qiao, Yu-Kun Lai, Paul L Rosin, Weiwei Xu, and Shihong Xia. Automatic unpaired shape deformation transfer. ACM Transactions on Graphics (ToG), 2018. 2, 5" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 395, + 545, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 395, + 545, + 448 + ], + "spans": [ + { + "bbox": [ + 308, + 395, + 545, + 448 + ], + "type": "text", + "content": "[23] Kyle Genova, Forrester Cole, Daniel Vlasic, Aaron Sarna, William T Freeman, and Thomas Funkhouser. Learning shape templates with structured implicit functions. In IEEE International Conference on Computer Vision (ICCV), 2019. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 452, + 545, + 484 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 452, + 545, + 484 + ], + "spans": [ + { + "bbox": [ + 308, + 452, + 545, + 484 + ], + "type": "text", + "content": "[24] Michael Gleicher. Retargetting motion to new characters. In Proceedings of the 25th annual conference on Computer graphics and interactive techniques, 1998. 2, 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 487, + 545, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 487, + 545, + 529 + ], + "spans": [ + { + "bbox": [ + 308, + 487, + 545, + 529 + ], + "type": "text", + "content": "[25] Thibault Groueix, Matthew Fisher, Vladimir G Kim, Bryan C Russell, and Mathieu Aubry. 3D-CODED: 3D correspondences by deep deformation. In European Conference on Computer Vision (ECCV), 2018. 
5" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 533, + 545, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 533, + 545, + 574 + ], + "spans": [ + { + "bbox": [ + 308, + 533, + 545, + 574 + ], + "type": "text", + "content": "[26] Rana Hanocka, Amir Hertz, Noa Fish, Raja Giryes, Shachar Fleishman, and Daniel Cohen-Or. MeshCNN: a network with an edge. ACM Transactions on Graphics (ToG), 2019. 8" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 578, + 545, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 578, + 545, + 611 + ], + "spans": [ + { + "bbox": [ + 308, + 578, + 545, + 611 + ], + "type": "text", + "content": "[27] Hanyoung Jang, Byungjun Kwon, Moonwon Yu, Seong Uk Kim, and Jongmin Kim. A variational U-Net for motion retargeting. In Comput. Animat. Virtual Worlds, 2020. 3" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 613, + 545, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 613, + 545, + 656 + ], + "spans": [ + { + "bbox": [ + 308, + 613, + 545, + 656 + ], + "type": "text", + "content": "[28] Boyan Jiang, Yinda Zhang, Xingkui Wei, Xiangyang Xue, and Yanwei Fu. Learning compositional representation for 4D captures with neural ode. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 3" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "type": "text", + "content": "[29] Chiyu Jiang, Avneesh Sud, Ameesh Makadia, Jingwei Huang, Matthias Nießner, Thomas Funkhouser, et al. Local implicit grid representations for 3D scenes. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 3" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "8712" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "text", + "content": "[30] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. International Conference on Learning Representations (ICLR), 2015. 6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 106, + 288, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 106, + 288, + 139 + ], + "spans": [ + { + "bbox": [ + 48, + 106, + 288, + 139 + ], + "type": "text", + "content": "[31] Thomas N Kipf and Max Welling. Semi-supervised classification with graph convolutional networks. In International Conference on Learning Representations (ICLR), 2017. 
8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 140, + 288, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 140, + 288, + 183 + ], + "spans": [ + { + "bbox": [ + 48, + 140, + 288, + 183 + ], + "type": "text", + "content": "[32] Muhammed Kocabas, Nikos Athanasiou, and Michael J Black. Vibe: Video inference for human body pose and shape estimation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 184, + 288, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 184, + 288, + 228 + ], + "spans": [ + { + "bbox": [ + 48, + 184, + 288, + 228 + ], + "type": "text", + "content": "[33] Jehee Lee and Sung Yong Shin. A hierarchical approach to interactive motion editing for human-like figures. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, 1999. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 228, + 288, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 228, + 288, + 281 + ], + "spans": [ + { + "bbox": [ + 48, + 228, + 288, + 281 + ], + "type": "text", + "content": "[34] Jiahui Lei and Kostas Daniilidis. CaDeX: Learning canonical deformation coordinate space for dynamic surface representation via neural homeomorphism. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 282, + 288, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 282, + 288, + 327 + ], + "spans": [ + { + "bbox": [ + 48, + 282, + 288, + 327 + ], + "type": "text", + "content": "[35] Peizhuo Li, Kfir Aberman, Rana Hanocka, Libin Liu, Olga Sorkine-Hornung, and Baoquan Chen. Learning skeletal articulations with neural blend shapes. In ACM Transactions on Graphics (SIGGRAPH), 2021. 2, 5, 6, 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 327, + 288, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 327, + 288, + 370 + ], + "spans": [ + { + "bbox": [ + 48, + 327, + 288, + 370 + ], + "type": "text", + "content": "[36] Zhouyingcheng Liao, Jimei Yang, Jun Saito, Gerard Pons-Moll, and Yang Zhou. Skeleton-free pose transfer for stylized 3D characters. In European Conference on Computer Vision (ECCV), 2022. 3, 4, 5, 6, 7, 8" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 371, + 288, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 371, + 288, + 415 + ], + "spans": [ + { + "bbox": [ + 48, + 371, + 288, + 415 + ], + "type": "text", + "content": "[37] Jongin Lim, Hyung Jin Chang, and Jin Young Choi. PMnet: Learning of disentangled pose and movement for unsupervised motion retargeting. In British Machine Vision Conference (BMVC), 2019. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 415, + 288, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 415, + 288, + 458 + ], + "spans": [ + { + "bbox": [ + 48, + 415, + 288, + 458 + ], + "type": "text", + "content": "[38] Feng Liu and Xiaoming Liu. Learning implicit functions for topology-varying dense 3D shape correspondence. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 
4" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 459, + 288, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 459, + 288, + 503 + ], + "spans": [ + { + "bbox": [ + 48, + 459, + 288, + 503 + ], + "type": "text", + "content": "[39] Lijuan Liu, Youyi Zheng, Di Tang, Yi Yuan, Changjie Fan, and Kun Zhou. Neuroskinning: Automatic skin binding for production characters with deep graph networks. ACM Transactions on Graphics (ToG), 2019. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 503, + 288, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 503, + 288, + 546 + ], + "spans": [ + { + "bbox": [ + 48, + 503, + 288, + 546 + ], + "type": "text", + "content": "[40] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J Black. SMPL: A skinned multiperson linear model. ACM Transactions on Graphics (ToG), 2015. 2, 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 547, + 288, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 547, + 288, + 591 + ], + "spans": [ + { + "bbox": [ + 48, + 547, + 288, + 591 + ], + "type": "text", + "content": "[41] Naureen Mahmood, Nima Ghorbani, Nikolaus F. Troje, Gerard Pons-Moll, and Michael J. Black. AMASS: Archive of motion capture as surface shapes. In IEEE International Conference on Computer Vision (ICCV), 2019. 2, 4" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 592, + 288, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 592, + 288, + 646 + ], + "spans": [ + { + "bbox": [ + 48, + 592, + 288, + 646 + ], + "type": "text", + "content": "[42] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3D reconstruction in function space. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 647, + 288, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 288, + 690 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 288, + 690 + ], + "type": "text", + "content": "[43] Mateusz Michalkiewicz, Jhony K Pontes, Dominic Jack, Mahsa Baktashmotlagh, and Anders Eriksson. Deep level sets: Implicit surface representations for 3D shape inference. arXiv preprint arXiv:1901.06802, 2019. 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 691, + 288, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 288, + 714 + ], + "type": "text", + "content": "[44] Marko Mihajlovic, Yan Zhang, Michael J Black, and Siyu Tang. LEAP: Learning articulated occupancy of people. In" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "text", + "content": "IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 
3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 95, + 545, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 95, + 545, + 139 + ], + "spans": [ + { + "bbox": [ + 307, + 95, + 545, + 139 + ], + "type": "text", + "content": "[45] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Occupancy flow: 4D reconstruction by learning particle dynamics. In IEEE International Conference on Computer Vision (ICCV), 2019. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 140, + 545, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 140, + 545, + 194 + ], + "spans": [ + { + "bbox": [ + 307, + 140, + 545, + 194 + ], + "type": "text", + "content": "[46] Atsuhiro Noguchi, Umar Iqbal, Jonathan Tremblay, Tatsuya Harada, and Orazio Gallo. Watch it move: Unsupervised discovery of 3D joints for re-posing of articulated objects. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 194, + 545, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 194, + 545, + 238 + ], + "spans": [ + { + "bbox": [ + 307, + 194, + 545, + 238 + ], + "type": "text", + "content": "[47] Pablo Palafox, Aljaž Božić, Justus Thies, Matthias Nießner, and Angela Dai. NPMs: Neural parametric models for 3D deformable shapes. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2, 3, 4" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 239, + 545, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 239, + 545, + 282 + ], + "spans": [ + { + "bbox": [ + 307, + 239, + 545, + 282 + ], + "type": "text", + "content": "[48] Pablo Palafox, Nikolaos Sarafianos, Tony Tung, and Angela Dai. SPAMs: Structured implicit parametric models. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 283, + 545, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 283, + 545, + 338 + ], + "spans": [ + { + "bbox": [ + 307, + 283, + 545, + 338 + ], + "type": "text", + "content": "[49] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning continuous signed distance functions for shape representation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 338, + 545, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 338, + 545, + 393 + ], + "spans": [ + { + "bbox": [ + 307, + 338, + 545, + 393 + ], + "type": "text", + "content": "[50] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed AA Osman, Dimitrios Tzionas, and Michael J Black. Expressive body capture: 3D hands, face, and body from a single image. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 5" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 393, + 545, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 393, + 545, + 449 + ], + "spans": [ + { + "bbox": [ + 307, + 393, + 545, + 449 + ], + "type": "text", + "content": "[51] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed A. A. Osman, Dimitrios Tzionas, and Michael J. Black. Expressive body capture: 3D hands, face, and body from a single image. 
In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 4" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 449, + 545, + 481 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 449, + 545, + 481 + ], + "spans": [ + { + "bbox": [ + 307, + 449, + 545, + 481 + ], + "type": "text", + "content": "[52] Martin Poirier and Eric Paquette. Rig retargeting for 3d animation. In Proceedings of the Graphics Interface 2009 Conference, 2009. 2, 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 482, + 545, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 482, + 545, + 536 + ], + "spans": [ + { + "bbox": [ + 307, + 482, + 545, + 536 + ], + "type": "text", + "content": "[53] Davis Rempe, Tolga Birdal, Aaron Hertzmann, Jimei Yang, Srinath Sridhar, and Leonidas J Guibas. Humor: 3D human motion model for robust pose estimation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2, 4" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 537, + 545, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 537, + 545, + 591 + ], + "spans": [ + { + "bbox": [ + 307, + 537, + 545, + 591 + ], + "type": "text", + "content": "[54] Nadine Ruegg, Silvia Zuffi, Konrad Schindler, and Michael J Black. BARC: Learning to regress 3D dog shape from images by exploiting breed information. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 6" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 592, + 545, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 592, + 545, + 624 + ], + "spans": [ + { + "bbox": [ + 307, + 592, + 545, + 624 + ], + "type": "text", + "content": "[55] Robert W Sumner and Jovan Popovic. Deformation transfer for triangle meshes. ACM Transactions on Graphics (ToG), 2004. 2, 3, 5" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 625, + 545, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 625, + 545, + 658 + ], + "spans": [ + { + "bbox": [ + 307, + 625, + 545, + 658 + ], + "type": "text", + "content": "[56] Robert W Sumner, Johannes Schmid, and Mark Pauly. Embedded deformation for shape manipulation. In ACM Transactions on Graphics (SIGGRAPH). 2007. 2, 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 658, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 658, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 658, + 545, + 712 + ], + "type": "text", + "content": "[57] Yu Sun, Xiaolong Wang, Zhuang Liu, John Miller, Alexei Efros, and Moritz Hardt. Test-time training with self-supervision for generalization under distribution shifts. In International Conference on Machine Learning (ICML), 2020. 
5" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "8713" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 564 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "text", + "content": "[58] Ramana Sundararaman, Gautam Pai, and Maks Ovsjanikov. Implicit field supervision for robust non-rigid shape matching. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part III, pages 344-362. Springer, 2022. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 129, + 287, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 129, + 287, + 161 + ], + "spans": [ + { + "bbox": [ + 49, + 129, + 287, + 161 + ], + "type": "text", + "content": "[59] Seyoon Tak and Hyeong-Seok Ko. A physically-based motion retargeting filter. In ACM Transactions on Graphics (ToG), 2005. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 163, + 287, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 163, + 287, + 206 + ], + "spans": [ + { + "bbox": [ + 49, + 163, + 287, + 206 + ], + "type": "text", + "content": "[60] Ruben Villegas, Duygu Ceylan, Aaron Hertzmann, Jimei Yang, and Jun Saito. Contact-aware retargeting of skinned motion. In IEEE International Conference on Computer Vision (ICCV), 2021. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 208, + 287, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 208, + 287, + 251 + ], + "spans": [ + { + "bbox": [ + 49, + 208, + 287, + 251 + ], + "type": "text", + "content": "[61] Ruben Villegas, Jimei Yang, Duygu Ceylan, and Honglak Lee. Neural kinematic networks for unsupervised motion retargeting. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 253, + 287, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 253, + 287, + 306 + ], + "spans": [ + { + "bbox": [ + 49, + 253, + 287, + 306 + ], + "type": "text", + "content": "[62] Jiashun Wang, Chao Wen, Yanwei Fu, Haitao Lin, Tianyun Zou, Xiangyang Xue, and Yinda Zhang. Neural pose transfer by spatially adaptive instance normalization. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 5, 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 308, + 287, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 308, + 287, + 351 + ], + "spans": [ + { + "bbox": [ + 49, + 308, + 287, + 351 + ], + "type": "text", + "content": "[63] Zhan Xu, Yang Zhou, Evangelos Kalogerakis, Chris Landreth, and Karan Singh. RigNet: Neural rigging for articulated characters. In ACM Transactions on Graphics (SIGGRAPH), 2020. 
3, 5, 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 353, + 287, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 353, + 287, + 396 + ], + "spans": [ + { + "bbox": [ + 49, + 353, + 287, + 396 + ], + "type": "text", + "content": "[64] Zhan Xu, Yang Zhou, Evangelos Kalogerakis, and Karan Singh. Predicting animation skeletons for 3D articulated models via volumetric nets. In International Conference on 3D Vision, 2019. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 398, + 287, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 398, + 287, + 430 + ], + "spans": [ + { + "bbox": [ + 49, + 398, + 287, + 430 + ], + "type": "text", + "content": "[65] Jie Yang, Lin Gao, Yu-Kun Lai, Paul L Rosin, and Shihong Xia. Biharmonic deformation transfer with automatic key point selection. Graphical Models, 2018. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 432, + 287, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 432, + 287, + 475 + ], + "spans": [ + { + "bbox": [ + 49, + 432, + 287, + 475 + ], + "type": "text", + "content": "[66] Wang Yifan, Noam Aigerman, Vladimir G Kim, Siddhartha Chaudhuri, and Olga Sorkine-Hornung. Neural cages for detail-preserving 3D deformations. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 476, + 287, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 476, + 287, + 520 + ], + "spans": [ + { + "bbox": [ + 49, + 476, + 287, + 520 + ], + "type": "text", + "content": "[67] Keyang Zhou, Bharat Lal Bhatnagar, and Gerard Pons-Moll. Unsupervised shape and pose disentanglement for 3D meshes. In European Conference on Computer Vision (ECCV), 2020. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 521, + 287, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 521, + 287, + 564 + ], + "spans": [ + { + "bbox": [ + 49, + 521, + 287, + 564 + ], + "type": "text", + "content": "[68] Silvia Zuffi, Angjoo Kanazawa, David W Jacobs, and Michael J Black. 3D Menagerie: Modeling the 3D shape and pose of animals. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 
2, 6, 8" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "8714" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/Zero-Shot Referring Image Segmentation With Global-Local Context Features/d944ca48-5a24-4209-88df-55a9c1e47851_content_list.json b/2023/Zero-Shot Referring Image Segmentation With Global-Local Context Features/d944ca48-5a24-4209-88df-55a9c1e47851_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..0e876f0e0c4eb797c14a179171fd1b37229e11f5 --- /dev/null +++ b/2023/Zero-Shot Referring Image Segmentation With Global-Local Context Features/d944ca48-5a24-4209-88df-55a9c1e47851_content_list.json @@ -0,0 +1,1927 @@ +[ + { + "type": "text", + "text": "Zero-shot Referring Image Segmentation with Global-Local Context Features", + "text_level": 1, + "bbox": [ + 93, + 130, + 875, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Seonghoon $\\mathrm{Yu}^{1}$", + "bbox": [ + 251, + 178, + 379, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Paul Hongsuck Seo2", + "bbox": [ + 418, + 180, + 584, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jeany Son1", + "bbox": [ + 622, + 180, + 714, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ AI Graduate School, GIST", + "bbox": [ + 281, + 198, + 501, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2Google Research", + "bbox": [ + 542, + 199, + 687, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "seonghoon@gm.gist.ac.kr", + "bbox": [ + 194, + 219, + 401, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "phseo@google.com", + "bbox": [ + 439, + 219, + 586, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "jeany@gist.ac.kr", + "bbox": [ + 627, + 219, + 769, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 268, + 313, + 284 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Referring image segmentation (RIS) aims to find a segmentation mask given a referring expression grounded to a region of the input image. Collecting labelled datasets for this task, however, is notoriously costly and labor-intensive. To overcome this issue, we propose a simple yet effective zero-shot referring image segmentation method by leveraging the pre-trained cross-modal knowledge from CLIP. In order to obtain segmentation masks grounded to the input text, we propose a mask-guided visual encoder that captures global and local contextual information of an input image. By utilizing instance masks obtained from off-the-shelf mask proposal techniques, our method is able to segment fine-detailed instance-level groundings. We also introduce a global-local text encoder where the global feature captures complex sentence-level semantics of the entire input expression while the local feature focuses on the target noun phrase extracted by a dependency parser. 
In our experiments, the proposed method outperforms several zero-shot baselines of the task and even the weakly supervised referring expression segmentation method with substantial margins. Our code is available at https://github.com/Seonghoon-Yu/Zero-shot-RIS.", + "bbox": [ + 75, + 300, + 473, + 618 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 662, + 209, + 679 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advances of deep learning has revolutionised computer vision and natural language processing, and addressed various tasks in the field of vision-and-language [4, 19, 27, 28, 36, 43, 50]. A key element in the recent success of the multi-modal models such as CLIP [43] is the contrastive image-text pre-training on a large set of image and text pairs. It has shown a remarkable zero-shot transferability on a wide range of tasks, such as object detection [9, 10, 13], semantic segmentation [7, 12, 59, 63], image captioning [40], visual question answering (VQA) [47] and so on.", + "bbox": [ + 75, + 688, + 470, + 839 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Despite its good transferability of pre-trained multi-modal models, it is not straightforward to handle dense prediction tasks such as object detection and image segmentation. A pixel-level dense prediction task is challenging since there", + "bbox": [ + 75, + 839, + 470, + 901 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "(a) Referring image segmentation task", + "bbox": [ + 501, + 271, + 702, + 282 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/1566abd5ebc5a9381de29c7ad4240a71b1407982ee1c649987e6d2bd794fe85f.jpg", + "image_caption": [ + "Input image", + "Input text", + "\"the bottom cat\"", + "Local-context" + ], + "image_footnote": [ + "\"a cat is lying on the seat of the scooter\"" + ], + "bbox": [ + 506, + 294, + 598, + 364 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/845f9db0e94f532cc292e82afcc911a6a0642a2ee247df769d8d528e523bc19a.jpg", + "image_caption": [ + "Output mask", + "Global-Local context" + ], + "image_footnote": [ + "\"a scooter with two cats sitting on\"" + ], + "bbox": [ + 794, + 294, + 885, + 364 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "(b) Global-Local context in RIS", + "bbox": [ + 503, + 371, + 663, + 382 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a29450ea7923b69ad64fef1514291153faf7e39ea55008fce3ecdcef6ba549b8.jpg", + "image_caption": [ + "a cat is lying on", + "the seat of the scod" + ], + "image_footnote": [], + "bbox": [ + 557, + 425, + 586, + 450 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/178bc5b70cd7d3de40222d1dfbb2bf377146b8f9c8606ad03ed9f872450cbae3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 460, + 555, + 493 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/1ee188e3d920ecfb3a8b26ae7b9c96712b7d816fc6ce10f5b982ef6191de3586.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 594, + 460, + 620, + 491 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d399ed7c9d810758449eb86e4fbbc0063b1a527981df09b0b9af26650ba47630.jpg", + "image_caption": [ + "Global-context", + "a cat is lying on", + "the seat of the scooter" + ], + "image_footnote": [], + "bbox": [ + 643, + 424, + 746, + 493 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": 
"images/23985f799d7872410bfd8292adda80207ee84a87eda2daa6213395e3b0dd9f7b.jpg", + "image_caption": [ + "a cat is lying on", + "the seat of the scooter", + "Figure 1. Illustrations of the task of referring image segmentation and motivations of global-local context features. To find the grounded mask given an expression, we need to understand the relations between the objects as well as their semantics." + ], + "image_footnote": [], + "bbox": [ + 772, + 425, + 870, + 494 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "is a substantial gap between the image-level contrastive pretraining task and the pixel-level downstream task such as semantic segmentation. There have been several attempts to reduce gap between two tasks [44, 54, 63], but these works aim to fine-tune the model consequently requiring task-specific dense annotations, which is notoriously labor-intensive and costly.", + "bbox": [ + 496, + 598, + 893, + 703 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Referring image segmentation is a task to find the specific region in an image given a natural language text describing the region, and it is well-known as one of challenging vision- and language tasks. Collecting annotations for this task is even more challenging as the task requires to collect precise referring expression of the target region as well as its dense mask annotation. Recently, a weakly-supervised referring image segmentation method [48] is proposed to overcome this issue. However, it still requires high-level text expression annotations pairing with images for the target datasets and the performance of the method is far from that of the supervised methods. To tackle this issue, in this paper, we focus on zero-shot transferring from the pre-trained knowledge", + "bbox": [ + 496, + 704, + 895, + 901 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "19456", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "of CLIP to the task of referring image segmentation.", + "bbox": [ + 76, + 90, + 421, + 104 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Moreover, this task is challenging because it requires high-level understanding of language and comprehensive understanding of an image, as well as a dense instance-level prediction. There have been several works for zero-shot semantic segmentation [7, 12, 59, 63], but they cannot be directly extended to the zero-shot referring image segmentation task because it has different characteristics. Specifically, the semantic segmentation task does not need to distinguish instances, but the referring image segmentation task should be able to predict an instance-level segmentation mask. In addition, among multiple instances of the same class, only one instance described by the expression must be selected. For example, in Figure 1, there are two cats in the input image. If the input text is given by \"a cat is lying on the seat of the scooter\", the cat with the green mask is the proper output. 
To find this correct mask, we need to understand the relation between the objects (i.e. \"lying on the seat\") as well as their semantics (i.e. \"cat\", \"scooter\").", + "bbox": [ + 75, + 106, + 470, + 378 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we propose a new baseline of zero-shot referring image segmentation task using a pre-trained model from CLIP, where global and local contexts of an image and an expression are handled in a consistent way. In order to localize an object mask region in an image given a textual referring expression, we propose a mask-guided visual encoder that captures global and local context information of an image given a mask. We also present a global-local textual encoder where the local-context is captured by a target noun phrase and the global context is captured by a whole sentence of the expressions. By combining features in two different context levels, our method is able to understand a comprehensive knowledge as well as a specific trait of the target object. Note that, although our method does not require any additional training on CLIP model, it outperforms all baselines and the weakly supervised referring image segmentation method with a big margin.", + "bbox": [ + 75, + 378, + 470, + 635 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our main contributions can be summarised as follows:", + "bbox": [ + 96, + 637, + 457, + 651 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose a new task of zero-shot referring image segmentation based on CLIP without any additional training. To the best of our knowledge, this is the first work to study the zero-shot referring image segmentation task.", + "- We present a visual encoder and a textual encoder that integrates global and local contexts of images and sentences, respectively. Although the modalities of two encoders are different, our visual and textual features are dealt in a consistent way.", + "- The proposed global-local context features take full advantage of CLIP to capture the target object semantics as well as the relations between the objects in both visual and textual modalities." + ], + "bbox": [ + 96, + 662, + 468, + 898 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- Our method consistently shows outstanding results compared to several baseline methods, and also outperforms the weakly supervised referring image segmentation method with substantial margins.", + "bbox": [ + 517, + 90, + 890, + 151 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 167, + 640, + 184 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Zero-shot Transfer. Classical zero-shot learning aims to predict unseen classes that have not seen before by transferring the knowledge trained on the seen classes. Early works [3, 14, 34] leverage the pre-trained word embedding [5, 39] of class names or attributes and perform zero-shot prediction via mapping between visual representations of images and this word embedding. Recently, CLIP [43] and ALIGNN [19] shed a new light on the zero-shot learning via large-scale image-text pre-training. They show the successive results on various downstream tasks via zero-shot knowledge transfer, such as image captioning [40], video action localization [51], image-text retrieval [1] and so on. 
Contrary to classical zero-shot learning, zero-shot transfer has an advantage of avoiding fine-tuning the pre-trained model on the task-specific dataset, where collecting datasets is time-consuming. There have been several works that apply CLIP encoders directly with tiny architectural modification without additional training for semantic segmentation [63], referring expression comprehension [49], phrase localization [25] and object localization [17]. Our work is also lying on the line of this research field.", + "bbox": [ + 496, + 195, + 893, + 512 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Zero-shot Dense Prediction Tasks. Very recently, with the success of pre-training models using large-scale image-text pairs, there have been several attempts to deal with dense prediction tasks with CLIP, e.g. object detection [9, 10, 13, 24, 30, 45], semantic segmentation [22, 29, 37, 42, 44, 58, 59, 63, 64] and so on. These dense prediction tasks, however, are challenging since CLIP learns image-level features not pixel-level fine-grained features. In order to handle this issue, ViLD [13] introduces a method which crop the image to contain only the bounding box region, and then extract the visual features of cropped regions using CLIP to classify the unseen objects. This approach is applied in a wide range of dense prediction tasks which are demanded the zero-shot transfer ability of CLIP [7, 9, 10, 12, 49, 59]. While this method only considers the cropped area, there are several methods [25, 63] to consider the global context in the image, not only just the cropped region. Adapting CLIP [25] proposed the phrase localization method by modifying CLIP to generate high-resolution spatial feature maps using superpixels. MaskCLIP [63] modifies the image encoder of CLIP by transforming the value embedding layer and the last linear layer into two $1 \\times 1$ convolutional layers to handle pixel-level predictions. In this work, we focus on extracting both global and local context visual features with CLIP.", + "bbox": [ + 496, + 537, + 893, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "19457", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/616e1a9316f9da7ca1627269ffaee1c21ebbda64d8d1632898fb35145bf1b376.jpg", + "image_caption": [ + "Figure 2. Overall framework of our global-local CLIP. Given an image and an expression as inputs, we extract global-local context visual features using mask proposals, and also we extract a global-local context textual feature. After computing the cosine similarity scores between all global-local context visual features and a global-local context textual feature, we choose the mask with the highest score." + ], + "image_footnote": [], + "bbox": [ + 80, + 85, + 893, + 287 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Referring Image Segmentation. Referring image segmentation aims to segment a target object in an image given a natural linguistic expression introduced by [18]. There have been several fully-supervised methods for this task, where images and expressions are used as an input, and the target mask is given for training [2, 20, 33, 55, 60, 62]. Most of works [6, 11, 23, 60, 61] focuses on how to fuse those two features in different modalities extracted from independent encoders. 
Early works [26, 32] extract multi-modal features by simply concatenating visual and textual features and feed them into the segmentation networks [35] to predict dense segmentation masks. There have been two branches of works fusing cross-modal features; an attention based encoder fusion [11, 57, 60] and a cross-modal decoder fusion based on a Transformer decoder [6, 54, 61]. Recently, a CLIP-based approach, which learns separated image and text transformer using a contrastive pre-training, has been proposed [54]. Those fully supervised referring image segmentation methods show good performances in general, but they require dense annotations for target masks and comprehensive expressions describing the target object. To address this problem, TSEG [48] proposed a weakly-supervised referring image segmentation method which learns the segmentation model using text-based image-level supervisions. However, this method still requires high-level referring expression annotations with images for specific datasets. Therefore, we propose a new baseline for zero-shot referring image segmentation without any training or supervisions.", + "bbox": [ + 76, + 366, + 472, + 789 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 76, + 811, + 166, + 827 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we present the proposed method for zero-shot referring image segmentation in detail. We first show an overall framework of the proposed method (3.1), and then discuss the detailed methods for extracting visual features", + "bbox": [ + 75, + 840, + 470, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "(3.2) and textual features (3.3) to encode global and local contextual information.", + "bbox": [ + 498, + 364, + 890, + 395 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Overall Framework", + "text_level": 1, + "bbox": [ + 500, + 407, + 689, + 422 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To solve the task of referring image segmentation, which aims to predict the target region grounded to the text description, it is essential to learn image and text representations in a shared embedding space. To this end, we adopt CLIP to leverage the pre-trained cross-modal features for images and natural language.", + "bbox": [ + 496, + 431, + 893, + 521 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our framework consists of two parts as shown in Fig 2: (1) global-local visual encoder for visual representation, and (2) global-local natural language encoder for referring expression representation. Given a set of mask proposals generated by an unsupervised mask generator [52, 53], we first extract two visual features in global-context and local-context levels for each mask proposal, and then combine them into a single visual feature. Our global-context visual features can comprehensively represent the masked area as well as the surrounding region, while the local-context visual features can capture the representation of the specific masked region. This acts key roles in the referring image segmentation task because we need to focus a small specific target region using a comprehensive expression of the target. At the same time, given a sentence of expressing the target, our textual representation is extracted by the CLIP text encoder. 
In order to understand a holistic expression of the target as well as to focus on the target object itself, we first extract a key noun phrase from a sentence using a dependency parsing provided by spaCy [16], and then combine a global sentence feature and a local target noun phrase feature. Note that, our visual and text encoders are designed to handle both global-context and local-context information in a consistent way.", + "bbox": [ + 496, + 523, + 895, + 868 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Since our method is built on CLIP where the visual and textual features are embedded in the common embedding", + "bbox": [ + 500, + 869, + 893, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "19458", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/8a1d0440dc97753c1d5e4d1c3617b6fc3c94d70f956ae0dae518057008c5d6aa.jpg", + "image_caption": [ + "Figure 3. Detailed illustration of our mask-guided global-context visual encoders in ResNet and ViT architectures: (a) Masked attention pooling in ResNet, (b) Token masking in ViT." + ], + "image_footnote": [], + "bbox": [ + 106, + 88, + 263, + 425 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/41270556ea24f805230e9b1fdf4d566cd914203d9d021aeb35626cfe26ce26af.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 274, + 88, + 450, + 425 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "space, we can formulate the objective of our zero-shot image referring segmentation task as follows. Given inputs of an image $I$ and a referring expression $T$ , our method finds the mask that has the maximum similarity between its visual feature and the given textual feature among all mask proposals:", + "bbox": [ + 75, + 503, + 470, + 580 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {m} = \\arg \\max _ {m \\in M (I)} \\operatorname {s i m} (\\mathbf {t}, \\mathbf {f} _ {m}), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 174, + 606, + 470, + 630 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathrm{sim}(\\cdot, \\cdot)$ is a cosine similarity, $\\mathbf{t}$ is the proposed global-local textual feature for a referring expression $T$ , $\\mathbf{f}$ is the mask-guided global-local visual feature, and $M(I)$ is a mask proposal set for a given image $I$ .", + "bbox": [ + 75, + 641, + 470, + 702 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Mask-guided Global-local Visual Features", + "text_level": 1, + "bbox": [ + 76, + 710, + 434, + 727 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To segment the target region related to the referring expression, it is essential to understand a global relationship between multiple objects in the image as well as local semantic information of the target. In this section, we demonstrate how to extract global and local-context features using CLIP, and how to fuse them.", + "bbox": [ + 75, + 734, + 470, + 823 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Since CLIP is designed to learn image-level representation, it is not well-suited for a pixel-level dense prediction such as an image segmentation. 
To overcome the limitation of using CLIP, we decompose the task into two sub-tasks: mask proposal generation and masked image-text matching.", + "bbox": [ + 75, + 824, + 472, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In order to generate mask proposals, we use the off-the-shelf mask extractor [53] which is the unsupervised instance-level mask generation model. By using mask proposals explicitly, our method can handle fine-detailed instance-level segmentation masks with CLIP.", + "bbox": [ + 496, + 90, + 893, + 167 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Global-context Visual Features. For each mask proposals, we first extract global-context visual features using the CLIP pre-trained model. The original visual features from CLIP, however, is designed to generate one single feature vector to describe the whole image. To tackle this issue, we modify a visual encoder from CLIP to extract features that contain information from not only the masked region but also surrounding regions to understand relationships between multiple objects.", + "bbox": [ + 496, + 184, + 893, + 319 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this paper, we use two different architectures for the visual encoder as in CLIP: ResNet [15] and Vision Transformer (ViT) [8]. For the visual encoder with the ResNet architecture, we denote a visual feature extractor without a pooling layer as $\\phi_{\\mathrm{f}}$ and its attention pooling layer as $\\phi_{\\mathrm{att}}$ . Then the visual feature, $\\mathbf{f}$ , using the visual encoder of CLIP, $\\phi_{\\mathrm{CLIP}}$ , can be expressed as follows:", + "bbox": [ + 496, + 320, + 893, + 426 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {f} = \\phi_ {\\mathrm {C L I P}} (I) = \\phi_ {\\mathrm {a t t}} \\left(\\phi_ {\\mathrm {f}} (I)\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 599, + 434, + 890, + 450 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $I$ is a given image. Similarly, since ViT has multiple multi-head attention layers, we divide this visual encoder into two parts: last $k$ layers and the rest. We denote the former one by $\\phi_{\\mathrm{att}}$ , and the later one by $\\phi_{\\mathrm{f}}$ for ViT architectures based on CLIP.", + "bbox": [ + 496, + 458, + 890, + 532 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Then given an image $I$ and a mask $m$ , our global-context visual feature is defined as follows:", + "bbox": [ + 496, + 534, + 890, + 564 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {f} _ {m} ^ {G} = \\phi_ {\\text {a t t}} \\left(\\phi_ {f} (I) \\odot \\bar {m}\\right), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 614, + 571, + 890, + 590 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\bar{m}$ is the resized mask scaled to the size of the feature map, and $\\odot$ is a Hadamard product operation. We illustrate more details of this masking strategy for each architecture of CLIP in Section 4.1 and Figure 3.", + "bbox": [ + 496, + 598, + 890, + 657 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We refer to it as the global context visual feature, because the entire image is passed through the encoder and the feature map at the last layer contains the holistic information about the image. 
Although we use mask proposals to obtain the features only on masked regions on the feature map, these features already have comprehensive information about the scene.", + "bbox": [ + 496, + 657, + 890, + 762 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Local-context Visual Features. To obtain local-context visual features given a mask proposal, we first mask the image and then crop the image to obtain a new image surrounding only an area of the mask proposal. After cropping and masking the image, it is passed to the visual encoder of CLIP to extract our local-context visual feature $\\mathbf{f}_m^L$ :", + "bbox": [ + 496, + 781, + 893, + 873 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {f} _ {m} ^ {L} = \\phi_ {\\mathrm {C L I P}} \\left(\\mathcal {T} _ {\\text {c r o p}} (I \\odot m)\\right), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 602, + 878, + 890, + 897 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "19459", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathcal{T}_{crop}(\\cdot)$ denotes a cropping operation. This approach is commonly used in zero-shot semantic segmentation methods [7,59]. Since this feature focuses on the masked region in the image where irrelevant regions are removed, it concentrates only on the target object itself.", + "bbox": [ + 75, + 90, + 470, + 167 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Global-local Context Visual features. We aggregate global- and local-context features over masked regions to obtain one single visual feature that describe a representation of masked regions of the image. The global-local context visual feature is computed as follows:", + "bbox": [ + 75, + 184, + 468, + 258 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {f} _ {m} = \\alpha \\mathbf {f} _ {m} ^ {G} + (1 - \\alpha) \\mathbf {f} _ {m} ^ {L}, \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 265, + 468, + 284 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\alpha \\in [0,1]$ is a constant parameter, $m$ is a mask proposal, $\\mathbf{f}^G$ and $\\mathbf{f}^L$ are global-context and local-context visual features in Eq. (3) and Eq. (4), respectively. As in Eq. (1), the score for each mask proposal is then obtained by computing similarity between our global-local context visual features and the textual feature of the expression described in the next section.", + "bbox": [ + 75, + 291, + 470, + 396 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. Global-local Textual Features", + "text_level": 1, + "bbox": [ + 76, + 404, + 339, + 419 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Similar to the visual features, it is important to understand a holistic meaning as well as the target object noun in given expressions. Given a referring expression $T$ , we extract a global sentence feature, $\\mathbf{t}^G$ , using the pre-trained CLIP text encoder, $\\psi_{\\mathrm{CLIP}}$ , as follows:", + "bbox": [ + 75, + 428, + 468, + 503 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {t} ^ {G} = \\psi_ {\\mathrm {C L I P}} (T). 
\\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 508, + 468, + 527 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Although the CLIP text encoder can extract the textual representation aligning with the image-level representation, it is hard to focus on the target noun in the expression because the expression of this task is formed as a complex sentence containing multiple clauses, e.g. \"a dark brown leather sofa behind a foot stool that has a laptop computer on it\".", + "bbox": [ + 75, + 534, + 468, + 625 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To address this problem, we exploit a dependency parsing using spaCy [16] to find the target noun phrase, $\\mathrm{NP}(T)$ , given the text expression $T$ . To find the target noun phrase, we first find all noun phrases in the expression, and then select the target noun phrase that contains the root noun of the sentence. After identifying the target noun phrase in the input sentence, we extract the local-context textual feature from the CLIP textual encoder:", + "bbox": [ + 75, + 625, + 468, + 744 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {t} ^ {L} = \\psi_ {\\mathrm {C L I P}} (\\mathrm {N P} (T)). \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 200, + 752, + 468, + 770 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Finally, our global-local context textual feature is computed by a weighted sum of the global and local textual features described in Eq. (6) and Eq. (7) as follows:", + "bbox": [ + 75, + 777, + 468, + 823 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {t} = \\beta \\mathbf {t} ^ {G} + (1 - \\beta) \\mathbf {t} ^ {L}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 189, + 828, + 468, + 847 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\beta \\in [0,1]$ is a constant parameter, $\\mathbf{t}^G$ and $\\mathbf{t}^L$ are global sentence and local noun-phrase textual features, respectively.", + "bbox": [ + 75, + 854, + 468, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Implementation Details", + "text_level": 1, + "bbox": [ + 500, + 90, + 720, + 107 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We use unsupervised instance segmentation methods, FreeSOLO [53], to obtain mask proposals, and the shorter size of an input image is set to 800. For CLIP, the size of an image is set to $224 \\times 224$ . The number of masking layers, $k$ in ViT is set to 3. We set $\\alpha = 0.85$ for RefCOCOg, 0.95 for RefCOCO and RefCOCO+, and $\\beta = 0.5$ for all datasets.", + "bbox": [ + 496, + 114, + 893, + 205 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Masking in Global-context Visual Encoder", + "text_level": 1, + "bbox": [ + 498, + 215, + 862, + 232 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We use both ResNet-50 and ViT-B/32 architectures for the CLIP visual encoder. Masking strategies of the global-context visual encoder for these two architecture are mostly similar but have small differences, described next.", + "bbox": [ + 496, + 239, + 893, + 299 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Masked Attention Pooling in ResNet [15]. In a ResNet-based visual encoder of the original CLIP, a global average pooling layer is replaced by an attention pooling layer. This attention pooling layer has the same architecture as the multi-head attention in a Transformer. 
A query of the attention pooling layer is computed by a global average pooling operation onto the feature maps extracted by the ResNet backbone. A key and a value of the attention pooling layer is given by a flattened feature map. In our masked attention pooling, we mask the feature map using a given mask before computing query, key and value. After masking feature maps, we compute query, key and value, and then they are fed into the multi-head attention layer. The detailed illustration of our masked attention pooling in ResNet is shown in Figure 3a.", + "bbox": [ + 496, + 319, + 893, + 531 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Token Masking in ViT [8]. Following ViT, we divide an image into grid patches, and embed patches to a linear layer with positional embeddings to get tokens, and then process those tokens with a series of Transformer layer. To capture global-context of images, we mask tokens in only the last $k$ Transformer layers. The tokens are reshaped and masked by a given mask proposal, and then flattened and applied to the subsequent Transformer layer. As ViT has a class token (CLS), we use the final output feature from this CLS token as our global-context visual representation. The detailed method of our token masking in ViT is also shown in Figure 3b. In our experiments, we use ViT-B/32 architecture for the backbone of our ViT-based visual encoder, and we apply a token masking to the last 3 layers in the visual encoder. We show the performances with respect to the location of token masking layers in the supplementary materials.", + "bbox": [ + 496, + 550, + 893, + 792 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 500, + 806, + 633, + 824 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.1. Datasets and Metrics", + "text_level": 1, + "bbox": [ + 500, + 830, + 697, + 845 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We evaluate our method on RefCOCO [41], RefCOCO+ [41] and RefCOCOg [21, 38], where the images and masks in MS-COCO [31] dataset are used to annotate", + "bbox": [ + 496, + 854, + 893, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "19460", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/47368bb0bceecea861e0a5fe7b3dd85554b0d951f89010e0123ceeb39a8f0cd4.jpg", + "table_caption": [ + "Table 1. Comparison with Zero-shot RIS baseline methods on three standard benchmark datasets. U: The UMD partition. G: The Google partition. All baseline methods use FreeSOLO as the mask proposal network. † denotes that the model is initialized with the ImageNet pre-trained weights and trained on RIS datasets. FreeSOLO upper-bound is computed between the GT mask and the maximum overlapped FreeSOLO mask with the GT mask." + ], + "table_footnote": [], + "table_body": "
Metric | Methods | Visual Encoder | RefCOCO | RefCOCO+ | RefCOCOg
val | test A | test B | val | test A | test B | val(U) | test(U) | val(G)
oIoU | Supervised SoTA method [60] | 72.73 | 75.82 | 68.79 | 62.14 | 68.38 | 55.10 | 61.24 | 62.09 | 60.50
Zero-Shot Baselines
Grad-CAM | ResNet-50 | 14.02 | 15.07 | 13.49 | 14.46 | 14.97 | 14.04 | 12.51 | 12.81 | 12.86
Score map | ResNet-50 | 19.87 | 19.31 | 20.22 | 20.37 | 19.65 | 20.75 | 18.88 | 19.16 | 19.15
Region token | ViT-B/32 | 21.71 | 20.31 | 22.63 | 22.61 | 20.91 | 23.46 | 25.52 | 25.38 | 25.29
Cropping | ResNet-50 | 22.36 | 20.49 | 22.69 | 23.95 | 22.03 | 23.49 | 28.20 | 27.64 | 27.47
Cropping | ViT-B/32 | 22.73 | 21.11 | 23.08 | 24.09 | 22.42 | 23.93 | 28.69 | 27.51 | 27.70
Global-Local CLIP (ours) | ResNet-50 | 24.58 | 23.38 | 24.35 | 25.87 | 24.61 | 25.61 | 30.07 | 29.83 | 29.45
Global-Local CLIP (ours) | ViT-B/32 | 24.88 | 23.61 | 24.66 | 26.16 | 24.90 | 25.83 | 31.11 | 30.96 | 30.69
FreeSOLO upper-bound | - | 42.08 | 42.52 | 43.52 | 42.17 | 42.52 | 43.80 | 48.81 | 48.96 | 48.49
mIoU | Zero-Shot Baselines
Grad-CAM | ResNet-50 | 14.22 | 15.93 | 13.18 | 14.80 | 15.87 | 13.78 | 12.47 | 13.16 | 13.30
Score map | ResNet-50 | 21.32 | 20.96 | 21.57 | 21.61 | 21.17 | 22.30 | 20.07 | 20.43 | 20.63
Region token | ViT-B/32 | 23.43 | 22.07 | 24.62 | 24.51 | 22.64 | 25.37 | 27.57 | 27.34 | 27.69
Cropping | ResNet-50 | 24.31 | 22.37 | 24.66 | 26.31 | 23.94 | 25.69 | 31.27 | 30.87 | 30.78
Cropping | ViT-B/32 | 24.83 | 22.58 | 25.72 | 26.33 | 24.06 | 26.46 | 31.88 | 30.94 | 31.06
Global-Local CLIP (ours) | ResNet-50 | 26.70 | 24.99 | 26.48 | 28.22 | 26.54 | 27.86 | 33.02 | 33.12 | 32.79
Global-Local CLIP (ours) | ViT-B/32 | 26.20 | 24.94 | 26.56 | 27.80 | 25.64 | 27.84 | 33.52 | 33.67 | 33.61
FreeSOLO upper-bound | - | 48.25 | 46.62 | 50.43 | 48.28 | 46.62 | 50.62 | 52.44 | 52.91 | 52.76
Weakly-supervised method
TSEG [48] | ViT-S/16† | 25.95 | - | - | 22.62 | - | - | 23.41 | - | -
", + "bbox": [ + 96, + 155, + 874, + 441 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/af79deb05cb5a8b857a14e275374b1e1eac6817510a35a9ee5f9423acbcac241.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | Train dataset | oIoU on PhraseCut
All | Unseen
CRIS | RefCOCO | 15.53 | 13.75
RefCOCO+ | 16.30 | 14.62
RefCOCOg | 16.24 | 13.88
LAVT | RefCOCO | 16.68 | 14.43
RefCOCO+ | 16.64 | 13.49
RefCOCOg | 16.05 | 13.48
Ours | N/A | 23.64 | 22.98
", + "bbox": [ + 80, + 462, + 294, + 556 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/0ce096b4aec5fe9d6429feeca993b26f9dc691311bb2d6e48e649f2f9863ac2b.jpg", + "image_caption": [ + "Figure 4. Comparisons to supervised methods in zero-shot setting on PhraseCut (left), and in few-shot setting on RefCOCOg (right). Unseen denotes a subset with classes that are not seen in RefCOCO." + ], + "image_footnote": [], + "bbox": [ + 297, + 462, + 455, + 556 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "the ground-truth of the referring image segmentation task. RefCOCO, RefCOCO+ and RefCOCOg have 19,994, 19,992 and 26,711 images with 142,210, 141,564 and 104,560 referring expressions, respectively. RefCOCO and RefCOCO+ have shorter expressions and an average of 1.6 nouns and 3.6 words are included in one expression, while RefCOCOg expresses more complex relations with longer sentences and has an average of about 2.8 nouns and 8.4 words. The detailed statistics of those datasets are demonstrated in our supplementary materials.", + "bbox": [ + 75, + 625, + 470, + 775 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For the evaluation metrics, we use the overall Intersection over Union (oIoU) and the mean Intersection over Union (mIoU) which are the common metrics for the referring image segmentation task. The oIoU is measured by the total area of intersection divided by the total area of union, where the total area is computed by accumulating over all examples. In our ablation study, we use oIoUs since most of supervised RIS methods [6, 23] adopt it. We also report the mIoUs as", + "bbox": [ + 75, + 779, + 470, + 902 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/85b9e510817bf3557e89da49217ff040a4503b3c33a18c9410fd741e918655f3.jpg", + "table_caption": [ + "Table 2. oIoU results of our method and the baselines using COCO instance GT masks. We use a ViT-B/32 model for a visual encoder." + ], + "table_footnote": [], + "table_body": "
Method | RefCOCO | RefCOCO+ | RefCOCOg
Grad-CAM | 18.32 | 18.14 | 21.24
Score map | 23.97 | 25.50 | 28.11
Region token | 35.59 | 38.13 | 40.19
Cropping | 36.32 | 42.07 | 47.42
Ours | 37.05 | 42.59 | 51.01
", + "bbox": [ + 527, + 501, + 859, + 579 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/876759a51b8f1a597312be4e95a7de6a849a91dae299012f9866cca5a0b7ef48.jpg", + "table_caption": [ + "Table 3. oIoU results with different context-level features on the val split of RefCOCOg. We use a ViT-B/32 model for a visual encoder." + ], + "table_footnote": [], + "table_body": "
Encoder Variants | Textual features
Global | Local | Global-Local
Visual features | Global | 27.03 | 27.37 | 27.60
Local | 28.69 | 25.23 | 29.48
Global-Local | 30.18 | 27.94 | 31.11
", + "bbox": [ + 519, + 630, + 870, + 696 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "in [48], which computes the average IoU across all examples while considering the object sizes.", + "bbox": [ + 498, + 719, + 890, + 750 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2. Baselines", + "text_level": 1, + "bbox": [ + 500, + 758, + 607, + 773 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We modify some baseline methods extracting dense predictions from CLIP into zero-shot RIS task to compare with our framework, and use FreeSOLO [53] as a mask generator in all baselines.", + "bbox": [ + 498, + 782, + 893, + 842 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "- Grad-CAM: The first baseline is utilizing gradient-based activation map based on Grad-CAM [46] which has been verified in the prior work [17]. After obtaining the activa", + "bbox": [ + 500, + 854, + 893, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "19461", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/175ec71baec5767f30f4da75d8358cc443024c51dc4780ec22d5a79bdb451c73.jpg", + "image_caption": [ + "Image", + "Expression:", + "the banana the person is holding", + "Local visual" + ], + "image_footnote": [], + "bbox": [ + 109, + 99, + 209, + 176 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/59bcf0e50b884b1cc9e410e95f689b8a0e0ef98b998d456350f68a6d85a6c57c.jpg", + "image_caption": [ + "GT" + ], + "image_footnote": [], + "bbox": [ + 109, + 188, + 184, + 244 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b79efdc9aef4bdd6cea7434cf89bcb992bfa031c993f159d593661738218379c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 192, + 188, + 266, + 244 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/34f4f27f3a778b3fbe4a15c39d0954a877e63fb00293b0bf1be62da800064c68.jpg", + "image_caption": [ + "Global visual" + ], + "image_footnote": [], + "bbox": [ + 277, + 188, + 352, + 244 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/54814498ca74ac1fd2495ddf3c8a74115c921e8dcb790e4479c8484932d955e5.jpg", + "image_caption": [ + "Global-Local" + ], + "image_footnote": [], + "bbox": [ + 364, + 188, + 437, + 244 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8dba6ad77e3b2a9dc43c6ec0d74e28331d5380c6cc456c8723e59e9e1c0b4759.jpg", + "image_caption": [ + "Image", + "Expression:", + "a green bicycle ridden by a man in a black windbreaker", + "GT" + ], + "image_footnote": [], + "bbox": [ + 109, + 260, + 209, + 337 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/60c7fe2b38e6fe473564eceab8aa34e9d80bdaeec8881270a1d8de660b904858.jpg", + "image_caption": [ + "Figure 5. Qualitative results with different levels of visual features. COCO instance GT masks are used as mask proposals to validate the effect of the global-local context visual features." 
+ ], + "image_footnote": [], + "bbox": [ + 109, + 349, + 181, + 405 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/5da1f89587b0963d59efff3c90e8212d3c46fce91c0dbf41b445923fa81b8cc1.jpg", + "image_caption": [ + "Local visual" + ], + "image_footnote": [], + "bbox": [ + 194, + 349, + 267, + 405 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a3a590e289940f97c228a5acbfef851aa08b50e856c05a0e352cac07442bfa86.jpg", + "image_caption": [ + "Global visual" + ], + "image_footnote": [], + "bbox": [ + 279, + 349, + 352, + 405 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/07e6c57a984bdebd860e3e01c1404e23b6827ecdcfbae57a653560c07aa2766f.jpg", + "image_caption": [ + "Global-Local" + ], + "image_footnote": [], + "bbox": [ + 364, + 349, + 437, + 405 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "tion maps using the similarity score of image-text pairs, we mask the maps and aggregate scores for all mask proposals, and select the mask with the highest score.", + "bbox": [ + 89, + 487, + 470, + 532 + ], + "page_idx": 6 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- **Score Map:** The second baseline is the method extracting a dense score map as in MaskCLIP [63]. As in MaskCLIP, to obtain dense score maps without pooling, a value linear layer and the last layer in the attention pooling are transformed into two consecutive $1 \\times 1$ convolution layers. The feature map extracted from ResNet is forwarded to those two layers to get language-compatible dense image feature map, and then compute a cosine similarity with CLIP's textual feature. After obtaining a score map, we project mask proposals to a score map. The scores in the mask area are averaged and then we select the mask with the maximum score.", + "- Region Token in ViT: The third baseline is a method used in Adapting CLIP [25]. Similar to Adapting CLIP, we use region tokens for each mask proposal for all Transformer layers in CLIP's visual encoder instead of using superpixels. We finally compute the cosine similarity between each class token of a mask proposal and CLIP's textual feature, and then choose the mask with the highest score.", + "- Cropping: The last baseline is our local-context visual features described in Section 3.2. 
Cropping and masking is a commonly used approach utilizing CLIP for extracting" + ], + "bbox": [ + 76, + 544, + 472, + 901 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d089c8e983c72f3e3a84cfdc2f7e0e6570f7c5077619bd9faaae69c9bfe4f760.jpg", + "image_caption": [ + "Image", + "Expression:" + ], + "image_footnote": [], + "bbox": [ + 503, + 104, + 578, + 160 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c826d8a656f514f40ec0182748be048e09bceaad87e61a349f05ebf5274e128d.jpg", + "image_caption": [ + "GT" + ], + "image_footnote": [], + "bbox": [ + 581, + 103, + 658, + 160 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/693e8c61517c4226c11239327aa7bb5a086b8732a1c16208cafb2c46de142f76.jpg", + "image_caption": [ + "Local text" + ], + "image_footnote": [], + "bbox": [ + 658, + 103, + 735, + 160 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/24d4d4286cc6ab8a9f49e5603053500f9ef0d3b1e82e7d36e267bf2f0f723f68.jpg", + "image_caption": [ + "Global text" + ], + "image_footnote": [], + "bbox": [ + 736, + 104, + 813, + 160 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/49f44335a1d20a198480e16c451552af1187a0ee38d981a4fdbb05a3a5908a28.jpg", + "image_caption": [ + "Global-Local" + ], + "image_footnote": [], + "bbox": [ + 816, + 104, + 890, + 160 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a14079ffe6982d3c747e9bea2fbf2aec856b788958d3798b3ca83079635f0001.jpg", + "image_caption": [ + "guy in wheelchair" + ], + "image_footnote": [], + "bbox": [ + 571, + 162, + 584, + 172 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/621bc8c4868298223080137b6ceabe78f8cab2af98288a579c6015cb4d613000.jpg", + "image_caption": [ + "Expression:", + "a woman", + "umbrella", + "Figure 6. Qualitative results with different levels of textual features using COCO Instance GT mask proposals." + ], + "image_footnote": [], + "bbox": [ + 503, + 176, + 578, + 234 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/16f9ecf60c777341519c67a95f51f5bf77fa161a4bc7aec0ec76dd83dfc10b5f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 581, + 176, + 656, + 234 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e9f6d407e67b0869c4afe74bee37216f3551102fb5d448873ec78b0396564757.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 658, + 176, + 733, + 234 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/286426ff765b1c17f783baa567b5fc5c4f9d116f6f071d4e6afdcfb261e43c91.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 736, + 176, + 813, + 234 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a4f41c652470c5b8489327c2338666d59e5502f0b47c446bf3de4a255db73e34.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 815, + 176, + 890, + 234 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "mask or box region feature in a range of zero-shot dense prediction tasks [7, 9, 13, 49, 59]. Therefore, we consider cropping as one of the zero-shot RIS baselines.", + "bbox": [ + 511, + 323, + 890, + 368 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.3. Results", + "text_level": 1, + "bbox": [ + 500, + 387, + 591, + 402 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Main Results. 
We report referring image segmentation performances of our global-local CLIP and other baselines on RefCOCO, RefCOCO+ and RefCOCOg in terms of IoU and mIoU metrics in Table 1. For a fair comparison, all methods including baselines use FreeSOLO [53] mask proposals to produce the final output mask. The experimental results show that our method outperforms other baseline methods with substantial margins. Our method also surpasses the weakly supervised referring image segmentation method (TSEG) [48] in terms of mIoU1. We also show upper-bound performances of using FreeSOLO, where the scores are computed by the IoU between ground-truth masks and its max-overlapped mask proposal. Although there is still a gap compared to the fully-supervised referring image segmentation methods, our method improves performance significantly compared to the baselines with the same upper-bound.", + "bbox": [ + 496, + 410, + 893, + 652 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Zero-shot Evaluation on Unseen Domain. To verify the effectiveness of our method in a more practical setting, we report the zero-shot evaluation results with SoTA supervised methods [54, 60] on the test split of PhraseCut [56] in Figure 4 (left). Note that, RefCOCO contains expressions for only 80 salient object classes, whereas PhraseCut covers a variety of additional visual concepts i.e. 1272 categories in the test set. Our method outperforms both supervised methods, even though our models were never trained under RIS supervision. When evaluated on a subset of classes that are not seen in the RefCOCO datasets (Unseen column), the supervised methods show significant performance degradation, whereas our method works robustly on this subset.", + "bbox": [ + 496, + 670, + 893, + 866 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "1We only compare mIoU scores with TSEG since it reports only mIoU scores in the paper.", + "bbox": [ + 500, + 875, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "19462", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b8fb3126971edeccdc43fd85ba0718c00bb0a1c780e4a477b816262e833f3ebc.jpg", + "image_caption": [ + "Figure 7. Qualitative results of our method with the several baselines. Note that all methods use mask proposals generated by FreeSOLO." + ], + "image_footnote": [], + "bbox": [ + 99, + 88, + 872, + 277 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Comparison to supervised methods in few-shot Setting. We also compare our model to two supervised RIS methods [54, 60] in a few-shot learning setting, where the training set includes $k$ instances for each of 80 classes in RefCOCO $^2$ . Note that the supervised methods use additional forms of supervision in training, whereas our method does not require any form of training or additional supervision; thus this setting is even disadvantageous to our method. Figure 4 (right) shows oIoU while varying $k$ on RefCOCOg. The results clearly show that our method outperforms both supervised methods with large margins when $k$ is small, and the gaps narrow as $k$ gets larger (64 and 256 for LAVT [60] and CRIS [54], respectively). Note that it covers about $10\\%$ of the training set when $k = 64$ and the same trends hold for both RefCOCO and RefCOCO+.", + "bbox": [ + 76, + 328, + 472, + 553 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.4. 
Ablation Study", + "text_level": 1, + "bbox": [ + 76, + 566, + 230, + 583 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Effects of Mask Quality. To show the impact of the proposed method without considering the mask quality of the mask generators, we evaluate the performance of our method and the baselines with COCO instance GT masks in Table 2. Our approach has demonstrated superior performance compared to all baselines and has shown a performance improvement of over $3.5\\%$ , particularly on RefCOCOg which includes longer expressions. We believe that our method performs well on challenging examples that involve complex expressions, such as those with multiple clauses, which require an understanding of both the language and the scene.", + "bbox": [ + 75, + 592, + 470, + 758 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Effects of Global-Local Context Features. We also study the effects of global-local context features in both visual and textual modalities and show the results in Table 3. For this analysis, we use RefCOCOg as it contains more complex expressions with multiple clauses. Among all combinations", + "bbox": [ + 76, + 784, + 468, + 861 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "of two modalities, using both global-local context features in the visual and textual domains leads to the best performance.", + "bbox": [ + 500, + 328, + 893, + 357 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Qualitative Analysis. We demonstrate several results that support the effectiveness of our global-local context visual features in Figure 5. To show this effect more clearly, we use COCO instance GT masks as mask proposals. When using only local-context visual features, the predicted mask tends to focus on the instance that shares the same class as the target object. However, when using only global-context visual features, the predicted mask tends to capture the context of the expression but may focus on a different object class. By combining global and local context, our method successfully finds the target mask. We also demonstrate the effectiveness of our global-local context textual features in Figure 6. Furthermore, we compare the qualitative results of our method with baseline methods in Figure 7. Our proposed global-local CLIP outperforms the baseline methods in identifying the target object by taking into account the global context of the image and expression.", + "bbox": [ + 498, + 380, + 893, + 637 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 652, + 619, + 669 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we propose a simple yet effective zero-shot referring image segmentation framework focusing on transferring knowledges from image-text cross-modal representations of CLIP. To tackle the difficulty of the referring image segmentation task, we propose global-local context encodings to compute similarities between images and expressions, where both target object semantics and relations between the objects are dealt in a unified framework. The proposed method significantly outperforms all baseline methods and weakly supervised method as well.", + "bbox": [ + 498, + 678, + 893, + 816 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgement. 
This work was supported by the IITP grants (No.2019-0-01842, No.2021-0-02068, No.2022-0-00926) funded by MSIT, the ISTD program (No.20018334) funded by MOTIE, and the GIST-MIT Research Collaboration grant funded by GIST, Korea.", + "bbox": [ + 498, + 830, + 893, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_footnote", + "text": "2we use object classes in RefCOCO GT annotation. This is to cover all salient objects in the dataset during the few-shot training.", + "bbox": [ + 76, + 875, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "19463", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Alberto Baldrati, Marco Bertini, Tiberio Uricchio, and Alberto Del Bimbo. Effective conditioned and composed image retrieval combining clip-based features. In CVPR, 2022. 2", + "[2] Bo Chen, Zhiwei Hu, Zhilong Ji, Jinfeng Bai, and Wangmeng Zuo. Position-aware contrastive alignment for referring image segmentation. arXiv preprint arXiv:2212.13419, 2022. 3", + "[3] Shiming Chen, Ziming Hong, Yang Liu, Guo-Sen Xie, Baigui Sun, Hao Li, Qinmu Peng, Ke Lu, and Xinge You. Transzero: Attribute-guided transformer for zero-shot learning. In AAAI, 2022. 2", + "[4] Yen-Chun Chen, Linjie Li, Licheng Yu, Ahmed El Kholy, Faisal Ahmed, Zhe Gan, Yu Cheng, and Jingjing Liu. Uniter: Universal image-text representation learning. In ECCV, 2020. 1", + "[5] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In ACL, 2019. 2", + "[6] Henghui Ding, Chang Liu, Suchen Wang, and Xudong Jiang. Vision-language transformer and query generation for referring segmentation. In ICCV, 2021. 3, 6", + "[7] Jian Ding, Nan Xue, Gui-Song Xia, and Dengxin Dai. Decoupling zero-shot semantic segmentation. In CVPR, 2022. 1, 2, 5, 7", + "[8] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2020. 4, 5", + "[9] Yu Du, Fangyun Wei, Zihe Zhang, Miaojing Shi, Yue Gao, and Guoqi Li. Learning to prompt for open-vocabulary object detection with vision-language model. In CVPR, 2022. 1, 2, 7", + "[10] Chengjian Feng, Yujie Zhong, Zequn Jie, Xiangxiang Chu, Haibing Ren, Xiaolin Wei, Weidi Xie, and Lin Ma. Promptdet: Expand your detector vocabulary with uncurated images. In ECCV, 2022. 1, 2", + "[11] Guang Feng, Zhiwei Hu, Lihe Zhang, and Huchuan Lu. Encoder fusion network with co-attention embedding for referring image segmentation. In CVPR, 2021. 3", + "[12] Golnaz Ghiasi, Xiuye Gu, Yin Cui, and Tsung-Yi Lin. Scaling open-vocabulary image segmentation with image-level labels. In ECCV, 2022. 1, 2", + "[13] Xiuye Gu, Tsung-Yi Lin, Weicheng Kuo, and Yin Cui. Open-vocabulary object detection via vision and language knowledge distillation. In ICML, 2022. 1, 2, 7", + "[14] Zongyan Han, Zhenyong Fu, Shuo Chen, and Jian Yang. Contrastive embedding for generalized zero-shot learning. In CVPR, 2021. 2", + "[15] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 4, 5", + "[16] Matthew Honnibal and Mark Johnson. 
An improved non-monotonic transition system for dependency parsing. In EMNLP, 2015. 3, 5" + ], + "bbox": [ + 78, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[17] Hsuan-An Hsia, Che-Hsien Lin, Bo-Han Kung, Jhao-Ting Chen, Daniel Stanley Tan, Jun-Cheng Chen, and Kai-Lung Hua. Clipcam: A simple baseline for zero-shot text-guided object and action localization. In ICASSP, 2022. 2, 6", + "[18] Ronghang Hu, Marcus Rohrbach, and Trevor Darrell. Segmentation from natural language expressions. In ECCV, 2016. 3", + "[19] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021. 1, 2", + "[20] Ya Jing, Tao Kong, Wei Wang, Liang Wang, Lei Li, and Tieniu Tan. Locate then segment: A strong pipeline for referring image segmentation. In CVPR, 2021. 3", + "[21] Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. Referitagame: Referring to objects in photographs of natural scenes. In EMNLP, 2014. 5", + "[22] Kwanyoung Kim, Yujin Oh, and Jong Chul Ye. Zegot: Zero-shot segmentation through optimal transport of text prompts. arXiv preprint arXiv:2301.12171, 2023. 2", + "[23] Namyup Kim, Dongwon Kim, Cuiling Lan, Wenjun Zeng, and Suha Kwak. Restr: Convolution-free referring image segmentation using transformers. In CVPR, 2022. 3, 6", + "[24] Weicheng Kuo, Yin Cui, Xiuye Gu, AJ Piergiovanni, and Anelia Angelova. F-vlm: Open-vocabulary object detection upon frozen vision and language models. arXiv preprint arXiv:2209.15639, 2022. 2", + "[25] Jiahao Li, Greg Shakhnarovich, and Raymond A Yeh. Adapting clip for phrase localization without further training. arXiv preprint arXiv:2204.03647, 2022. 2, 7", + "[26] Ruiyu Li, Kaican Li, Yi-Chun Kuo, Michelle Shu, Xiaojuan Qi, Xiaoyong Shen, and Jiaya Jia. Referring image segmentation via recurrent refinement networks. In CVPR, 2018. 3", + "[27] Wei Li, Can Gao, Guocheng Niu, Xinyan Xiao, Hao Liu, Jiachen Liu, Hua Wu, and Haifeng Wang. Unimo: Towards unified-modal understanding and generation via cross-modal contrastive learning. In ACL, 2021. 1", + "[28] Xiujun Li, Xi Yin, Chunyuan Li, Pengchuan Zhang, Xiaowei Hu, Lei Zhang, Lijuan Wang, Houdong Hu, Li Dong, Furu Wei, et al. Oscar: Object-semantics aligned pre-training for vision-language tasks. In ECCV, 2020. 1", + "[29] Feng Liang, Bichen Wu, Xiaoliang Dai, Kunpeng Li, Yinan Zhao, Hang Zhang, Peizhao Zhang, Peter Vajda, and Diana Marculescu. Open-vocabulary semantic segmentation with mask-adapted clip. arXiv preprint arXiv:2210.04150, 2022. 2", + "[30] Chuang Lin, Peize Sun, Yi Jiang, Ping Luo, Lizhen Qu, Gholamreza Haffari, Zehuan Yuan, and Jianfei Cai. Learning object-language alignments for open-vocabulary object detection. arXiv preprint arXiv:2211.14843, 2022. 2", + "[31] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dálár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In ECCV, 2014. 5", + "[32] Chenxi Liu, Zhe Lin, Xiaohui Shen, Jimei Yang, Xin Lu, and Alan Yuille. Recurrent multimodal interaction for referring image segmentation. In ICCV, 2017. 
3" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "19464", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[33] Jiang Liu, Hui Ding, Zhaowei Cai, Yuting Zhang, Ravi Kumar Satzoda, Vijay Mahadevan, and R Manmatha. Polyformer: Referring image segmentation as sequential polygon generation. arXiv preprint arXiv:2302.07387, 2023. 3", + "[34] Lu Liu, Tianyi Zhou, Guodong Long, Jing Jiang, Xuanyi Dong, and Chengqi Zhang. Isometric propagation network for generalized zero-shot learning. In ICLR, 2020. 2", + "[35] Jonathan Long, Evan Shelhamer, and Trevor Darrell. Fully convolutional networks for semantic segmentation. In CVPR, 2015. 3", + "[36] Jiasen Lu, Dhruv Batra, Devi Parikh, and Stefan Lee. Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. In NeurlPS, 2019. 1", + "[37] Huaishao Luo, Junwei Bao, Youzheng Wu, Xiaodong He, and Tianrui Li. Segclip: Patch aggregation with learnable centers for open-vocabulary semantic segmentation. arXiv preprint arXiv:2211.14813, 2022. 2", + "[38] Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L Yuille, and Kevin Murphy. Generation and comprehension of unambiguous object descriptions. In CVPR, 2016. 5", + "[39] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Corrado, and Jeff Dean. Distributed representations of words and phrases and their compositionality. In NeurIPS, 2013. 2", + "[40] Ron Mokady, Amir Hertz, and Amit H Bermano. Clipcap: Clip prefix for image captioning. arXiv preprint arXiv:2111.09734, 2021. 1, 2", + "[41] Varun K Nagaraja, Vlad I Morariu, and Larry S Davis. Modeling context between objects for referring expression understanding. In ECCV, 2016. 5", + "[42] Prashant Pandey, Mustafa Chasmai, Monish Natarajan, and Brejesh Lall. A language-guided benchmark for weakly supervised open vocabulary semantic segmentation. arXiv preprint arXiv:2302.14163, 2023. 2", + "[43] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021. 1, 2", + "[44] Yongming Rao, Wenliang Zhao, Guangyi Chen, Yansong Tang, Zheng Zhu, Guan Huang, Jie Zhou, and Jiwen Lu. Denseclip: Language-guided dense prediction with context-aware prompting. In CVPR, 2022. 1, 2", + "[45] Hanoona Abdul Rasheed, Muhammad Maaz, Muhammad Uzair Khattak, Salman Khan, and Fahad Khan. Bridging the gap between object and image-level representations for open-vocabulary detection. In NeurIPS, 2022. 2", + "[46] Ramprasaath R Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. Gradcam: Visual explanations from deep networks via gradient-based localization. In ICCV, 2017. 6", + "[47] Sheng Shen, Liunian Harold Li, Hao Tan, Mohit Bansal, Anna Rohrbach, Kai-Wei Chang, Zhewei Yao, and Kurt Keutzer. How much can clip benefit vision-and-language tasks? In ICLR, 2021. 1" + ], + "bbox": [ + 78, + 90, + 470, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[48] Robin Strudel, Ivan Laptev, and Cordelia Schmid. Weakly-supervised segmentation of referring expressions. arXiv preprint arXiv:2205.04725, 2022. 1, 3, 6, 7", + "[49] Sanjay Subramanian, William Merrill, Trevor Darrell, Matt Gardner, Sameer Singh, and Anna Rohrbach. 
Reclip: A strong zero-shot baseline for referring expression comprehension. In ACL, 2022. 2, 7", + "[50] Hao Tan and Mohit Bansal. Lxmert: Learning cross-modality encoder representations from transformers. In EMNLP, 2019. 1", + "[51] Mengmeng Wang, Jiazheng Xing, and Yong Liu. Actionclip: A new paradigm for video action recognition. arXiv preprint arXiv:2109.08472, 2021. 2", + "[52] Xudong Wang, Rohit Girdhar, Stella X Yu, and Ishan Misra. Cut and learn for unsupervised object detection and instance segmentation. arXiv preprint arXiv:2301.11320, 2023. 3", + "[53] Xinlong Wang, Zhiding Yu, Shalini De Mello, Jan Kautz, Anima Anandkumar, Chunhua Shen, and Jose M Alvarez. Freesolo: Learning to segment objects without annotations. In CVPR, 2022. 3, 4, 5, 6, 7", + "[54] Zhaoqing Wang, Yu Lu, Qiang Li, Xunqiang Tao, Yandong Guo, Mingming Gong, and Tongliang Liu. Cris: Clip-driven referring image segmentation. In CVPR, 2022. 1, 3, 7, 8", + "[55] Zhichao Wei, Xiaohao Chen, Mingqiang Chen, and Siyu Zhu. Learning aligned cross-modal representations for referring image segmentation. arXiv preprint arXiv:2301.06429, 2023. 3", + "[56] Chenyun Wu, Zhe Lin, Scott Cohen, Trung Bui, and Subhransu Maji. Phrasescut: Language-based image segmentation in the wild. In CVPR, 2020. 7", + "[57] Jianzong Wu, Xiangtai Li, Xia Li, Henghui Ding, Yunhai Tong, and Dacheng Tao. Towards robust referring image segmentation. arXiv preprint arXiv:2209.09554, 2022. 3", + "[58] Mengde Xu, Zheng Zhang, Fangyun Wei, Han Hu, and Xiang Bai. Side adapter network for open-vocabulary semantic segmentation. arXiv preprint arXiv:2302.12242, 2023. 2", + "[59] Mengde Xu, Zheng Zhang, Fangyun Wei, Yutong Lin, Yue Cao, Han Hu, and Xiang Bai. A simple baseline for open-vocabulary semantic segmentation with pre-trained vision-language model. In ECCV, 2022. 1, 2, 5, 7", + "[60] Zhao Yang, Jiaqi Wang, Yansong Tang, Kai Chen, Hengshuang Zhao, and Philip HS Torr. Lavt: Language-aware vision transformer for referring image segmentation. In CVPR, 2022. 3, 6, 7, 8", + "[61] Linwei Ye, Mrigank Rochan, Zhi Liu, and Yang Wang. Cross-modal self-attention network for referring image segmentation. In CVPR, 2019. 3", + "[62] Zicheng Zhang, Yi Zhu, Jianzhuang Liu, Xiaodan Liang, and Wei Ke. Coupalign: Coupling word-pixel with sentence-mask alignments for referring image segmentation. In NeurIPS, 2022. 3", + "[63] Chong Zhou, Chen Change Loy, and Bo Dai. Extract free dense labels from clip. In ECCV, 2022. 1, 2, 7", + "[64] Ziqin Zhou, Bowen Zhang, Yinjie Lei, Lingqiao Liu, and Yifan Liu. Zegclip: Towards adapting clip for zero-shot semantic segmentation. 
arXiv preprint arXiv:2212.03588, 2022.2" + ], + "bbox": [ + 501, + 92, + 893, + 891 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "19465", + "bbox": [ + 480, + 945, + 517, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2023/Zero-Shot Referring Image Segmentation With Global-Local Context Features/d944ca48-5a24-4209-88df-55a9c1e47851_model.json b/2023/Zero-Shot Referring Image Segmentation With Global-Local Context Features/d944ca48-5a24-4209-88df-55a9c1e47851_model.json new file mode 100644 index 0000000000000000000000000000000000000000..440d2ada3194ebe6a88aa4fc4793a1fa24d6e4c7 --- /dev/null +++ b/2023/Zero-Shot Referring Image Segmentation With Global-Local Context Features/d944ca48-5a24-4209-88df-55a9c1e47851_model.json @@ -0,0 +1,2992 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.094, + 0.131, + 0.877, + 0.153 + ], + "angle": 0, + "content": "Zero-shot Referring Image Segmentation with Global-Local Context Features" + }, + { + "type": "text", + "bbox": [ + 0.253, + 0.179, + 0.38, + 0.198 + ], + "angle": 0, + "content": "Seonghoon \\(\\mathrm{Yu}^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.419, + 0.181, + 0.585, + 0.199 + ], + "angle": 0, + "content": "Paul Hongsuck Seo2" + }, + { + "type": "text", + "bbox": [ + 0.624, + 0.181, + 0.715, + 0.199 + ], + "angle": 0, + "content": "Jeany Son1" + }, + { + "type": "text", + "bbox": [ + 0.282, + 0.199, + 0.503, + 0.216 + ], + "angle": 0, + "content": "\\(^{1}\\)AI Graduate School, GIST" + }, + { + "type": "text", + "bbox": [ + 0.544, + 0.2, + 0.689, + 0.217 + ], + "angle": 0, + "content": "2Google Research" + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.22, + 0.402, + 0.234 + ], + "angle": 0, + "content": "seonghoon@gm.gist.ac.kr" + }, + { + "type": "text", + "bbox": [ + 0.441, + 0.22, + 0.587, + 0.234 + ], + "angle": 0, + "content": "phseo@google.com" + }, + { + "type": "text", + "bbox": [ + 0.628, + 0.22, + 0.771, + 0.234 + ], + "angle": 0, + "content": "jeany@gist.ac.kr" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.269, + 0.314, + 0.285 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.301, + 0.474, + 0.619 + ], + "angle": 0, + "content": "Referring image segmentation (RIS) aims to find a segmentation mask given a referring expression grounded to a region of the input image. Collecting labelled datasets for this task, however, is notoriously costly and labor-intensive. To overcome this issue, we propose a simple yet effective zero-shot referring image segmentation method by leveraging the pre-trained cross-modal knowledge from CLIP. In order to obtain segmentation masks grounded to the input text, we propose a mask-guided visual encoder that captures global and local contextual information of an input image. By utilizing instance masks obtained from off-the-shelf mask proposal techniques, our method is able to segment fine-detailed instance-level groundings. 
We also introduce a global-local text encoder where the global feature captures complex sentence-level semantics of the entire input expression while the local feature focuses on the target noun phrase extracted by a dependency parser. In our experiments, the proposed method outperforms several zero-shot baselines of the task and even the weakly supervised referring expression segmentation method with substantial margins. Our code is available at https://github.com/Seonghoon-Yu/Zero-shot-RIS." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.663, + 0.21, + 0.68 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.689, + 0.471, + 0.84 + ], + "angle": 0, + "content": "Recent advances of deep learning has revolutionised computer vision and natural language processing, and addressed various tasks in the field of vision-and-language [4, 19, 27, 28, 36, 43, 50]. A key element in the recent success of the multi-modal models such as CLIP [43] is the contrastive image-text pre-training on a large set of image and text pairs. It has shown a remarkable zero-shot transferability on a wide range of tasks, such as object detection [9, 10, 13], semantic segmentation [7, 12, 59, 63], image captioning [40], visual question answering (VQA) [47] and so on." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Despite its good transferability of pre-trained multi-modal models, it is not straightforward to handle dense prediction tasks such as object detection and image segmentation. A pixel-level dense prediction task is challenging since there" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.272, + 0.704, + 0.284 + ], + "angle": 0, + "content": "(a) Referring image segmentation task" + }, + { + "type": "image_caption", + "bbox": [ + 0.523, + 0.285, + 0.585, + 0.295 + ], + "angle": 0, + "content": "Input image" + }, + { + "type": "image_caption", + "bbox": [ + 0.671, + 0.285, + 0.722, + 0.296 + ], + "angle": 0, + "content": "Input text" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.295, + 0.599, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.603, + 0.307, + 0.792, + 0.317 + ], + "angle": 0, + "content": "\"a cat is lying on the seat of the scooter\"" + }, + { + "type": "image_caption", + "bbox": [ + 0.659, + 0.326, + 0.735, + 0.336 + ], + "angle": 0, + "content": "\"the bottom cat\"" + }, + { + "type": "image_footnote", + "bbox": [ + 0.616, + 0.345, + 0.778, + 0.356 + ], + "angle": 0, + "content": "\"a scooter with two cats sitting on\"" + }, + { + "type": "image_caption", + "bbox": [ + 0.807, + 0.285, + 0.875, + 0.295 + ], + "angle": 0, + "content": "Output mask" + }, + { + "type": "image", + "bbox": [ + 0.795, + 0.295, + 0.887, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.372, + 0.664, + 0.383 + ], + "angle": 0, + "content": "(b) Global-Local context in RIS" + }, + { + "type": "image_caption", + "bbox": [ + 0.534, + 0.386, + 0.611, + 0.395 + ], + "angle": 0, + "content": "Local-context" + }, + { + "type": "image_caption", + "bbox": [ + 0.536, + 0.402, + 0.608, + 0.411 + ], + "angle": 0, + "content": "a cat is lying on" + }, + { + "type": "image_caption", + "bbox": [ + 0.517, + 0.412, + 0.604, + 0.421 + ], + "angle": 0, + "content": "the seat of the scod" + }, + { + "type": "image", + "bbox": [ + 0.558, + 0.426, + 0.588, + 0.452 + ], + "angle": 0, + "content": null + }, + { + "type": "image", 
+ "bbox": [ + 0.518, + 0.462, + 0.556, + 0.494 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.596, + 0.462, + 0.622, + 0.492 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.657, + 0.385, + 0.739, + 0.395 + ], + "angle": 0, + "content": "Global-context" + }, + { + "type": "image_caption", + "bbox": [ + 0.658, + 0.402, + 0.736, + 0.412 + ], + "angle": 0, + "content": "a cat is lying on" + }, + { + "type": "image_caption", + "bbox": [ + 0.639, + 0.412, + 0.753, + 0.421 + ], + "angle": 0, + "content": "the seat of the scooter" + }, + { + "type": "image", + "bbox": [ + 0.644, + 0.425, + 0.748, + 0.494 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.765, + 0.385, + 0.882, + 0.395 + ], + "angle": 0, + "content": "Global-Local context" + }, + { + "type": "image_caption", + "bbox": [ + 0.781, + 0.404, + 0.863, + 0.414 + ], + "angle": 0, + "content": "a cat is lying on" + }, + { + "type": "image_caption", + "bbox": [ + 0.769, + 0.414, + 0.876, + 0.423 + ], + "angle": 0, + "content": "the seat of the scooter" + }, + { + "type": "image", + "bbox": [ + 0.774, + 0.426, + 0.872, + 0.496 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.516, + 0.895, + 0.572 + ], + "angle": 0, + "content": "Figure 1. Illustrations of the task of referring image segmentation and motivations of global-local context features. To find the grounded mask given an expression, we need to understand the relations between the objects as well as their semantics." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.599, + 0.895, + 0.704 + ], + "angle": 0, + "content": "is a substantial gap between the image-level contrastive pretraining task and the pixel-level downstream task such as semantic segmentation. There have been several attempts to reduce gap between two tasks [44, 54, 63], but these works aim to fine-tune the model consequently requiring task-specific dense annotations, which is notoriously labor-intensive and costly." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.705, + 0.896, + 0.902 + ], + "angle": 0, + "content": "Referring image segmentation is a task to find the specific region in an image given a natural language text describing the region, and it is well-known as one of challenging vision- and language tasks. Collecting annotations for this task is even more challenging as the task requires to collect precise referring expression of the target region as well as its dense mask annotation. Recently, a weakly-supervised referring image segmentation method [48] is proposed to overcome this issue. However, it still requires high-level text expression annotations pairing with images for the target datasets and the performance of the method is far from that of the supervised methods. To tackle this issue, in this paper, we focus on zero-shot transferring from the pre-trained knowledge" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "19456" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.423, + 0.106 + ], + "angle": 0, + "content": "of CLIP to the task of referring image segmentation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.107, + 0.471, + 0.379 + ], + "angle": 0, + "content": "Moreover, this task is challenging because it requires high-level understanding of language and comprehensive understanding of an image, as well as a dense instance-level prediction. 
There have been several works for zero-shot semantic segmentation [7, 12, 59, 63], but they cannot be directly extended to the zero-shot referring image segmentation task because it has different characteristics. Specifically, the semantic segmentation task does not need to distinguish instances, but the referring image segmentation task should be able to predict an instance-level segmentation mask. In addition, among multiple instances of the same class, only one instance described by the expression must be selected. For example, in Figure 1, there are two cats in the input image. If the input text is given by \"a cat is lying on the seat of the scooter\", the cat with the green mask is the proper output. To find this correct mask, we need to understand the relation between the objects (i.e. \"lying on the seat\") as well as their semantics (i.e. \"cat\", \"scooter\")." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.38, + 0.471, + 0.636 + ], + "angle": 0, + "content": "In this paper, we propose a new baseline of zero-shot referring image segmentation task using a pre-trained model from CLIP, where global and local contexts of an image and an expression are handled in a consistent way. In order to localize an object mask region in an image given a textual referring expression, we propose a mask-guided visual encoder that captures global and local context information of an image given a mask. We also present a global-local textual encoder where the local-context is captured by a target noun phrase and the global context is captured by a whole sentence of the expressions. By combining features in two different context levels, our method is able to understand a comprehensive knowledge as well as a specific trait of the target object. Note that, although our method does not require any additional training on CLIP model, it outperforms all baselines and the weakly supervised referring image segmentation method with a big margin." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.638, + 0.458, + 0.652 + ], + "angle": 0, + "content": "Our main contributions can be summarised as follows:" + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.664, + 0.47, + 0.739 + ], + "angle": 0, + "content": "- We propose a new task of zero-shot referring image segmentation based on CLIP without any additional training. To the best of our knowledge, this is the first work to study the zero-shot referring image segmentation task." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.752, + 0.47, + 0.828 + ], + "angle": 0, + "content": "- We present a visual encoder and a textual encoder that integrates global and local contexts of images and sentences, respectively. Although the modalities of two encoders are different, our visual and textual features are dealt in a consistent way." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.84, + 0.47, + 0.9 + ], + "angle": 0, + "content": "- The proposed global-local context features take full advantage of CLIP to capture the target object semantics as well as the relations between the objects in both visual and textual modalities." + }, + { + "type": "list", + "bbox": [ + 0.097, + 0.664, + 0.47, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.092, + 0.892, + 0.152 + ], + "angle": 0, + "content": "- Our method consistently shows outstanding results compared to several baseline methods, and also outperforms the weakly supervised referring image segmentation method with substantial margins." 
+ }, + { + "type": "title", + "bbox": [ + 0.5, + 0.169, + 0.642, + 0.185 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.196, + 0.895, + 0.513 + ], + "angle": 0, + "content": "Zero-shot Transfer. Classical zero-shot learning aims to predict unseen classes that have not seen before by transferring the knowledge trained on the seen classes. Early works [3, 14, 34] leverage the pre-trained word embedding [5, 39] of class names or attributes and perform zero-shot prediction via mapping between visual representations of images and this word embedding. Recently, CLIP [43] and ALIGNN [19] shed a new light on the zero-shot learning via large-scale image-text pre-training. They show the successive results on various downstream tasks via zero-shot knowledge transfer, such as image captioning [40], video action localization [51], image-text retrieval [1] and so on. Contrary to classical zero-shot learning, zero-shot transfer has an advantage of avoiding fine-tuning the pre-trained model on the task-specific dataset, where collecting datasets is time-consuming. There have been several works that apply CLIP encoders directly with tiny architectural modification without additional training for semantic segmentation [63], referring expression comprehension [49], phrase localization [25] and object localization [17]. Our work is also lying on the line of this research field." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.539, + 0.895, + 0.901 + ], + "angle": 0, + "content": "Zero-shot Dense Prediction Tasks. Very recently, with the success of pre-training models using large-scale image-text pairs, there have been several attempts to deal with dense prediction tasks with CLIP, e.g. object detection [9, 10, 13, 24, 30, 45], semantic segmentation [22, 29, 37, 42, 44, 58, 59, 63, 64] and so on. These dense prediction tasks, however, are challenging since CLIP learns image-level features not pixel-level fine-grained features. In order to handle this issue, ViLD [13] introduces a method which crop the image to contain only the bounding box region, and then extract the visual features of cropped regions using CLIP to classify the unseen objects. This approach is applied in a wide range of dense prediction tasks which are demanded the zero-shot transfer ability of CLIP [7, 9, 10, 12, 49, 59]. While this method only considers the cropped area, there are several methods [25, 63] to consider the global context in the image, not only just the cropped region. Adapting CLIP [25] proposed the phrase localization method by modifying CLIP to generate high-resolution spatial feature maps using superpixels. MaskCLIP [63] modifies the image encoder of CLIP by transforming the value embedding layer and the last linear layer into two \\(1 \\times 1\\) convolutional layers to handle pixel-level predictions. In this work, we focus on extracting both global and local context visual features with CLIP." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "19457" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.087, + 0.895, + 0.288 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.298, + 0.893, + 0.34 + ], + "angle": 0, + "content": "Figure 2. Overall framework of our global-local CLIP. 
Given an image and an expression as inputs, we extract global-local context visual features using mask proposals, and also we extract a global-local context textual feature. After computing the cosine similarity scores between all global-local context visual features and a global-local context textual feature, we choose the mask with the highest score." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.367, + 0.473, + 0.79 + ], + "angle": 0, + "content": "Referring Image Segmentation. Referring image segmentation aims to segment a target object in an image given a natural linguistic expression introduced by [18]. There have been several fully-supervised methods for this task, where images and expressions are used as an input, and the target mask is given for training [2, 20, 33, 55, 60, 62]. Most of works [6, 11, 23, 60, 61] focuses on how to fuse those two features in different modalities extracted from independent encoders. Early works [26, 32] extract multi-modal features by simply concatenating visual and textual features and feed them into the segmentation networks [35] to predict dense segmentation masks. There have been two branches of works fusing cross-modal features; an attention based encoder fusion [11, 57, 60] and a cross-modal decoder fusion based on a Transformer decoder [6, 54, 61]. Recently, a CLIP-based approach, which learns separated image and text transformer using a contrastive pre-training, has been proposed [54]. Those fully supervised referring image segmentation methods show good performances in general, but they require dense annotations for target masks and comprehensive expressions describing the target object. To address this problem, TSEG [48] proposed a weakly-supervised referring image segmentation method which learns the segmentation model using text-based image-level supervisions. However, this method still requires high-level referring expression annotations with images for specific datasets. Therefore, we propose a new baseline for zero-shot referring image segmentation without any training or supervisions." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.812, + 0.168, + 0.828 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.841, + 0.471, + 0.901 + ], + "angle": 0, + "content": "In this section, we present the proposed method for zero-shot referring image segmentation in detail. We first show an overall framework of the proposed method (3.1), and then discuss the detailed methods for extracting visual features" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.366, + 0.892, + 0.396 + ], + "angle": 0, + "content": "(3.2) and textual features (3.3) to encode global and local contextual information." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.408, + 0.691, + 0.423 + ], + "angle": 0, + "content": "3.1. Overall Framework" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.432, + 0.895, + 0.522 + ], + "angle": 0, + "content": "To solve the task of referring image segmentation, which aims to predict the target region grounded to the text description, it is essential to learn image and text representations in a shared embedding space. To this end, we adopt CLIP to leverage the pre-trained cross-modal features for images and natural language." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.524, + 0.896, + 0.869 + ], + "angle": 0, + "content": "Our framework consists of two parts as shown in Fig 2: (1) global-local visual encoder for visual representation, and (2) global-local natural language encoder for referring expression representation. Given a set of mask proposals generated by an unsupervised mask generator [52, 53], we first extract two visual features in global-context and local-context levels for each mask proposal, and then combine them into a single visual feature. Our global-context visual features can comprehensively represent the masked area as well as the surrounding region, while the local-context visual features can capture the representation of the specific masked region. This acts key roles in the referring image segmentation task because we need to focus a small specific target region using a comprehensive expression of the target. At the same time, given a sentence of expressing the target, our textual representation is extracted by the CLIP text encoder. In order to understand a holistic expression of the target as well as to focus on the target object itself, we first extract a key noun phrase from a sentence using a dependency parsing provided by spaCy [16], and then combine a global sentence feature and a local target noun phrase feature. Note that, our visual and text encoders are designed to handle both global-context and local-context information in a consistent way." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.894, + 0.901 + ], + "angle": 0, + "content": "Since our method is built on CLIP where the visual and textual features are embedded in the common embedding" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "19458" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.107, + 0.089, + 0.264, + 0.426 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.276, + 0.089, + 0.451, + 0.426 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.436, + 0.473, + 0.478 + ], + "angle": 0, + "content": "Figure 3. Detailed illustration of our mask-guided global-context visual encoders in ResNet and ViT architectures: (a) Masked attention pooling in ResNet, (b) Token masking in ViT." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.505, + 0.472, + 0.581 + ], + "angle": 0, + "content": "space, we can formulate the objective of our zero-shot image referring segmentation task as follows. Given inputs of an image \\(I\\) and a referring expression \\(T\\), our method finds the mask that has the maximum similarity between its visual feature and the given textual feature among all mask proposals:" + }, + { + "type": "equation", + "bbox": [ + 0.175, + 0.607, + 0.472, + 0.631 + ], + "angle": 0, + "content": "\\[\n\\hat {m} = \\arg \\max _ {m \\in M (I)} \\operatorname {s i m} (\\mathbf {t}, \\mathbf {f} _ {m}), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.642, + 0.472, + 0.703 + ], + "angle": 0, + "content": "where \\(\\mathrm{sim}(\\cdot, \\cdot)\\) is a cosine similarity, \\(\\mathbf{t}\\) is the proposed global-local textual feature for a referring expression \\(T\\), \\(\\mathbf{f}\\) is the mask-guided global-local visual feature, and \\(M(I)\\) is a mask proposal set for a given image \\(I\\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.712, + 0.436, + 0.728 + ], + "angle": 0, + "content": "3.2. 
Mask-guided Global-local Visual Features" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.471, + 0.824 + ], + "angle": 0, + "content": "To segment the target region related to the referring expression, it is essential to understand a global relationship between multiple objects in the image as well as local semantic information of the target. In this section, we demonstrate how to extract global and local-context features using CLIP, and how to fuse them." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.473, + 0.902 + ], + "angle": 0, + "content": "Since CLIP is designed to learn image-level representation, it is not well-suited for a pixel-level dense prediction such as an image segmentation. To overcome the limitation of using CLIP, we decompose the task into two sub-tasks: mask proposal generation and masked image-text matching." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.168 + ], + "angle": 0, + "content": "In order to generate mask proposals, we use the off-the-shelf mask extractor [53] which is the unsupervised instance-level mask generation model. By using mask proposals explicitly, our method can handle fine-detailed instance-level segmentation masks with CLIP." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.185, + 0.895, + 0.32 + ], + "angle": 0, + "content": "Global-context Visual Features. For each mask proposals, we first extract global-context visual features using the CLIP pre-trained model. The original visual features from CLIP, however, is designed to generate one single feature vector to describe the whole image. To tackle this issue, we modify a visual encoder from CLIP to extract features that contain information from not only the masked region but also surrounding regions to understand relationships between multiple objects." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.321, + 0.895, + 0.427 + ], + "angle": 0, + "content": "In this paper, we use two different architectures for the visual encoder as in CLIP: ResNet [15] and Vision Transformer (ViT) [8]. For the visual encoder with the ResNet architecture, we denote a visual feature extractor without a pooling layer as \\(\\phi_{\\mathrm{f}}\\) and its attention pooling layer as \\(\\phi_{\\mathrm{att}}\\). Then the visual feature, \\(\\mathbf{f}\\), using the visual encoder of CLIP, \\(\\phi_{\\mathrm{CLIP}}\\), can be expressed as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.601, + 0.435, + 0.892, + 0.452 + ], + "angle": 0, + "content": "\\[\n\\mathbf {f} = \\phi_ {\\mathrm {C L I P}} (I) = \\phi_ {\\mathrm {a t t}} \\left(\\phi_ {\\mathrm {f}} (I)\\right), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.459, + 0.892, + 0.534 + ], + "angle": 0, + "content": "where \\(I\\) is a given image. Similarly, since ViT has multiple multi-head attention layers, we divide this visual encoder into two parts: last \\(k\\) layers and the rest. We denote the former one by \\(\\phi_{\\mathrm{att}}\\), and the later one by \\(\\phi_{\\mathrm{f}}\\) for ViT architectures based on CLIP." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.535, + 0.892, + 0.565 + ], + "angle": 0, + "content": "Then given an image \\(I\\) and a mask \\(m\\), our global-context visual feature is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.616, + 0.572, + 0.892, + 0.591 + ], + "angle": 0, + "content": "\\[\n\\mathbf {f} _ {m} ^ {G} = \\phi_ {\\text {a t t}} \\left(\\phi_ {f} (I) \\odot \\bar {m}\\right), \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.599, + 0.892, + 0.658 + ], + "angle": 0, + "content": "where \\(\\bar{m}\\) is the resized mask scaled to the size of the feature map, and \\(\\odot\\) is a Hadamard product operation. We illustrate more details of this masking strategy for each architecture of CLIP in Section 4.1 and Figure 3." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.659, + 0.892, + 0.763 + ], + "angle": 0, + "content": "We refer to it as the global context visual feature, because the entire image is passed through the encoder and the feature map at the last layer contains the holistic information about the image. Although we use mask proposals to obtain the features only on masked regions on the feature map, these features already have comprehensive information about the scene." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.782, + 0.895, + 0.874 + ], + "angle": 0, + "content": "Local-context Visual Features. To obtain local-context visual features given a mask proposal, we first mask the image and then crop the image to obtain a new image surrounding only an area of the mask proposal. After cropping and masking the image, it is passed to the visual encoder of CLIP to extract our local-context visual feature \\(\\mathbf{f}_m^L\\):" + }, + { + "type": "equation", + "bbox": [ + 0.604, + 0.88, + 0.892, + 0.898 + ], + "angle": 0, + "content": "\\[\n\\mathbf {f} _ {m} ^ {L} = \\phi_ {\\mathrm {C L I P}} \\left(\\mathcal {T} _ {\\text {c r o p}} (I \\odot m)\\right), \\tag {4}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "19459" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.091, + 0.471, + 0.168 + ], + "angle": 0, + "content": "where \\(\\mathcal{T}_{crop}(\\cdot)\\) denotes a cropping operation. This approach is commonly used in zero-shot semantic segmentation methods [7,59]. Since this feature focuses on the masked region in the image where irrelevant regions are removed, it concentrates only on the target object itself." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.185, + 0.47, + 0.26 + ], + "angle": 0, + "content": "Global-local Context Visual features. We aggregate global- and local-context features over masked regions to obtain one single visual feature that describe a representation of masked regions of the image. The global-local context visual feature is computed as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.185, + 0.266, + 0.47, + 0.285 + ], + "angle": 0, + "content": "\\[\n\\mathbf {f} _ {m} = \\alpha \\mathbf {f} _ {m} ^ {G} + (1 - \\alpha) \\mathbf {f} _ {m} ^ {L}, \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.292, + 0.471, + 0.397 + ], + "angle": 0, + "content": "where \\(\\alpha \\in [0,1]\\) is a constant parameter, \\(m\\) is a mask proposal, \\(\\mathbf{f}^G\\) and \\(\\mathbf{f}^L\\) are global-context and local-context visual features in Eq. (3) and Eq. (4), respectively. As in Eq. 
(1), the score for each mask proposal is then obtained by computing similarity between our global-local context visual features and the textual feature of the expression described in the next section." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.405, + 0.34, + 0.42 + ], + "angle": 0, + "content": "3.3. Global-local Textual Features" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.429, + 0.47, + 0.504 + ], + "angle": 0, + "content": "Similar to the visual features, it is important to understand a holistic meaning as well as the target object noun in given expressions. Given a referring expression \\( T \\), we extract a global sentence feature, \\( \\mathbf{t}^G \\), using the pre-trained CLIP text encoder, \\( \\psi_{\\mathrm{CLIP}} \\), as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.217, + 0.51, + 0.47, + 0.528 + ], + "angle": 0, + "content": "\\[\n\\mathbf {t} ^ {G} = \\psi_ {\\mathrm {C L I P}} (T). \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.535, + 0.47, + 0.626 + ], + "angle": 0, + "content": "Although the CLIP text encoder can extract the textual representation aligning with the image-level representation, it is hard to focus on the target noun in the expression because the expression of this task is formed as a complex sentence containing multiple clauses, e.g. \"a dark brown leather sofa behind a foot stool that has a laptop computer on it\"." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.626, + 0.47, + 0.746 + ], + "angle": 0, + "content": "To address this problem, we exploit a dependency parsing using spaCy [16] to find the target noun phrase, \\(\\mathrm{NP}(T)\\), given the text expression \\(T\\). To find the target noun phrase, we first find all noun phrases in the expression, and then select the target noun phrase that contains the root noun of the sentence. After identifying the target noun phrase in the input sentence, we extract the local-context textual feature from the CLIP textual encoder:" + }, + { + "type": "equation", + "bbox": [ + 0.201, + 0.753, + 0.47, + 0.771 + ], + "angle": 0, + "content": "\\[\n\\mathbf {t} ^ {L} = \\psi_ {\\mathrm {C L I P}} (\\mathrm {N P} (T)). \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.778, + 0.47, + 0.824 + ], + "angle": 0, + "content": "Finally, our global-local context textual feature is computed by a weighted sum of the global and local textual features described in Eq. (6) and Eq. (7) as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.191, + 0.829, + 0.47, + 0.848 + ], + "angle": 0, + "content": "\\[\n\\mathbf {t} = \\beta \\mathbf {t} ^ {G} + (1 - \\beta) \\mathbf {t} ^ {L}, \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.855, + 0.47, + 0.902 + ], + "angle": 0, + "content": "where \\(\\beta \\in [0,1]\\) is a constant parameter, \\(\\mathbf{t}^G\\) and \\(\\mathbf{t}^L\\) are global sentence and local noun-phrase textual features, respectively." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.091, + 0.722, + 0.108 + ], + "angle": 0, + "content": "4. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.116, + 0.894, + 0.207 + ], + "angle": 0, + "content": "We use unsupervised instance segmentation methods, FreeSOLO [53], to obtain mask proposals, and the shorter size of an input image is set to 800. For CLIP, the size of an image is set to \\(224 \\times 224\\). The number of masking layers, \\(k\\) in ViT is set to 3. 
We set \\(\\alpha = 0.85\\) for RefCOCOg, 0.95 for RefCOCO and RefCOCO+, and \\(\\beta = 0.5\\) for all datasets." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.217, + 0.864, + 0.233 + ], + "angle": 0, + "content": "4.1. Masking in Global-context Visual Encoder" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.24, + 0.894, + 0.3 + ], + "angle": 0, + "content": "We use both ResNet-50 and ViT-B/32 architectures for the CLIP visual encoder. Masking strategies of the global-context visual encoder for these two architecture are mostly similar but have small differences, described next." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.32, + 0.895, + 0.532 + ], + "angle": 0, + "content": "Masked Attention Pooling in ResNet [15]. In a ResNet-based visual encoder of the original CLIP, a global average pooling layer is replaced by an attention pooling layer. This attention pooling layer has the same architecture as the multi-head attention in a Transformer. A query of the attention pooling layer is computed by a global average pooling operation onto the feature maps extracted by the ResNet backbone. A key and a value of the attention pooling layer is given by a flattened feature map. In our masked attention pooling, we mask the feature map using a given mask before computing query, key and value. After masking feature maps, we compute query, key and value, and then they are fed into the multi-head attention layer. The detailed illustration of our masked attention pooling in ResNet is shown in Figure 3a." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.551, + 0.894, + 0.793 + ], + "angle": 0, + "content": "Token Masking in ViT [8]. Following ViT, we divide an image into grid patches, and embed patches to a linear layer with positional embeddings to get tokens, and then process those tokens with a series of Transformer layer. To capture global-context of images, we mask tokens in only the last \\( k \\) Transformer layers. The tokens are reshaped and masked by a given mask proposal, and then flattened and applied to the subsequent Transformer layer. As ViT has a class token (CLS), we use the final output feature from this CLS token as our global-context visual representation. The detailed method of our token masking in ViT is also shown in Figure 3b. In our experiments, we use ViT-B/32 architecture for the backbone of our ViT-based visual encoder, and we apply a token masking to the last 3 layers in the visual encoder. We show the performances with respect to the location of token masking layers in the supplementary materials." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.807, + 0.634, + 0.825 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.832, + 0.698, + 0.847 + ], + "angle": 0, + "content": "5.1. Datasets and Metrics" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.894, + 0.901 + ], + "angle": 0, + "content": "We evaluate our method on RefCOCO [41], RefCOCO+ [41] and RefCOCOg [21, 38], where the images and masks in MS-COCO [31] dataset are used to annotate" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "19460" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.089, + 0.895, + 0.145 + ], + "angle": 0, + "content": "Table 1. Comparison with Zero-shot RIS baseline methods on three standard benchmark datasets. U: The UMD partition. G: The Google partition. All baseline methods use FreeSOLO as the mask proposal network. 
† denotes that the model is initialized with the ImageNet pre-trained weights and trained on RIS datasets. FreeSOLO upper-bound is computed between the GT mask and the maximum overlapped FreeSOLO mask with the GT mask." + }, + { + "type": "table", + "bbox": [ + 0.097, + 0.156, + 0.875, + 0.442 + ], + "angle": 0, + "content": "
MetricMethodsVisual EncoderRefCOCORefCOCO+RefCOCOg
valtest Atest Bvaltest Atest Bval(U)test(U)val(G)
oIoUSupervised SoTA method [60]72.7375.8268.7962.1468.3855.1061.2462.0960.50
Zero-Shot Baselines
Grad-CAMResNet-5014.0215.0713.4914.4614.9714.0412.5112.8112.86
Score mapResNet-5019.8719.3120.2220.3719.6520.7518.8819.1619.15
Region tokenViT-B/3221.7120.3122.6322.6120.9123.4625.5225.3825.29
CroppingResNet-5022.3620.4922.6923.9522.0323.4928.2027.6427.47
CroppingViT-B/3222.7321.1123.0824.0922.4223.9328.6927.5127.70
Global-Local CLIP (ours)ResNet-5024.5823.3824.3525.8724.6125.6130.0729.8329.45
Global-Local CLIP (ours)ViT-B/3224.8823.6124.6626.1624.9025.8331.1130.9630.69
FreeSOLO upper-bound-42.0842.5243.5242.1742.5243.8048.8148.9648.49
mIoUZero-Shot Baselines
Grad-CAMResNet-5014.2215.9313.1814.8015.8713.7812.4713.1613.30
Score mapResNet-5021.3220.9621.5721.6121.1722.3020.0720.4320.63
Region tokenViT-B/3223.4322.0724.6224.5122.6425.3727.5727.3427.69
CroppingResNet-5024.3122.3724.6626.3123.9425.6931.2730.8730.78
CroppingViT-B/3224.8322.5825.7226.3324.0626.4631.8830.9431.06
Global-Local CLIP (ours)ResNet-5026.7024.9926.4828.2226.5427.8633.0233.1232.79
Global-Local CLIP (ours)ViT-B/3226.2024.9426.5627.8025.6427.8433.5233.6733.61
FreeSOLO upper-bound-48.2546.6250.4348.2846.6250.6252.4452.9152.76
Weakly-supervised method
TSEG [48]ViT-S/16†25.95--22.62--23.41--
" + }, + { + "type": "table", + "bbox": [ + 0.081, + 0.463, + 0.295, + 0.557 + ], + "angle": 0, + "content": "
MethodTrain datasetoIoU on PhraseCut
AllUnseen
CRISRefCOCO15.5313.75
RefCOCO+16.3014.62
RefCOCOg16.2413.88
LAVTRefCOCO16.6814.43
RefCOCO+16.6413.49
RefCOCOg16.0513.48
OursN/A23.6422.98
" + }, + { + "type": "image", + "bbox": [ + 0.298, + 0.463, + 0.457, + 0.558 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.562, + 0.472, + 0.603 + ], + "angle": 0, + "content": "Figure 4. Comparisons to supervised methods in zero-shot setting on PhraseCut (left), and in few-shot setting on RefCOCOg (right). Unseen denotes a subset with classes that are not seen in RefCOCO." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.626, + 0.472, + 0.776 + ], + "angle": 0, + "content": "the ground-truth of the referring image segmentation task. RefCOCO, RefCOCO+ and RefCOCOg have 19,994, 19,992 and 26,711 images with 142,210, 141,564 and 104,560 referring expressions, respectively. RefCOCO and RefCOCO+ have shorter expressions and an average of 1.6 nouns and 3.6 words are included in one expression, while RefCOCOg expresses more complex relations with longer sentences and has an average of about 2.8 nouns and 8.4 words. The detailed statistics of those datasets are demonstrated in our supplementary materials." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.472, + 0.903 + ], + "angle": 0, + "content": "For the evaluation metrics, we use the overall Intersection over Union (oIoU) and the mean Intersection over Union (mIoU) which are the common metrics for the referring image segmentation task. The oIoU is measured by the total area of intersection divided by the total area of union, where the total area is computed by accumulating over all examples. In our ablation study, we use oIoUs since most of supervised RIS methods [6, 23] adopt it. We also report the mIoUs as" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.463, + 0.892, + 0.49 + ], + "angle": 0, + "content": "Table 2. oIoU results of our method and the baselines using COCO instance GT masks. We use a ViT-B/32 model for a visual encoder." + }, + { + "type": "table", + "bbox": [ + 0.529, + 0.502, + 0.861, + 0.58 + ], + "angle": 0, + "content": "
MethodRefCOCORefCOCO+RefCOCOg
Grad-CAM18.3218.1421.24
Score map23.9725.5028.11
Region token35.5938.1340.19
Cropping36.3242.0747.42
Ours37.0542.5951.01
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.592, + 0.892, + 0.621 + ], + "angle": 0, + "content": "Table 3. oIoU results with different context-level features on the val split of RefCOCOg. We use a ViT-B/32 model for a visual encoder." + }, + { + "type": "table", + "bbox": [ + 0.521, + 0.631, + 0.871, + 0.697 + ], + "angle": 0, + "content": "
Encoder VariantsTextual features
GlobalLocalGlobal-Local
Visual \nfeaturesGlobal27.0327.3727.60
Local28.6925.2329.48
Global-Local30.1827.9431.11
" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.72, + 0.891, + 0.751 + ], + "angle": 0, + "content": "in [48], which computes the average IoU across all examples while considering the object sizes." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.76, + 0.608, + 0.774 + ], + "angle": 0, + "content": "5.2. Baselines" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.783, + 0.895, + 0.843 + ], + "angle": 0, + "content": "We modify some baseline methods extracting dense predictions from CLIP into zero-shot RIS task to compare with our framework, and use FreeSOLO [53] as a mask generator in all baselines." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.856, + 0.895, + 0.901 + ], + "angle": 0, + "content": "- Grad-CAM: The first baseline is utilizing gradient-based activation map based on Grad-CAM [46] which has been verified in the prior work [17]. After obtaining the activa" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "19461" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.141, + 0.089, + 0.176, + 0.099 + ], + "angle": 0, + "content": "Image" + }, + { + "type": "image", + "bbox": [ + 0.11, + 0.101, + 0.21, + 0.177 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.216, + 0.126, + 0.279, + 0.137 + ], + "angle": 0, + "content": "Expression:" + }, + { + "type": "image_caption", + "bbox": [ + 0.23, + 0.138, + 0.437, + 0.148 + ], + "angle": 0, + "content": "the banana the person is holding" + }, + { + "type": "image_caption", + "bbox": [ + 0.141, + 0.179, + 0.159, + 0.187 + ], + "angle": 0, + "content": "GT" + }, + { + "type": "image", + "bbox": [ + 0.111, + 0.189, + 0.185, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.201, + 0.179, + 0.263, + 0.188 + ], + "angle": 0, + "content": "Local visual" + }, + { + "type": "image", + "bbox": [ + 0.194, + 0.189, + 0.267, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.281, + 0.179, + 0.351, + 0.188 + ], + "angle": 0, + "content": "Global visual" + }, + { + "type": "image", + "bbox": [ + 0.279, + 0.189, + 0.353, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.368, + 0.178, + 0.434, + 0.188 + ], + "angle": 0, + "content": "Global-Local" + }, + { + "type": "image", + "bbox": [ + 0.365, + 0.189, + 0.438, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.141, + 0.25, + 0.176, + 0.26 + ], + "angle": 0, + "content": "Image" + }, + { + "type": "image", + "bbox": [ + 0.11, + 0.261, + 0.21, + 0.338 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.217, + 0.281, + 0.279, + 0.291 + ], + "angle": 0, + "content": "Expression:" + }, + { + "type": "image_caption", + "bbox": [ + 0.23, + 0.292, + 0.429, + 0.312 + ], + "angle": 0, + "content": "a green bicycle ridden by a man in a black windbreaker" + }, + { + "type": "image_caption", + "bbox": [ + 0.141, + 0.34, + 0.159, + 0.348 + ], + "angle": 0, + "content": "GT" + }, + { + "type": "image", + "bbox": [ + 0.11, + 0.351, + 0.183, + 0.406 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.2, + 0.34, + 0.263, + 0.349 + ], + "angle": 0, + "content": "Local visual" + }, + { + "type": "image", + "bbox": [ + 0.196, + 0.351, + 0.268, + 0.406 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.282, 
+ 0.34, + 0.351, + 0.349 + ], + "angle": 0, + "content": "Global visual" + }, + { + "type": "image", + "bbox": [ + 0.28, + 0.351, + 0.353, + 0.406 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.368, + 0.339, + 0.435, + 0.349 + ], + "angle": 0, + "content": "Global-Local" + }, + { + "type": "image", + "bbox": [ + 0.365, + 0.351, + 0.438, + 0.406 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.418, + 0.471, + 0.46 + ], + "angle": 0, + "content": "Figure 5. Qualitative results with different levels of visual features. COCO instance GT masks are used as mask proposals to validate the effect of the global-local context visual features." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.488, + 0.471, + 0.533 + ], + "angle": 0, + "content": "tion maps using the similarity score of image-text pairs, we mask the maps and aggregate scores for all mask proposals, and select the mask with the highest score." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.545, + 0.473, + 0.726 + ], + "angle": 0, + "content": "- **Score Map:** The second baseline is the method extracting a dense score map as in MaskCLIP [63]. As in MaskCLIP, to obtain dense score maps without pooling, a value linear layer and the last layer in the attention pooling are transformed into two consecutive \\(1 \\times 1\\) convolution layers. The feature map extracted from ResNet is forwarded to those two layers to get language-compatible dense image feature map, and then compute a cosine similarity with CLIP's textual feature. After obtaining a score map, we project mask proposals to a score map. The scores in the mask area are averaged and then we select the mask with the maximum score." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.738, + 0.472, + 0.844 + ], + "angle": 0, + "content": "- Region Token in ViT: The third baseline is a method used in Adapting CLIP [25]. Similar to Adapting CLIP, we use region tokens for each mask proposal for all Transformer layers in CLIP's visual encoder instead of using superpixels. We finally compute the cosine similarity between each class token of a mask proposal and CLIP's textual feature, and then choose the mask with the highest score." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.856, + 0.471, + 0.902 + ], + "angle": 0, + "content": "- Cropping: The last baseline is our local-context visual features described in Section 3.2. 
Cropping and masking is a commonly used approach utilizing CLIP for extracting" + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.545, + 0.473, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.527, + 0.093, + 0.563, + 0.102 + ], + "angle": 0, + "content": "Image" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.105, + 0.58, + 0.161 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.612, + 0.092, + 0.631, + 0.102 + ], + "angle": 0, + "content": "GT" + }, + { + "type": "image", + "bbox": [ + 0.582, + 0.104, + 0.659, + 0.161 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.668, + 0.092, + 0.724, + 0.102 + ], + "angle": 0, + "content": "Local text" + }, + { + "type": "image", + "bbox": [ + 0.66, + 0.104, + 0.736, + 0.161 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.744, + 0.092, + 0.805, + 0.102 + ], + "angle": 0, + "content": "Global text" + }, + { + "type": "image", + "bbox": [ + 0.738, + 0.105, + 0.814, + 0.161 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.818, + 0.092, + 0.887, + 0.102 + ], + "angle": 0, + "content": "Global-Local" + }, + { + "type": "image", + "bbox": [ + 0.817, + 0.105, + 0.892, + 0.161 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.505, + 0.163, + 0.57, + 0.173 + ], + "angle": 0, + "content": "Expression:" + }, + { + "type": "image", + "bbox": [ + 0.572, + 0.163, + 0.585, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.587, + 0.163, + 0.706, + 0.174 + ], + "angle": 0, + "content": "guy in wheelchair" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.178, + 0.58, + 0.236 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.583, + 0.178, + 0.658, + 0.236 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.66, + 0.178, + 0.735, + 0.236 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.738, + 0.178, + 0.814, + 0.236 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.816, + 0.178, + 0.892, + 0.236 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.505, + 0.238, + 0.57, + 0.247 + ], + "angle": 0, + "content": "Expression:" + }, + { + "type": "image_caption", + "bbox": [ + 0.572, + 0.238, + 0.624, + 0.248 + ], + "angle": 0, + "content": "a woman" + }, + { + "type": "image_caption", + "bbox": [ + 0.576, + 0.249, + 0.632, + 0.258 + ], + "angle": 0, + "content": "umbrella" + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.27, + 0.892, + 0.299 + ], + "angle": 0, + "content": "Figure 6. Qualitative results with different levels of textual features using COCO Instance GT mask proposals." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.324, + 0.892, + 0.369 + ], + "angle": 0, + "content": "mask or box region feature in a range of zero-shot dense prediction tasks [7, 9, 13, 49, 59]. Therefore, we consider cropping as one of the zero-shot RIS baselines." + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.388, + 0.593, + 0.403 + ], + "angle": 0, + "content": "5.3. Results" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.411, + 0.895, + 0.654 + ], + "angle": 0, + "content": "Main Results. 
We report referring image segmentation performances of our global-local CLIP and other baselines on RefCOCO, RefCOCO+ and RefCOCOg in terms of IoU and mIoU metrics in Table 1. For a fair comparison, all methods including baselines use FreeSOLO [53] mask proposals to produce the final output mask. The experimental results show that our method outperforms other baseline methods with substantial margins. Our method also surpasses the weakly supervised referring image segmentation method (TSEG) [48] in terms of mIoU1. We also show upper-bound performances of using FreeSOLO, where the scores are computed by the IoU between ground-truth masks and its max-overlapped mask proposal. Although there is still a gap compared to the fully-supervised referring image segmentation methods, our method improves performance significantly compared to the baselines with the same upper-bound." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.671, + 0.895, + 0.867 + ], + "angle": 0, + "content": "Zero-shot Evaluation on Unseen Domain. To verify the effectiveness of our method in a more practical setting, we report the zero-shot evaluation results with SoTA supervised methods [54, 60] on the test split of PhraseCut [56] in Figure 4 (left). Note that, RefCOCO contains expressions for only 80 salient object classes, whereas PhraseCut covers a variety of additional visual concepts i.e. 1272 categories in the test set. Our method outperforms both supervised methods, even though our models were never trained under RIS supervision. When evaluated on a subset of classes that are not seen in the RefCOCO datasets (Unseen column), the supervised methods show significant performance degradation, whereas our method works robustly on this subset." + }, + { + "type": "page_footnote", + "bbox": [ + 0.501, + 0.875, + 0.892, + 0.901 + ], + "angle": 0, + "content": "1We only compare mIoU scores with TSEG since it reports only mIoU scores in the paper." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "19462" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.101, + 0.089, + 0.874, + 0.278 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.079, + 0.288, + 0.89, + 0.302 + ], + "angle": 0, + "content": "Figure 7. Qualitative results of our method with the several baselines. Note that all methods use mask proposals generated by FreeSOLO." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.329, + 0.473, + 0.554 + ], + "angle": 0, + "content": "Comparison to supervised methods in few-shot Setting. We also compare our model to two supervised RIS methods [54, 60] in a few-shot learning setting, where the training set includes \\( k \\) instances for each of 80 classes in RefCOCO\\(^2\\). Note that the supervised methods use additional forms of supervision in training, whereas our method does not require any form of training or additional supervision; thus this setting is even disadvantageous to our method. Figure 4 (right) shows oIoU while varying \\( k \\) on RefCOCOg. The results clearly show that our method outperforms both supervised methods with large margins when \\( k \\) is small, and the gaps narrow as \\( k \\) gets larger (64 and 256 for LAVT [60] and CRIS [54], respectively). Note that it covers about \\( 10\\% \\) of the training set when \\( k = 64 \\) and the same trends hold for both RefCOCO and RefCOCO+." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.568, + 0.231, + 0.584 + ], + "angle": 0, + "content": "5.4. 
Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.593, + 0.472, + 0.76 + ], + "angle": 0, + "content": "Effects of Mask Quality. To show the impact of the proposed method without considering the mask quality of the mask generators, we evaluate the performance of our method and the baselines with COCO instance GT masks in Table 2. Our approach has demonstrated superior performance compared to all baselines and has shown a performance improvement of over \\(3.5\\%\\), particularly on RefCOCOg which includes longer expressions. We believe that our method performs well on challenging examples that involve complex expressions, such as those with multiple clauses, which require an understanding of both the language and the scene." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.785, + 0.47, + 0.862 + ], + "angle": 0, + "content": "Effects of Global-Local Context Features. We also study the effects of global-local context features in both visual and textual modalities and show the results in Table 3. For this analysis, we use RefCOCOg as it contains more complex expressions with multiple clauses. Among all combinations" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.329, + 0.894, + 0.358 + ], + "angle": 0, + "content": "of two modalities, using both global-local context features in the visual and textual domains leads to the best performance." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.381, + 0.895, + 0.638 + ], + "angle": 0, + "content": "Qualitative Analysis. We demonstrate several results that support the effectiveness of our global-local context visual features in Figure 5. To show this effect more clearly, we use COCO instance GT masks as mask proposals. When using only local-context visual features, the predicted mask tends to focus on the instance that shares the same class as the target object. However, when using only global-context visual features, the predicted mask tends to capture the context of the expression but may focus on a different object class. By combining global and local context, our method successfully finds the target mask. We also demonstrate the effectiveness of our global-local context textual features in Figure 6. Furthermore, we compare the qualitative results of our method with baseline methods in Figure 7. Our proposed global-local CLIP outperforms the baseline methods in identifying the target object by taking into account the global context of the image and expression." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.654, + 0.62, + 0.67 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.679, + 0.895, + 0.818 + ], + "angle": 0, + "content": "In this paper, we propose a simple yet effective zero-shot referring image segmentation framework focusing on transferring knowledges from image-text cross-modal representations of CLIP. To tackle the difficulty of the referring image segmentation task, we propose global-local context encodings to compute similarities between images and expressions, where both target object semantics and relations between the objects are dealt in a unified framework. The proposed method significantly outperforms all baseline methods and weakly supervised method as well." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.831, + 0.894, + 0.901 + ], + "angle": 0, + "content": "Acknowledgement. 
This work was supported by the IITP grants (No.2019-0-01842, No.2021-0-02068, No.2022-0-00926) funded by MSIT, the ISTD program (No.20018334) funded by MOTIE, and the GIST-MIT Research Collaboration grant funded by GIST, Korea." + }, + { + "type": "page_footnote", + "bbox": [ + 0.078, + 0.875, + 0.469, + 0.901 + ], + "angle": 0, + "content": "2we use object classes in RefCOCO GT annotation. This is to cover all salient objects in the dataset during the few-shot training." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "19463" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.116, + 0.47, + 0.157 + ], + "angle": 0, + "content": "[1] Alberto Baldrati, Marco Bertini, Tiberio Uricchio, and Alberto Del Bimbo. Effective conditioned and composed image retrieval combining clip-based features. In CVPR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.159, + 0.469, + 0.2 + ], + "angle": 0, + "content": "[2] Bo Chen, Zhiwei Hu, Zhilong Ji, Jinfeng Bai, and Wangmeng Zuo. Position-aware contrastive alignment for referring image segmentation. arXiv preprint arXiv:2212.13419, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.202, + 0.471, + 0.256 + ], + "angle": 0, + "content": "[3] Shiming Chen, Ziming Hong, Yang Liu, Guo-Sen Xie, Baigui Sun, Hao Li, Qinmu Peng, Ke Lu, and Xinge You. Transzero: Attribute-guided transformer for zero-shot learning. In AAAI, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.259, + 0.471, + 0.313 + ], + "angle": 0, + "content": "[4] Yen-Chun Chen, Linjie Li, Licheng Yu, Ahmed El Kholy, Faisal Ahmed, Zhe Gan, Yu Cheng, and Jingjing Liu. Uniter: Universal image-text representation learning. In ECCV, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.316, + 0.471, + 0.357 + ], + "angle": 0, + "content": "[5] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In ACL, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.36, + 0.471, + 0.4 + ], + "angle": 0, + "content": "[6] Henghui Ding, Chang Liu, Suchen Wang, and Xudong Jiang. Vision-language transformer and query generation for referring segmentation. In ICCV, 2021. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.402, + 0.471, + 0.442 + ], + "angle": 0, + "content": "[7] Jian Ding, Nan Xue, Gui-Song Xia, and Dengxin Dai. Decoupling zero-shot semantic segmentation. In CVPR, 2022. 1, 2, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.446, + 0.471, + 0.527 + ], + "angle": 0, + "content": "[8] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2020. 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.53, + 0.471, + 0.584 + ], + "angle": 0, + "content": "[9] Yu Du, Fangyun Wei, Zihe Zhang, Miaojing Shi, Yue Gao, and Guoqi Li. Learning to prompt for open-vocabulary object detection with vision-language model. In CVPR, 2022. 
1, 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.587, + 0.471, + 0.641 + ], + "angle": 0, + "content": "[10] Chengjian Feng, Yujie Zhong, Zequn Jie, Xiangxiang Chu, Haibing Ren, Xiaolin Wei, Weidi Xie, and Lin Ma. Promptdet: Expand your detector vocabulary with uncurated images. In ECCV, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.644, + 0.471, + 0.685 + ], + "angle": 0, + "content": "[11] Guang Feng, Zhiwei Hu, Lihe Zhang, and Huchuan Lu. Encoder fusion network with co-attention embedding for referring image segmentation. In CVPR, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.687, + 0.471, + 0.728 + ], + "angle": 0, + "content": "[12] Golnaz Ghiasi, Xiuye Gu, Yin Cui, and Tsung-Yi Lin. Scaling open-vocabulary image segmentation with image-level labels. In ECCV, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.731, + 0.471, + 0.771 + ], + "angle": 0, + "content": "[13] Xiuye Gu, Tsung-Yi Lin, Weicheng Kuo, and Yin Cui. Open-vocabulary object detection via vision and language knowledge distillation. In ICML, 2022. 1, 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.774, + 0.471, + 0.813 + ], + "angle": 0, + "content": "[14] Zongyan Han, Zhenyong Fu, Shuo Chen, and Jian Yang. Contrastive embedding for generalized zero-shot learning. In CVPR, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.816, + 0.471, + 0.856 + ], + "angle": 0, + "content": "[15] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.471, + 0.9 + ], + "angle": 0, + "content": "[16] Matthew Honnibal and Mark Johnson. An improved non-monotonic transition system for dependency parsing. In EMNLP, 2015. 3, 5" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.116, + 0.471, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[17] Hsuan-An Hsia, Che-Hsien Lin, Bo-Han Kung, Jhao-Ting Chen, Daniel Stanley Tan, Jun-Cheng Chen, and Kai-Lung Hua. Clipcam: A simple baseline for zero-shot text-guided object and action localization. In ICASSP, 2022. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.149, + 0.894, + 0.188 + ], + "angle": 0, + "content": "[18] Ronghang Hu, Marcus Rohrbach, and Trevor Darrell. Segmentation from natural language expressions. In ECCV, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.19, + 0.892, + 0.245 + ], + "angle": 0, + "content": "[19] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.247, + 0.892, + 0.287 + ], + "angle": 0, + "content": "[20] Ya Jing, Tao Kong, Wei Wang, Liang Wang, Lei Li, and Tieniu Tan. Locate then segment: A strong pipeline for referring image segmentation. In CVPR, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.289, + 0.892, + 0.328 + ], + "angle": 0, + "content": "[21] Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. Referitagame: Referring to objects in photographs of natural scenes. In EMNLP, 2014. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.33, + 0.892, + 0.371 + ], + "angle": 0, + "content": "[22] Kwanyoung Kim, Yujin Oh, and Jong Chul Ye. 
Zegot: Zero-shot segmentation through optimal transport of text prompts. arXiv preprint arXiv:2301.12171, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.372, + 0.892, + 0.413 + ], + "angle": 0, + "content": "[23] Namyup Kim, Dongwon Kim, Cuiling Lan, Wenjun Zeng, and Suha Kwak. Restr: Convolution-free referring image segmentation using transformers. In CVPR, 2022. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.415, + 0.892, + 0.468 + ], + "angle": 0, + "content": "[24] Weicheng Kuo, Yin Cui, Xiuye Gu, AJ Piergiovanni, and Anelia Angelova. F-vlm: Open-vocabulary object detection upon frozen vision and language models. arXiv preprint arXiv:2209.15639, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.47, + 0.892, + 0.51 + ], + "angle": 0, + "content": "[25] Jiahao Li, Greg Shakhnarovich, and Raymond A Yeh. Adapting clip for phrase localization without further training. arXiv preprint arXiv:2204.03647, 2022. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.512, + 0.892, + 0.564 + ], + "angle": 0, + "content": "[26] Ruiyu Li, Kaican Li, Yi-Chun Kuo, Michelle Shu, Xiaojuan Qi, Xiaoyong Shen, and Jiaya Jia. Referring image segmentation via recurrent refinement networks. In CVPR, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.567, + 0.892, + 0.622 + ], + "angle": 0, + "content": "[27] Wei Li, Can Gao, Guocheng Niu, Xinyan Xiao, Hao Liu, Jiachen Liu, Hua Wu, and Haifeng Wang. Unimo: Towards unified-modal understanding and generation via cross-modal contrastive learning. In ACL, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.624, + 0.892, + 0.678 + ], + "angle": 0, + "content": "[28] Xiujun Li, Xi Yin, Chunyuan Li, Pengchuan Zhang, Xiaowei Hu, Lei Zhang, Lijuan Wang, Houdong Hu, Li Dong, Furu Wei, et al. Oscar: Object-semantics aligned pre-training for vision-language tasks. In ECCV, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.679, + 0.892, + 0.746 + ], + "angle": 0, + "content": "[29] Feng Liang, Bichen Wu, Xiaoliang Dai, Kunpeng Li, Yinan Zhao, Hang Zhang, Peizhao Zhang, Peter Vajda, and Diana Marculescu. Open-vocabulary semantic segmentation with mask-adapted clip. arXiv preprint arXiv:2210.04150, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.748, + 0.892, + 0.803 + ], + "angle": 0, + "content": "[30] Chuang Lin, Peize Sun, Yi Jiang, Ping Luo, Lizhen Qu, Gholamreza Haffari, Zehuan Yuan, and Jianfei Cai. Learning object-language alignments for open-vocabulary object detection. arXiv preprint arXiv:2211.14843, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.805, + 0.892, + 0.858 + ], + "angle": 0, + "content": "[31] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dálár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In ECCV, 2014. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.86, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[32] Chenxi Liu, Zhe Lin, Xiaohui Shen, Jimei Yang, Xin Lu, and Alan Yuille. Recurrent multimodal interaction for referring image segmentation. In ICCV, 2017. 
3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "19464" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.147 + ], + "angle": 0, + "content": "[33] Jiang Liu, Hui Ding, Zhaowei Cai, Yuting Zhang, Ravi Kumar Satzoda, Vijay Mahadevan, and R Manmatha. Polyformer: Referring image segmentation as sequential polygon generation. arXiv preprint arXiv:2302.07387, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.151, + 0.47, + 0.192 + ], + "angle": 0, + "content": "[34] Lu Liu, Tianyi Zhou, Guodong Long, Jing Jiang, Xuanyi Dong, and Chengqi Zhang. Isometric propagation network for generalized zero-shot learning. In ICLR, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.194, + 0.471, + 0.234 + ], + "angle": 0, + "content": "[35] Jonathan Long, Evan Shelhamer, and Trevor Darrell. Fully convolutional networks for semantic segmentation. In CVPR, 2015. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.238, + 0.471, + 0.279 + ], + "angle": 0, + "content": "[36] Jiasen Lu, Dhruv Batra, Devi Parikh, and Stefan Lee. Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. In NeurlPS, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.282, + 0.47, + 0.336 + ], + "angle": 0, + "content": "[37] Huaishao Luo, Junwei Bao, Youzheng Wu, Xiaodong He, and Tianrui Li. Segclip: Patch aggregation with learnable centers for open-vocabulary semantic segmentation. arXiv preprint arXiv:2211.14813, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.34, + 0.471, + 0.394 + ], + "angle": 0, + "content": "[38] Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L Yuille, and Kevin Murphy. Generation and comprehension of unambiguous object descriptions. In CVPR, 2016. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.398, + 0.471, + 0.439 + ], + "angle": 0, + "content": "[39] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Corrado, and Jeff Dean. Distributed representations of words and phrases and their compositionality. In NeurIPS, 2013. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.442, + 0.471, + 0.482 + ], + "angle": 0, + "content": "[40] Ron Mokady, Amir Hertz, and Amit H Bermano. Clipcap: Clip prefix for image captioning. arXiv preprint arXiv:2111.09734, 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.485, + 0.471, + 0.526 + ], + "angle": 0, + "content": "[41] Varun K Nagaraja, Vlad I Morariu, and Larry S Davis. Modeling context between objects for referring expression understanding. In ECCV, 2016. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.529, + 0.471, + 0.583 + ], + "angle": 0, + "content": "[42] Prashant Pandey, Mustafa Chasmai, Monish Natarajan, and Brejesh Lall. A language-guided benchmark for weakly supervised open vocabulary semantic segmentation. arXiv preprint arXiv:2302.14163, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.587, + 0.471, + 0.655 + ], + "angle": 0, + "content": "[43] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021. 
1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.659, + 0.471, + 0.713 + ], + "angle": 0, + "content": "[44] Yongming Rao, Wenliang Zhao, Guangyi Chen, Yansong Tang, Zheng Zhu, Guan Huang, Jie Zhou, and Jiwen Lu. Denseclip: Language-guided dense prediction with context-aware prompting. In CVPR, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.716, + 0.471, + 0.784 + ], + "angle": 0, + "content": "[45] Hanoona Abdul Rasheed, Muhammad Maaz, Muhammad Uzair Khattak, Salman Khan, and Fahad Khan. Bridging the gap between object and image-level representations for open-vocabulary detection. In NeurIPS, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.788, + 0.471, + 0.842 + ], + "angle": 0, + "content": "[46] Ramprasaath R Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. Gradcam: Visual explanations from deep networks via gradient-based localization. In ICCV, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.471, + 0.899 + ], + "angle": 0, + "content": "[47] Sheng Shen, Liunian Harold Li, Hao Tan, Mohit Bansal, Anna Rohrbach, Kai-Wei Chang, Zhewei Yao, and Kurt Keutzer. How much can clip benefit vision-and-language tasks? In ICLR, 2021. 1" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.134 + ], + "angle": 0, + "content": "[48] Robin Strudel, Ivan Laptev, and Cordelia Schmid. Weakly-supervised segmentation of referring expressions. arXiv preprint arXiv:2205.04725, 2022. 1, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.894, + 0.189 + ], + "angle": 0, + "content": "[49] Sanjay Subramanian, William Merrill, Trevor Darrell, Matt Gardner, Sameer Singh, and Anna Rohrbach. Reclip: A strong zero-shot baseline for referring expression comprehension. In ACL, 2022. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.192, + 0.894, + 0.232 + ], + "angle": 0, + "content": "[50] Hao Tan and Mohit Bansal. Lxmert: Learning cross-modality encoder representations from transformers. In EMNLP, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.234, + 0.894, + 0.274 + ], + "angle": 0, + "content": "[51] Mengmeng Wang, Jiazheng Xing, and Yong Liu. Actionclip: A new paradigm for video action recognition. arXiv preprint arXiv:2109.08472, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.277, + 0.894, + 0.317 + ], + "angle": 0, + "content": "[52] Xudong Wang, Rohit Girdhar, Stella X Yu, and Ishan Misra. Cut and learn for unsupervised object detection and instance segmentation. arXiv preprint arXiv:2301.11320, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.319, + 0.894, + 0.372 + ], + "angle": 0, + "content": "[53] Xinlong Wang, Zhiding Yu, Shalini De Mello, Jan Kautz, Anima Anandkumar, Chunhua Shen, and Jose M Alvarez. Freesolo: Learning to segment objects without annotations. In CVPR, 2022. 3, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.375, + 0.893, + 0.415 + ], + "angle": 0, + "content": "[54] Zhaoqing Wang, Yu Lu, Qiang Li, Xunqiang Tao, Yandong Guo, Mingming Gong, and Tongliang Liu. Cris: Clip-driven referring image segmentation. In CVPR, 2022. 1, 3, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.417, + 0.894, + 0.469 + ], + "angle": 0, + "content": "[55] Zhichao Wei, Xiaohao Chen, Mingqiang Chen, and Siyu Zhu. 
Learning aligned cross-modal representations for referring image segmentation. arXiv preprint arXiv:2301.06429, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.473, + 0.894, + 0.513 + ], + "angle": 0, + "content": "[56] Chenyun Wu, Zhe Lin, Scott Cohen, Trung Bui, and Subhransu Maji. Phrasescut: Language-based image segmentation in the wild. In CVPR, 2020. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.516, + 0.893, + 0.556 + ], + "angle": 0, + "content": "[57] Jianzong Wu, Xiangtai Li, Xia Li, Henghui Ding, Yunhai Tong, and Dacheng Tao. Towards robust referring image segmentation. arXiv preprint arXiv:2209.09554, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.558, + 0.893, + 0.599 + ], + "angle": 0, + "content": "[58] Mengde Xu, Zheng Zhang, Fangyun Wei, Han Hu, and Xiang Bai. Side adapter network for open-vocabulary semantic segmentation. arXiv preprint arXiv:2302.12242, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.601, + 0.894, + 0.655 + ], + "angle": 0, + "content": "[59] Mengde Xu, Zheng Zhang, Fangyun Wei, Yutong Lin, Yue Cao, Han Hu, and Xiang Bai. A simple baseline for open-vocabulary semantic segmentation with pre-trained vision-language model. In ECCV, 2022. 1, 2, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.657, + 0.894, + 0.71 + ], + "angle": 0, + "content": "[60] Zhao Yang, Jiaqi Wang, Yansong Tang, Kai Chen, Hengshuang Zhao, and Philip HS Torr. Lavt: Language-aware vision transformer for referring image segmentation. In CVPR, 2022. 3, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.713, + 0.894, + 0.752 + ], + "angle": 0, + "content": "[61] Linwei Ye, Mrigank Rochan, Zhi Liu, and Yang Wang. Cross-modal self-attention network for referring image segmentation. In CVPR, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.755, + 0.893, + 0.808 + ], + "angle": 0, + "content": "[62] Zicheng Zhang, Yi Zhu, Jianzhuang Liu, Xiaodan Liang, and Wei Ke. Coupalign: Coupling word-pixel with sentence-mask alignments for referring image segmentation. In NeurIPS, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.811, + 0.893, + 0.838 + ], + "angle": 0, + "content": "[63] Chong Zhou, Chen Change Loy, and Bo Dai. Extract free dense labels from clip. In ECCV, 2022. 1, 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.84, + 0.894, + 0.892 + ], + "angle": 0, + "content": "[64] Ziqin Zhou, Bowen Zhang, Yinjie Lei, Lingqiao Liu, and Yifan Liu. Zegclip: Towards adapting clip for zero-shot semantic segmentation. 
arXiv preprint arXiv:2212.03588, 2022.2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.892 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "19465" + } + ] +] \ No newline at end of file diff --git a/2023/Zero-Shot Referring Image Segmentation With Global-Local Context Features/d944ca48-5a24-4209-88df-55a9c1e47851_origin.pdf b/2023/Zero-Shot Referring Image Segmentation With Global-Local Context Features/d944ca48-5a24-4209-88df-55a9c1e47851_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..188bcb1a662fed4a7143b55eae8a0ff731a42991 --- /dev/null +++ b/2023/Zero-Shot Referring Image Segmentation With Global-Local Context Features/d944ca48-5a24-4209-88df-55a9c1e47851_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8cebeff8a9395e0e4a8ef918c59d2be6987ba0d9f8a98c2fe2c4674c34f4aad +size 10260921 diff --git a/2023/Zero-Shot Referring Image Segmentation With Global-Local Context Features/full.md b/2023/Zero-Shot Referring Image Segmentation With Global-Local Context Features/full.md new file mode 100644 index 0000000000000000000000000000000000000000..7d28c5997507931d875737740a27351469e1279a --- /dev/null +++ b/2023/Zero-Shot Referring Image Segmentation With Global-Local Context Features/full.md @@ -0,0 +1,406 @@ +# Zero-shot Referring Image Segmentation with Global-Local Context Features + +Seonghoon $\mathrm{Yu}^{1}$ + +Paul Hongsuck Seo2 + +Jeany Son1 + +$^{1}$ AI Graduate School, GIST + +2Google Research + +seonghoon@gm.gist.ac.kr + +phseo@google.com + +jeany@gist.ac.kr + +# Abstract + +Referring image segmentation (RIS) aims to find a segmentation mask given a referring expression grounded to a region of the input image. Collecting labelled datasets for this task, however, is notoriously costly and labor-intensive. To overcome this issue, we propose a simple yet effective zero-shot referring image segmentation method by leveraging the pre-trained cross-modal knowledge from CLIP. In order to obtain segmentation masks grounded to the input text, we propose a mask-guided visual encoder that captures global and local contextual information of an input image. By utilizing instance masks obtained from off-the-shelf mask proposal techniques, our method is able to segment fine-detailed instance-level groundings. We also introduce a global-local text encoder where the global feature captures complex sentence-level semantics of the entire input expression while the local feature focuses on the target noun phrase extracted by a dependency parser. In our experiments, the proposed method outperforms several zero-shot baselines of the task and even the weakly supervised referring expression segmentation method with substantial margins. Our code is available at https://github.com/Seonghoon-Yu/Zero-shot-RIS. + +# 1. Introduction + +Recent advances of deep learning has revolutionised computer vision and natural language processing, and addressed various tasks in the field of vision-and-language [4, 19, 27, 28, 36, 43, 50]. A key element in the recent success of the multi-modal models such as CLIP [43] is the contrastive image-text pre-training on a large set of image and text pairs. It has shown a remarkable zero-shot transferability on a wide range of tasks, such as object detection [9, 10, 13], semantic segmentation [7, 12, 59, 63], image captioning [40], visual question answering (VQA) [47] and so on. 

Despite the good transferability of pre-trained multi-modal models, it is not straightforward to handle dense prediction tasks such as object detection and image segmentation. A pixel-level dense prediction task is challenging since there is a substantial gap between the image-level contrastive pre-training task and pixel-level downstream tasks such as semantic segmentation. There have been several attempts to reduce the gap between the two tasks [44, 54, 63], but these works aim to fine-tune the model, consequently requiring task-specific dense annotations, which are notoriously labor-intensive and costly.

(a) Referring image segmentation task

![](images/1566abd5ebc5a9381de29c7ad4240a71b1407982ee1c649987e6d2bd794fe85f.jpg)

![](images/845f9db0e94f532cc292e82afcc911a6a0642a2ee247df769d8d528e523bc19a.jpg)

(b) Global-Local context in RIS

![](images/a29450ea7923b69ad64fef1514291153faf7e39ea55008fce3ecdcef6ba549b8.jpg)

![](images/178bc5b70cd7d3de40222d1dfbb2bf377146b8f9c8606ad03ed9f872450cbae3.jpg)

![](images/1ee188e3d920ecfb3a8b26ae7b9c96712b7d816fc6ce10f5b982ef6191de3586.jpg)

![](images/d399ed7c9d810758449eb86e4fbbc0063b1a527981df09b0b9af26650ba47630.jpg)

![](images/23985f799d7872410bfd8292adda80207ee84a87eda2daa6213395e3b0dd9f7b.jpg)
Figure 1. Illustrations of the task of referring image segmentation and motivations of global-local context features. To find the grounded mask given an expression, we need to understand the relations between the objects as well as their semantics.

Referring image segmentation is the task of finding the specific region in an image given a natural language text describing that region, and it is well known as one of the most challenging vision-and-language tasks. Collecting annotations for this task is even more challenging, as it requires precise referring expressions for the target region as well as dense mask annotations. Recently, a weakly-supervised referring image segmentation method [48] was proposed to overcome this issue. However, it still requires high-level text expression annotations paired with images for the target datasets, and its performance is far from that of the supervised methods. To tackle this issue, in this paper, we focus on zero-shot transfer of the pre-trained knowledge of CLIP to the task of referring image segmentation.

Moreover, this task is challenging because it requires a high-level understanding of language and a comprehensive understanding of an image, as well as a dense instance-level prediction. There have been several works for zero-shot semantic segmentation [7, 12, 59, 63], but they cannot be directly extended to the zero-shot referring image segmentation task because it has different characteristics. Specifically, the semantic segmentation task does not need to distinguish instances, but the referring image segmentation task should be able to predict an instance-level segmentation mask. In addition, among multiple instances of the same class, only the one instance described by the expression must be selected. For example, in Figure 1, there are two cats in the input image.
If the input text is given by "a cat is lying on the seat of the scooter", the cat with the green mask is the proper output. To find this correct mask, we need to understand the relation between the objects (i.e. "lying on the seat") as well as their semantics (i.e. "cat", "scooter").

In this paper, we propose a new baseline for the zero-shot referring image segmentation task using a pre-trained model from CLIP, where global and local contexts of an image and an expression are handled in a consistent way. In order to localize an object mask region in an image given a textual referring expression, we propose a mask-guided visual encoder that captures global and local context information of an image given a mask. We also present a global-local textual encoder where the local context is captured by the target noun phrase and the global context is captured by the whole sentence of the expression. By combining features at two different context levels, our method is able to capture comprehensive knowledge as well as specific traits of the target object. Note that, although our method does not require any additional training of the CLIP model, it outperforms all baselines and the weakly supervised referring image segmentation method by a large margin.

Our main contributions can be summarised as follows:

- We propose a new task of zero-shot referring image segmentation based on CLIP without any additional training. To the best of our knowledge, this is the first work to study the zero-shot referring image segmentation task.
- We present a visual encoder and a textual encoder that integrate global and local contexts of images and sentences, respectively. Although the modalities of the two encoders are different, our visual and textual features are handled in a consistent way.
- The proposed global-local context features take full advantage of CLIP to capture the target object semantics as well as the relations between the objects in both visual and textual modalities.
- Our method consistently shows outstanding results compared to several baseline methods, and also outperforms the weakly supervised referring image segmentation method with substantial margins.

# 2. Related Work

Zero-shot Transfer. Classical zero-shot learning aims to predict classes that have not been seen before by transferring the knowledge learned on the seen classes. Early works [3, 14, 34] leverage pre-trained word embeddings [5, 39] of class names or attributes and perform zero-shot prediction via a mapping between visual representations of images and these word embeddings. Recently, CLIP [43] and ALIGN [19] shed new light on zero-shot learning via large-scale image-text pre-training. They show successful results on various downstream tasks via zero-shot knowledge transfer, such as image captioning [40], video action localization [51], image-text retrieval [1] and so on. Contrary to classical zero-shot learning, zero-shot transfer has the advantage of avoiding fine-tuning the pre-trained model on a task-specific dataset, where collecting datasets is time-consuming. There have been several works that apply CLIP encoders directly, with tiny architectural modifications and without additional training, to semantic segmentation [63], referring expression comprehension [49], phrase localization [25] and object localization [17]. Our work also lies in this line of research.

Zero-shot Dense Prediction Tasks.
Very recently, with the success of pre-training models using large-scale image-text pairs, there have been several attempts to deal with dense prediction tasks with CLIP, e.g. object detection [9, 10, 13, 24, 30, 45], semantic segmentation [22, 29, 37, 42, 44, 58, 59, 63, 64] and so on. These dense prediction tasks, however, are challenging since CLIP learns image-level features rather than pixel-level fine-grained features. To handle this issue, ViLD [13] introduces a method that crops the image to contain only the bounding box region and then extracts the visual features of the cropped regions using CLIP to classify unseen objects. This approach is applied in a wide range of dense prediction tasks that demand the zero-shot transfer ability of CLIP [7, 9, 10, 12, 49, 59]. While this method only considers the cropped area, there are several methods [25, 63] that consider the global context of the image, not just the cropped region. Adapting CLIP [25] proposed a phrase localization method by modifying CLIP to generate high-resolution spatial feature maps using superpixels. MaskCLIP [63] modifies the image encoder of CLIP by transforming the value embedding layer and the last linear layer into two $1 \times 1$ convolutional layers to handle pixel-level predictions. In this work, we focus on extracting both global and local context visual features with CLIP.

![](images/616e1a9316f9da7ca1627269ffaee1c21ebbda64d8d1632898fb35145bf1b376.jpg)
Figure 2. Overall framework of our global-local CLIP. Given an image and an expression as inputs, we extract global-local context visual features using mask proposals, and we also extract a global-local context textual feature. After computing the cosine similarity scores between all global-local context visual features and the global-local context textual feature, we choose the mask with the highest score.

Referring Image Segmentation. Referring image segmentation, introduced by [18], aims to segment a target object in an image given a natural linguistic expression. There have been several fully-supervised methods for this task, where images and expressions are used as inputs and the target mask is given for training [2, 20, 33, 55, 60, 62]. Most works [6, 11, 23, 60, 61] focus on how to fuse the two features of different modalities extracted from independent encoders. Early works [26, 32] extract multi-modal features by simply concatenating visual and textual features and feed them into segmentation networks [35] to predict dense segmentation masks. There have been two branches of work on fusing cross-modal features: attention-based encoder fusion [11, 57, 60] and cross-modal decoder fusion based on a Transformer decoder [6, 54, 61]. Recently, a CLIP-based approach, which learns separate image and text Transformers using contrastive pre-training, has been proposed [54]. These fully supervised referring image segmentation methods show good performance in general, but they require dense annotations of target masks and comprehensive expressions describing the target object. To address this problem, TSEG [48] proposed a weakly-supervised referring image segmentation method which learns the segmentation model using text-based image-level supervision. However, this method still requires high-level referring expression annotations with images for specific datasets. Therefore, we propose a new baseline for zero-shot referring image segmentation without any training or supervision.

# 3. Method
In this section, we present the proposed method for zero-shot referring image segmentation in detail. We first show the overall framework of the proposed method (3.1), and then discuss the detailed methods for extracting visual features (3.2) and textual features (3.3) that encode global and local contextual information.

# 3.1. Overall Framework

To solve the task of referring image segmentation, which aims to predict the target region grounded to the text description, it is essential to learn image and text representations in a shared embedding space. To this end, we adopt CLIP to leverage its pre-trained cross-modal features for images and natural language.

Our framework consists of two parts as shown in Fig. 2: (1) a global-local visual encoder for visual representation, and (2) a global-local natural language encoder for referring expression representation. Given a set of mask proposals generated by an unsupervised mask generator [52, 53], we first extract two visual features at the global-context and local-context levels for each mask proposal, and then combine them into a single visual feature. Our global-context visual features comprehensively represent the masked area as well as the surrounding region, while the local-context visual features capture the representation of the specific masked region. This plays a key role in the referring image segmentation task because we need to focus on a small, specific target region described by a comprehensive expression of the target. At the same time, given a sentence expressing the target, our textual representation is extracted by the CLIP text encoder. In order to understand the holistic expression of the target as well as to focus on the target object itself, we first extract a key noun phrase from the sentence using dependency parsing provided by spaCy [16], and then combine a global sentence feature and a local target noun phrase feature. Note that our visual and text encoders are designed to handle both global-context and local-context information in a consistent way.

![](images/8a1d0440dc97753c1d5e4d1c3617b6fc3c94d70f956ae0dae518057008c5d6aa.jpg)
![](images/41270556ea24f805230e9b1fdf4d566cd914203d9d021aeb35626cfe26ce26af.jpg)
Figure 3. Detailed illustration of our mask-guided global-context visual encoders in ResNet and ViT architectures: (a) Masked attention pooling in ResNet, (b) Token masking in ViT.

Since our method is built on CLIP, where the visual and textual features are embedded in a common embedding space, we can formulate the objective of our zero-shot referring image segmentation task as follows. Given an image $I$ and a referring expression $T$ as inputs, our method finds the mask whose visual feature has the maximum similarity to the given textual feature among all mask proposals:

$$
\hat{m} = \arg\max_{m \in M(I)} \operatorname{sim}(\mathbf{t}, \mathbf{f}_{m}), \tag{1}
$$

where $\operatorname{sim}(\cdot, \cdot)$ is the cosine similarity, $\mathbf{t}$ is the proposed global-local textual feature for the referring expression $T$, $\mathbf{f}_{m}$ is the mask-guided global-local visual feature of mask $m$, and $M(I)$ is the mask proposal set for the given image $I$.

# 3.2. Mask-guided Global-local Visual Features

To segment the target region related to the referring expression, it is essential to understand the global relationships between multiple objects in the image as well as the local semantic information of the target.
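For illustration, the selection step in Eq. (1) can be sketched in a few lines; this is a minimal example with our own variable names, assuming the per-mask visual features and the textual feature have already been computed, and is not taken from the released implementation.

```python
import torch
import torch.nn.functional as F

def select_mask(visual_feats: torch.Tensor, text_feat: torch.Tensor) -> int:
    """Pick the mask proposal that maximizes Eq. (1).

    visual_feats: (num_masks, d) global-local visual features f_m.
    text_feat:    (d,) global-local textual feature t.
    """
    v = F.normalize(visual_feats, dim=-1)  # unit-normalize each mask feature
    t = F.normalize(text_feat, dim=-1)     # unit-normalize the text feature
    sims = v @ t                           # cosine similarities, shape (num_masks,)
    return int(sims.argmax().item())       # index of the selected mask \hat{m}
```
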
In this section, we describe how to extract global and local-context features using CLIP, and how to fuse them.

Since CLIP is designed to learn image-level representations, it is not well suited for pixel-level dense prediction such as image segmentation. To overcome this limitation of CLIP, we decompose the task into two sub-tasks: mask proposal generation and masked image-text matching.

In order to generate mask proposals, we use an off-the-shelf mask extractor [53], which is an unsupervised instance-level mask generation model. By using mask proposals explicitly, our method can handle fine-detailed instance-level segmentation masks with CLIP.

Global-context Visual Features. For each mask proposal, we first extract global-context visual features using the pre-trained CLIP model. The original CLIP visual encoder, however, is designed to generate a single feature vector describing the whole image. To tackle this issue, we modify the visual encoder of CLIP to extract features that contain information not only from the masked region but also from the surrounding regions, so as to understand relationships between multiple objects.

In this paper, we use two different architectures for the visual encoder, as in CLIP: ResNet [15] and Vision Transformer (ViT) [8]. For the visual encoder with the ResNet architecture, we denote the visual feature extractor without a pooling layer as $\phi_{\mathrm{f}}$ and its attention pooling layer as $\phi_{\mathrm{att}}$. Then the visual feature $\mathbf{f}$ produced by the visual encoder of CLIP, $\phi_{\mathrm{CLIP}}$, can be expressed as follows:

$$
\mathbf{f} = \phi_{\mathrm{CLIP}}(I) = \phi_{\mathrm{att}}\left(\phi_{\mathrm{f}}(I)\right), \tag{2}
$$

where $I$ is a given image. Similarly, since ViT has multiple multi-head attention layers, we divide this visual encoder into two parts: the last $k$ layers and the rest. We denote the former by $\phi_{\mathrm{att}}$ and the latter by $\phi_{\mathrm{f}}$ for ViT architectures based on CLIP.

Then, given an image $I$ and a mask $m$, our global-context visual feature is defined as follows:

$$
\mathbf{f}_{m}^{G} = \phi_{\mathrm{att}}\left(\phi_{\mathrm{f}}(I) \odot \bar{m}\right), \tag{3}
$$

where $\bar{m}$ is the mask resized to the size of the feature map, and $\odot$ is the Hadamard product. We illustrate more details of this masking strategy for each architecture of CLIP in Section 4.1 and Figure 3.

We refer to this as the global-context visual feature because the entire image is passed through the encoder, and the feature map at the last layer contains holistic information about the image. Although we use mask proposals to obtain the features only over the masked regions of the feature map, these features already carry comprehensive information about the scene.

Local-context Visual Features. To obtain local-context visual features given a mask proposal, we first mask the image and then crop it to obtain a new image containing only the area around the mask proposal. After masking and cropping, the image is passed to the visual encoder of CLIP to extract our local-context visual feature $\mathbf{f}_m^L$:

$$
\mathbf{f}_{m}^{L} = \phi_{\mathrm{CLIP}}\left(\mathcal{T}_{\mathrm{crop}}(I \odot m)\right), \tag{4}
$$

where $\mathcal{T}_{\mathrm{crop}}(\cdot)$ denotes a cropping operation. This approach is commonly used in zero-shot semantic segmentation methods [7, 59].
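A rough sketch of Eqs. (3) and (4) may make the two feature paths concrete. Here `phi_f`, `phi_att` and `phi_clip` are assumed to be callables mirroring the notation above, and `crop_to_mask` is a hypothetical helper that crops to the proposal's bounding box and resizes to CLIP's input resolution; the names are ours, not the authors'.

```python
import torch
import torch.nn.functional as F

def global_context_feature(image, mask, phi_f, phi_att):
    """Eq. (3): mask the backbone feature map, then apply attention pooling.

    image: (1, 3, H, W) float tensor; mask: (H, W) binary tensor of one proposal.
    """
    feat_map = phi_f(image)                          # (1, C, H', W') feature map of the whole image
    m_bar = F.interpolate(mask[None, None].float(),  # resize the mask to the feature-map size
                          size=feat_map.shape[-2:], mode="nearest")
    return phi_att(feat_map * m_bar)                 # Hadamard product, then pooling -> f_m^G

def local_context_feature(image, mask, phi_clip, crop_to_mask):
    """Eq. (4): mask the image, crop around the proposal, encode with CLIP."""
    masked = image * mask[None, None].float()        # I Hadamard m
    cropped = crop_to_mask(masked, mask)             # hypothetical bbox crop + resize to 224x224
    return phi_clip(cropped)                         # f_m^L
```
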
Since this feature focuses on the masked region of the image, with irrelevant regions removed, it concentrates only on the target object itself.

Global-local Context Visual Features. We aggregate the global- and local-context features over masked regions to obtain a single visual feature that represents the masked region of the image. The global-local context visual feature is computed as follows:

$$
\mathbf{f}_{m} = \alpha \mathbf{f}_{m}^{G} + (1 - \alpha) \mathbf{f}_{m}^{L}, \tag{5}
$$

where $\alpha \in [0,1]$ is a constant parameter, $m$ is a mask proposal, and $\mathbf{f}_{m}^{G}$ and $\mathbf{f}_{m}^{L}$ are the global-context and local-context visual features from Eq. (3) and Eq. (4), respectively. As in Eq. (1), the score for each mask proposal is then obtained by computing the similarity between our global-local context visual feature and the textual feature of the expression, described in the next section.

# 3.3. Global-local Textual Features

As with the visual features, it is important to understand the holistic meaning of a given expression as well as the target object noun. Given a referring expression $T$, we extract a global sentence feature, $\mathbf{t}^G$, using the pre-trained CLIP text encoder, $\psi_{\mathrm{CLIP}}$, as follows:

$$
\mathbf{t}^{G} = \psi_{\mathrm{CLIP}}(T). \tag{6}
$$

Although the CLIP text encoder can extract a textual representation aligned with the image-level representation, it is hard for it to focus on the target noun in the expression, because expressions in this task are often complex sentences containing multiple clauses, e.g. "a dark brown leather sofa behind a foot stool that has a laptop computer on it".

To address this problem, we exploit dependency parsing using spaCy [16] to find the target noun phrase, $\mathrm{NP}(T)$, given the text expression $T$. To find the target noun phrase, we first find all noun phrases in the expression and then select the one that contains the root noun of the sentence. After identifying the target noun phrase in the input sentence, we extract the local-context textual feature with the CLIP text encoder:

$$
\mathbf{t}^{L} = \psi_{\mathrm{CLIP}}(\mathrm{NP}(T)). \tag{7}
$$

Finally, our global-local context textual feature is computed as a weighted sum of the global and local textual features described in Eq. (6) and Eq. (7):

$$
\mathbf{t} = \beta \mathbf{t}^{G} + (1 - \beta) \mathbf{t}^{L}, \tag{8}
$$

where $\beta \in [0,1]$ is a constant parameter, and $\mathbf{t}^G$ and $\mathbf{t}^L$ are the global sentence and local noun-phrase textual features, respectively.

# 4. Implementation Details

We use the unsupervised instance segmentation method FreeSOLO [53] to obtain mask proposals, and the shorter side of the input image is set to 800. For CLIP, the input image size is set to $224 \times 224$. The number of masking layers $k$ in ViT is set to 3. We set $\alpha = 0.85$ for RefCOCOg, 0.95 for RefCOCO and RefCOCO+, and $\beta = 0.5$ for all datasets.
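As a small illustrative sketch of the weighted fusion in Eqs. (5) and (8) and of the target-noun-phrase extraction in Section 3.3: the function names and the root-selection heuristic below reflect our reading of the description above rather than the released implementation.

```python
import spacy

nlp = spacy.load("en_core_web_sm")  # assumed English pipeline for the dependency parse

def fuse(global_feat, local_feat, weight):
    # Eq. (5) with weight = alpha, or Eq. (8) with weight = beta
    return weight * global_feat + (1.0 - weight) * local_feat

def target_noun_phrase(expression: str) -> str:
    """Return the noun chunk containing the sentence root, following Section 3.3."""
    doc = nlp(expression)
    root = next(tok for tok in doc if tok.dep_ == "ROOT")
    for chunk in doc.noun_chunks:
        if chunk.start <= root.i < chunk.end:
            return chunk.text
    return expression  # fall back to the whole sentence if no chunk contains the root
```

For the example expression above, a parse whose root is "sofa" would typically return the noun phrase "a dark brown leather sofa".
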
# 4.1. Masking in Global-context Visual Encoder

We use both ResNet-50 and ViT-B/32 architectures for the CLIP visual encoder. The masking strategies of the global-context visual encoder for these two architectures are mostly similar but have small differences, described next.

Masked Attention Pooling in ResNet [15]. In the ResNet-based visual encoder of the original CLIP, the global average pooling layer is replaced by an attention pooling layer. This attention pooling layer has the same architecture as the multi-head attention in a Transformer. The query of the attention pooling layer is computed by global average pooling over the feature maps extracted by the ResNet backbone, while the key and value are given by the flattened feature map. In our masked attention pooling, we mask the feature map using a given mask before computing the query, key and value. After masking the feature maps, we compute the query, key and value and feed them into the multi-head attention layer. A detailed illustration of our masked attention pooling in ResNet is shown in Figure 3a.

Token Masking in ViT [8]. Following ViT, we divide an image into grid patches, embed the patches with a linear layer and positional embeddings to obtain tokens, and then process those tokens with a series of Transformer layers. To capture the global context of images, we mask tokens only in the last $k$ Transformer layers. The tokens are reshaped and masked by a given mask proposal, then flattened and passed to the subsequent Transformer layer. As ViT has a class token (CLS), we use the final output feature of this CLS token as our global-context visual representation. The detailed method of our token masking in ViT is also shown in Figure 3b. In our experiments, we use the ViT-B/32 architecture as the backbone of our ViT-based visual encoder and apply token masking to the last 3 layers of the visual encoder. We show the performance with respect to the location of the token masking layers in the supplementary materials.

# 5. Experiments

# 5.1. Datasets and Metrics

We evaluate our method on RefCOCO [41], RefCOCO+ [41] and RefCOCOg [21, 38], where the images and masks of the MS-COCO [31] dataset are used to annotate

Table 1. Comparison with zero-shot RIS baseline methods on three standard benchmark datasets. U: the UMD partition. G: the Google partition. All baseline methods use FreeSOLO as the mask proposal network. † denotes that the model is initialized with ImageNet pre-trained weights and trained on RIS datasets. The FreeSOLO upper-bound is computed between the GT mask and the FreeSOLO mask with the maximum overlap with the GT mask.
| Metric | Method | Visual Encoder | RefCOCO val | RefCOCO test A | RefCOCO test B | RefCOCO+ val | RefCOCO+ test A | RefCOCO+ test B | RefCOCOg val(U) | RefCOCOg test(U) | RefCOCOg val(G) |
|---|---|---|---|---|---|---|---|---|---|---|---|
| oIoU | Supervised SoTA method [60] | | 72.73 | 75.82 | 68.79 | 62.14 | 68.38 | 55.10 | 61.24 | 62.09 | 60.50 |
| | *Zero-Shot Baselines* | | | | | | | | | | |
| | Grad-CAM | ResNet-50 | 14.02 | 15.07 | 13.49 | 14.46 | 14.97 | 14.04 | 12.51 | 12.81 | 12.86 |
| | Score map | ResNet-50 | 19.87 | 19.31 | 20.22 | 20.37 | 19.65 | 20.75 | 18.88 | 19.16 | 19.15 |
| | Region token | ViT-B/32 | 21.71 | 20.31 | 22.63 | 22.61 | 20.91 | 23.46 | 25.52 | 25.38 | 25.29 |
| | Cropping | ResNet-50 | 22.36 | 20.49 | 22.69 | 23.95 | 22.03 | 23.49 | 28.20 | 27.64 | 27.47 |
| | Cropping | ViT-B/32 | 22.73 | 21.11 | 23.08 | 24.09 | 22.42 | 23.93 | 28.69 | 27.51 | 27.70 |
| | Global-Local CLIP (ours) | ResNet-50 | 24.58 | 23.38 | 24.35 | 25.87 | 24.61 | 25.61 | 30.07 | 29.83 | 29.45 |
| | Global-Local CLIP (ours) | ViT-B/32 | 24.88 | 23.61 | 24.66 | 26.16 | 24.90 | 25.83 | 31.11 | 30.96 | 30.69 |
| | FreeSOLO upper-bound | - | 42.08 | 42.52 | 43.52 | 42.17 | 42.52 | 43.80 | 48.81 | 48.96 | 48.49 |
| mIoU | *Zero-Shot Baselines* | | | | | | | | | | |
| | Grad-CAM | ResNet-50 | 14.22 | 15.93 | 13.18 | 14.80 | 15.87 | 13.78 | 12.47 | 13.16 | 13.30 |
| | Score map | ResNet-50 | 21.32 | 20.96 | 21.57 | 21.61 | 21.17 | 22.30 | 20.07 | 20.43 | 20.63 |
| | Region token | ViT-B/32 | 23.43 | 22.07 | 24.62 | 24.51 | 22.64 | 25.37 | 27.57 | 27.34 | 27.69 |
| | Cropping | ResNet-50 | 24.31 | 22.37 | 24.66 | 26.31 | 23.94 | 25.69 | 31.27 | 30.87 | 30.78 |
| | Cropping | ViT-B/32 | 24.83 | 22.58 | 25.72 | 26.33 | 24.06 | 26.46 | 31.88 | 30.94 | 31.06 |
| | Global-Local CLIP (ours) | ResNet-50 | 26.70 | 24.99 | 26.48 | 28.22 | 26.54 | 27.86 | 33.02 | 33.12 | 32.79 |
| | Global-Local CLIP (ours) | ViT-B/32 | 26.20 | 24.94 | 26.56 | 27.80 | 25.64 | 27.84 | 33.52 | 33.67 | 33.61 |
| | FreeSOLO upper-bound | - | 48.25 | 46.62 | 50.43 | 48.28 | 46.62 | 50.62 | 52.44 | 52.91 | 52.76 |
| | *Weakly-supervised method* | | | | | | | | | | |
| | TSEG [48] | ViT-S/16† | 25.95 | - | - | 22.62 | - | - | 23.41 | - | - |
+ +
| Method | Train dataset | oIoU on PhraseCut (All) | oIoU on PhraseCut (Unseen) |
|---|---|---|---|
| CRIS | RefCOCO | 15.53 | 13.75 |
| CRIS | RefCOCO+ | 16.30 | 14.62 |
| CRIS | RefCOCOg | 16.24 | 13.88 |
| LAVT | RefCOCO | 16.68 | 14.43 |
| LAVT | RefCOCO+ | 16.64 | 13.49 |
| LAVT | RefCOCOg | 16.05 | 13.48 |
| Ours | N/A | 23.64 | 22.98 |
+ +![](images/0ce096b4aec5fe9d6429feeca993b26f9dc691311bb2d6e48e649f2f9863ac2b.jpg) +Figure 4. Comparisons to supervised methods in zero-shot setting on PhraseCut (left), and in few-shot setting on RefCOCOg (right). Unseen denotes a subset with classes that are not seen in RefCOCO. + +the ground-truth of the referring image segmentation task. RefCOCO, RefCOCO+ and RefCOCOg have 19,994, 19,992 and 26,711 images with 142,210, 141,564 and 104,560 referring expressions, respectively. RefCOCO and RefCOCO+ have shorter expressions and an average of 1.6 nouns and 3.6 words are included in one expression, while RefCOCOg expresses more complex relations with longer sentences and has an average of about 2.8 nouns and 8.4 words. The detailed statistics of those datasets are demonstrated in our supplementary materials. + +For the evaluation metrics, we use the overall Intersection over Union (oIoU) and the mean Intersection over Union (mIoU) which are the common metrics for the referring image segmentation task. The oIoU is measured by the total area of intersection divided by the total area of union, where the total area is computed by accumulating over all examples. In our ablation study, we use oIoUs since most of supervised RIS methods [6, 23] adopt it. We also report the mIoUs as + +Table 2. oIoU results of our method and the baselines using COCO instance GT masks. We use a ViT-B/32 model for a visual encoder. + +
| Method | RefCOCO | RefCOCO+ | RefCOCOg |
|---|---|---|---|
| Grad-CAM | 18.32 | 18.14 | 21.24 |
| Score map | 23.97 | 25.50 | 28.11 |
| Region token | 35.59 | 38.13 | 40.19 |
| Cropping | 36.32 | 42.07 | 47.42 |
| Ours | 37.05 | 42.59 | 51.01 |
+ +Table 3. oIoU results with different context-level features on the val split of RefCOCOg. We use a ViT-B/32 model for a visual encoder. + +
| Visual features \ Textual features | Global | Local | Global-Local |
|---|---|---|---|
| Global | 27.03 | 27.37 | 27.60 |
| Local | 28.69 | 25.23 | 29.48 |
| Global-Local | 30.18 | 27.94 | 31.11 |
in [48], which computes the average IoU across all examples while considering the object sizes.

# 5.2. Baselines

We adapt several baseline methods that extract dense predictions from CLIP to the zero-shot RIS task for comparison with our framework, and use FreeSOLO [53] as the mask generator in all baselines.

- Grad-CAM: The first baseline utilizes gradient-based activation maps based on Grad-CAM [46], which has been verified in prior work [17]. After obtaining the activation maps using the similarity score of image-text pairs, we mask the maps, aggregate scores for all mask proposals, and select the mask with the highest score.
- Score Map: The second baseline extracts a dense score map as in MaskCLIP [63]. As in MaskCLIP, to obtain dense score maps without pooling, the value linear layer and the last layer of the attention pooling are transformed into two consecutive $1 \times 1$ convolution layers. The feature map extracted from the ResNet is forwarded through these two layers to obtain a language-compatible dense image feature map, and the cosine similarity with CLIP's textual feature is then computed. After obtaining a score map, we project the mask proposals onto it. The scores in the mask area are averaged, and we select the mask with the maximum score.
- Region Token in ViT: The third baseline is the method used in Adapting CLIP [25]. Similar to Adapting CLIP, we use region tokens for each mask proposal in all Transformer layers of CLIP's visual encoder instead of using superpixels. We finally compute the cosine similarity between the class token of each mask proposal and CLIP's textual feature, and then choose the mask with the highest score.
- Cropping: The last baseline is our local-context visual features described in Section 3.2. Cropping and masking is a commonly used approach utilizing CLIP for extracting mask or box region features in a range of zero-shot dense prediction tasks [7, 9, 13, 49, 59]. Therefore, we consider cropping as one of the zero-shot RIS baselines.

![](images/175ec71baec5767f30f4da75d8358cc443024c51dc4780ec22d5a79bdb451c73.jpg)
![](images/59bcf0e50b884b1cc9e410e95f689b8a0e0ef98b998d456350f68a6d85a6c57c.jpg)
![](images/b79efdc9aef4bdd6cea7434cf89bcb992bfa031c993f159d593661738218379c.jpg)
![](images/34f4f27f3a778b3fbe4a15c39d0954a877e63fb00293b0bf1be62da800064c68.jpg)
![](images/54814498ca74ac1fd2495ddf3c8a74115c921e8dcb790e4479c8484932d955e5.jpg)
![](images/8dba6ad77e3b2a9dc43c6ec0d74e28331d5380c6cc456c8723e59e9e1c0b4759.jpg)
![](images/60c7fe2b38e6fe473564eceab8aa34e9d80bdaeec8881270a1d8de660b904858.jpg)
![](images/5da1f89587b0963d59efff3c90e8212d3c46fce91c0dbf41b445923fa81b8cc1.jpg)
![](images/a3a590e289940f97c228a5acbfef851aa08b50e856c05a0e352cac07442bfa86.jpg)
![](images/07e6c57a984bdebd860e3e01c1404e23b6827ecdcfbae57a653560c07aa2766f.jpg)
Figure 5. Qualitative results with different levels of visual features. COCO instance GT masks are used as mask proposals to validate the effect of the global-local context visual features. (Panels, left to right: Image, GT, Local visual, Global visual, Global-Local; example expressions: "the banana the person is holding" and "a green bicycle ridden by a man in a black windbreaker".)

![](images/d089c8e983c72f3e3a84cfdc2f7e0e6570f7c5077619bd9faaae69c9bfe4f760.jpg)
![](images/c826d8a656f514f40ec0182748be048e09bceaad87e61a349f05ebf5274e128d.jpg)
![](images/693e8c61517c4226c11239327aa7bb5a086b8732a1c16208cafb2c46de142f76.jpg)
![](images/24d4d4286cc6ab8a9f49e5603053500f9ef0d3b1e82e7d36e267bf2f0f723f68.jpg)
![](images/49f44335a1d20a198480e16c451552af1187a0ee38d981a4fdbb05a3a5908a28.jpg)
![](images/a14079ffe6982d3c747e9bea2fbf2aec856b788958d3798b3ca83079635f0001.jpg)
![](images/621bc8c4868298223080137b6ceabe78f8cab2af98288a579c6015cb4d613000.jpg)
![](images/16f9ecf60c777341519c67a95f51f5bf77fa161a4bc7aec0ec76dd83dfc10b5f.jpg)
![](images/e9f6d407e67b0869c4afe74bee37216f3551102fb5d448873ec78b0396564757.jpg)
![](images/286426ff765b1c17f783baa567b5fc5c4f9d116f6f071d4e6afdcfb261e43c91.jpg)
![](images/a4f41c652470c5b8489327c2338666d59e5502f0b47c446bf3de4a255db73e34.jpg)
Figure 6. Qualitative results with different levels of textual features using COCO instance GT mask proposals. (Panels, left to right: Image, GT, Local text, Global text, Global-Local; example expressions include "guy in wheelchair" and an expression mentioning "a woman" and an "umbrella".)

# 5.3. Results

Main Results. We report the referring image segmentation performance of our global-local CLIP and the other baselines on RefCOCO, RefCOCO+ and RefCOCOg in terms of the oIoU and mIoU metrics in Table 1. For a fair comparison, all methods including the baselines use FreeSOLO [53] mask proposals to produce the final output mask. The experimental results show that our method outperforms the other baseline methods by substantial margins. Our method also surpasses the weakly supervised referring image segmentation method (TSEG) [48] in terms of mIoU$^1$. We also show the upper-bound performance of using FreeSOLO, where the scores are computed as the IoU between each ground-truth mask and its maximally overlapping mask proposal. Although there is still a gap compared to the fully-supervised referring image segmentation methods, our method improves performance significantly over the baselines under the same upper-bound.

Zero-shot Evaluation on Unseen Domain. To verify the effectiveness of our method in a more practical setting, we report zero-shot evaluation results together with SoTA supervised methods [54, 60] on the test split of PhraseCut [56] in Figure 4 (left). Note that RefCOCO contains expressions for only 80 salient object classes, whereas PhraseCut covers a variety of additional visual concepts, i.e. 1272 categories in the test set. Our method outperforms both supervised methods, even though our models were never trained under RIS supervision. When evaluated on the subset of classes that are not seen in the RefCOCO datasets (Unseen column), the supervised methods show significant performance degradation, whereas our method works robustly on this subset.

![](images/b8fb3126971edeccdc43fd85ba0718c00bb0a1c780e4a477b816262e833f3ebc.jpg)
Figure 7. Qualitative results of our method compared with several baselines. Note that all methods use mask proposals generated by FreeSOLO.

Comparison to Supervised Methods in a Few-shot Setting. We also compare our model to two supervised RIS methods [54, 60] in a few-shot learning setting, where the training set includes $k$ instances for each of the 80 classes in RefCOCO$^2$.
Note that the supervised methods use additional forms of supervision in training, whereas our method does not require any form of training or additional supervision; thus this setting is even disadvantageous to our method. Figure 4 (right) shows oIoU while varying $k$ on RefCOCOg. The results clearly show that our method outperforms both supervised methods with large margins when $k$ is small, and the gaps narrow as $k$ gets larger (64 and 256 for LAVT [60] and CRIS [54], respectively). Note that it covers about $10\%$ of the training set when $k = 64$ and the same trends hold for both RefCOCO and RefCOCO+. + +# 5.4. Ablation Study + +Effects of Mask Quality. To show the impact of the proposed method without considering the mask quality of the mask generators, we evaluate the performance of our method and the baselines with COCO instance GT masks in Table 2. Our approach has demonstrated superior performance compared to all baselines and has shown a performance improvement of over $3.5\%$ , particularly on RefCOCOg which includes longer expressions. We believe that our method performs well on challenging examples that involve complex expressions, such as those with multiple clauses, which require an understanding of both the language and the scene. + +Effects of Global-Local Context Features. We also study the effects of global-local context features in both visual and textual modalities and show the results in Table 3. For this analysis, we use RefCOCOg as it contains more complex expressions with multiple clauses. Among all combinations + +of two modalities, using both global-local context features in the visual and textual domains leads to the best performance. + +Qualitative Analysis. We demonstrate several results that support the effectiveness of our global-local context visual features in Figure 5. To show this effect more clearly, we use COCO instance GT masks as mask proposals. When using only local-context visual features, the predicted mask tends to focus on the instance that shares the same class as the target object. However, when using only global-context visual features, the predicted mask tends to capture the context of the expression but may focus on a different object class. By combining global and local context, our method successfully finds the target mask. We also demonstrate the effectiveness of our global-local context textual features in Figure 6. Furthermore, we compare the qualitative results of our method with baseline methods in Figure 7. Our proposed global-local CLIP outperforms the baseline methods in identifying the target object by taking into account the global context of the image and expression. + +# 6. Conclusion + +In this paper, we propose a simple yet effective zero-shot referring image segmentation framework focusing on transferring knowledges from image-text cross-modal representations of CLIP. To tackle the difficulty of the referring image segmentation task, we propose global-local context encodings to compute similarities between images and expressions, where both target object semantics and relations between the objects are dealt in a unified framework. The proposed method significantly outperforms all baseline methods and weakly supervised method as well. + +Acknowledgement. This work was supported by the IITP grants (No.2019-0-01842, No.2021-0-02068, No.2022-0-00926) funded by MSIT, the ISTD program (No.20018334) funded by MOTIE, and the GIST-MIT Research Collaboration grant funded by GIST, Korea. 
+ +# References + +[1] Alberto Baldrati, Marco Bertini, Tiberio Uricchio, and Alberto Del Bimbo. Effective conditioned and composed image retrieval combining clip-based features. In CVPR, 2022. 2 +[2] Bo Chen, Zhiwei Hu, Zhilong Ji, Jinfeng Bai, and Wangmeng Zuo. Position-aware contrastive alignment for referring image segmentation. arXiv preprint arXiv:2212.13419, 2022. 3 +[3] Shiming Chen, Ziming Hong, Yang Liu, Guo-Sen Xie, Baigui Sun, Hao Li, Qinmu Peng, Ke Lu, and Xinge You. Transzero: Attribute-guided transformer for zero-shot learning. In AAAI, 2022. 2 +[4] Yen-Chun Chen, Linjie Li, Licheng Yu, Ahmed El Kholy, Faisal Ahmed, Zhe Gan, Yu Cheng, and Jingjing Liu. Uniter: Universal image-text representation learning. In ECCV, 2020. 1 +[5] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In ACL, 2019. 2 +[6] Henghui Ding, Chang Liu, Suchen Wang, and Xudong Jiang. Vision-language transformer and query generation for referring segmentation. In ICCV, 2021. 3, 6 +[7] Jian Ding, Nan Xue, Gui-Song Xia, and Dengxin Dai. Decoupling zero-shot semantic segmentation. In CVPR, 2022. 1, 2, 5, 7 +[8] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2020. 4, 5 +[9] Yu Du, Fangyun Wei, Zihe Zhang, Miaojing Shi, Yue Gao, and Guoqi Li. Learning to prompt for open-vocabulary object detection with vision-language model. In CVPR, 2022. 1, 2, 7 +[10] Chengjian Feng, Yujie Zhong, Zequn Jie, Xiangxiang Chu, Haibing Ren, Xiaolin Wei, Weidi Xie, and Lin Ma. Promptdet: Expand your detector vocabulary with uncurated images. In ECCV, 2022. 1, 2 +[11] Guang Feng, Zhiwei Hu, Lihe Zhang, and Huchuan Lu. Encoder fusion network with co-attention embedding for referring image segmentation. In CVPR, 2021. 3 +[12] Golnaz Ghiasi, Xiuye Gu, Yin Cui, and Tsung-Yi Lin. Scaling open-vocabulary image segmentation with image-level labels. In ECCV, 2022. 1, 2 +[13] Xiuye Gu, Tsung-Yi Lin, Weicheng Kuo, and Yin Cui. Open-vocabulary object detection via vision and language knowledge distillation. In ICML, 2022. 1, 2, 7 +[14] Zongyan Han, Zhenyong Fu, Shuo Chen, and Jian Yang. Contrastive embedding for generalized zero-shot learning. In CVPR, 2021. 2 +[15] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 4, 5 +[16] Matthew Honnibal and Mark Johnson. An improved non-monotonic transition system for dependency parsing. In EMNLP, 2015. 3, 5 + +[17] Hsuan-An Hsia, Che-Hsien Lin, Bo-Han Kung, Jhao-Ting Chen, Daniel Stanley Tan, Jun-Cheng Chen, and Kai-Lung Hua. Clipcam: A simple baseline for zero-shot text-guided object and action localization. In ICASSP, 2022. 2, 6 +[18] Ronghang Hu, Marcus Rohrbach, and Trevor Darrell. Segmentation from natural language expressions. In ECCV, 2016. 3 +[19] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021. 1, 2 +[20] Ya Jing, Tao Kong, Wei Wang, Liang Wang, Lei Li, and Tieniu Tan. Locate then segment: A strong pipeline for referring image segmentation. In CVPR, 2021. 3 +[21] Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. 
Referitagame: Referring to objects in photographs of natural scenes. In EMNLP, 2014. 5 +[22] Kwanyoung Kim, Yujin Oh, and Jong Chul Ye. Zegot: Zero-shot segmentation through optimal transport of text prompts. arXiv preprint arXiv:2301.12171, 2023. 2 +[23] Namyup Kim, Dongwon Kim, Cuiling Lan, Wenjun Zeng, and Suha Kwak. Restr: Convolution-free referring image segmentation using transformers. In CVPR, 2022. 3, 6 +[24] Weicheng Kuo, Yin Cui, Xiuye Gu, AJ Piergiovanni, and Anelia Angelova. F-vlm: Open-vocabulary object detection upon frozen vision and language models. arXiv preprint arXiv:2209.15639, 2022. 2 +[25] Jiahao Li, Greg Shakhnarovich, and Raymond A Yeh. Adapting clip for phrase localization without further training. arXiv preprint arXiv:2204.03647, 2022. 2, 7 +[26] Ruiyu Li, Kaican Li, Yi-Chun Kuo, Michelle Shu, Xiaojuan Qi, Xiaoyong Shen, and Jiaya Jia. Referring image segmentation via recurrent refinement networks. In CVPR, 2018. 3 +[27] Wei Li, Can Gao, Guocheng Niu, Xinyan Xiao, Hao Liu, Jiachen Liu, Hua Wu, and Haifeng Wang. Unimo: Towards unified-modal understanding and generation via cross-modal contrastive learning. In ACL, 2021. 1 +[28] Xiujun Li, Xi Yin, Chunyuan Li, Pengchuan Zhang, Xiaowei Hu, Lei Zhang, Lijuan Wang, Houdong Hu, Li Dong, Furu Wei, et al. Oscar: Object-semantics aligned pre-training for vision-language tasks. In ECCV, 2020. 1 +[29] Feng Liang, Bichen Wu, Xiaoliang Dai, Kunpeng Li, Yinan Zhao, Hang Zhang, Peizhao Zhang, Peter Vajda, and Diana Marculescu. Open-vocabulary semantic segmentation with mask-adapted clip. arXiv preprint arXiv:2210.04150, 2022. 2 +[30] Chuang Lin, Peize Sun, Yi Jiang, Ping Luo, Lizhen Qu, Gholamreza Haffari, Zehuan Yuan, and Jianfei Cai. Learning object-language alignments for open-vocabulary object detection. arXiv preprint arXiv:2211.14843, 2022. 2 +[31] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dálár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In ECCV, 2014. 5 +[32] Chenxi Liu, Zhe Lin, Xiaohui Shen, Jimei Yang, Xin Lu, and Alan Yuille. Recurrent multimodal interaction for referring image segmentation. In ICCV, 2017. 3 + +[33] Jiang Liu, Hui Ding, Zhaowei Cai, Yuting Zhang, Ravi Kumar Satzoda, Vijay Mahadevan, and R Manmatha. Polyformer: Referring image segmentation as sequential polygon generation. arXiv preprint arXiv:2302.07387, 2023. 3 +[34] Lu Liu, Tianyi Zhou, Guodong Long, Jing Jiang, Xuanyi Dong, and Chengqi Zhang. Isometric propagation network for generalized zero-shot learning. In ICLR, 2020. 2 +[35] Jonathan Long, Evan Shelhamer, and Trevor Darrell. Fully convolutional networks for semantic segmentation. In CVPR, 2015. 3 +[36] Jiasen Lu, Dhruv Batra, Devi Parikh, and Stefan Lee. Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. In NeurlPS, 2019. 1 +[37] Huaishao Luo, Junwei Bao, Youzheng Wu, Xiaodong He, and Tianrui Li. Segclip: Patch aggregation with learnable centers for open-vocabulary semantic segmentation. arXiv preprint arXiv:2211.14813, 2022. 2 +[38] Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L Yuille, and Kevin Murphy. Generation and comprehension of unambiguous object descriptions. In CVPR, 2016. 5 +[39] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Corrado, and Jeff Dean. Distributed representations of words and phrases and their compositionality. In NeurIPS, 2013. 2 +[40] Ron Mokady, Amir Hertz, and Amit H Bermano. Clipcap: Clip prefix for image captioning. 
arXiv preprint arXiv:2111.09734, 2021. 1, 2 +[41] Varun K Nagaraja, Vlad I Morariu, and Larry S Davis. Modeling context between objects for referring expression understanding. In ECCV, 2016. 5 +[42] Prashant Pandey, Mustafa Chasmai, Monish Natarajan, and Brejesh Lall. A language-guided benchmark for weakly supervised open vocabulary semantic segmentation. arXiv preprint arXiv:2302.14163, 2023. 2 +[43] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021. 1, 2 +[44] Yongming Rao, Wenliang Zhao, Guangyi Chen, Yansong Tang, Zheng Zhu, Guan Huang, Jie Zhou, and Jiwen Lu. Denseclip: Language-guided dense prediction with context-aware prompting. In CVPR, 2022. 1, 2 +[45] Hanoona Abdul Rasheed, Muhammad Maaz, Muhammad Uzair Khattak, Salman Khan, and Fahad Khan. Bridging the gap between object and image-level representations for open-vocabulary detection. In NeurIPS, 2022. 2 +[46] Ramprasaath R Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. Gradcam: Visual explanations from deep networks via gradient-based localization. In ICCV, 2017. 6 +[47] Sheng Shen, Liunian Harold Li, Hao Tan, Mohit Bansal, Anna Rohrbach, Kai-Wei Chang, Zhewei Yao, and Kurt Keutzer. How much can clip benefit vision-and-language tasks? In ICLR, 2021. 1 + +[48] Robin Strudel, Ivan Laptev, and Cordelia Schmid. Weakly-supervised segmentation of referring expressions. arXiv preprint arXiv:2205.04725, 2022. 1, 3, 6, 7 +[49] Sanjay Subramanian, William Merrill, Trevor Darrell, Matt Gardner, Sameer Singh, and Anna Rohrbach. Reclip: A strong zero-shot baseline for referring expression comprehension. In ACL, 2022. 2, 7 +[50] Hao Tan and Mohit Bansal. Lxmert: Learning cross-modality encoder representations from transformers. In EMNLP, 2019. 1 +[51] Mengmeng Wang, Jiazheng Xing, and Yong Liu. Actionclip: A new paradigm for video action recognition. arXiv preprint arXiv:2109.08472, 2021. 2 +[52] Xudong Wang, Rohit Girdhar, Stella X Yu, and Ishan Misra. Cut and learn for unsupervised object detection and instance segmentation. arXiv preprint arXiv:2301.11320, 2023. 3 +[53] Xinlong Wang, Zhiding Yu, Shalini De Mello, Jan Kautz, Anima Anandkumar, Chunhua Shen, and Jose M Alvarez. Freesolo: Learning to segment objects without annotations. In CVPR, 2022. 3, 4, 5, 6, 7 +[54] Zhaoqing Wang, Yu Lu, Qiang Li, Xunqiang Tao, Yandong Guo, Mingming Gong, and Tongliang Liu. Cris: Clip-driven referring image segmentation. In CVPR, 2022. 1, 3, 7, 8 +[55] Zhichao Wei, Xiaohao Chen, Mingqiang Chen, and Siyu Zhu. Learning aligned cross-modal representations for referring image segmentation. arXiv preprint arXiv:2301.06429, 2023. 3 +[56] Chenyun Wu, Zhe Lin, Scott Cohen, Trung Bui, and Subhransu Maji. Phrasescut: Language-based image segmentation in the wild. In CVPR, 2020. 7 +[57] Jianzong Wu, Xiangtai Li, Xia Li, Henghui Ding, Yunhai Tong, and Dacheng Tao. Towards robust referring image segmentation. arXiv preprint arXiv:2209.09554, 2022. 3 +[58] Mengde Xu, Zheng Zhang, Fangyun Wei, Han Hu, and Xiang Bai. Side adapter network for open-vocabulary semantic segmentation. arXiv preprint arXiv:2302.12242, 2023. 2 +[59] Mengde Xu, Zheng Zhang, Fangyun Wei, Yutong Lin, Yue Cao, Han Hu, and Xiang Bai. A simple baseline for open-vocabulary semantic segmentation with pre-trained vision-language model. In ECCV, 2022. 
1, 2, 5, 7 +[60] Zhao Yang, Jiaqi Wang, Yansong Tang, Kai Chen, Hengshuang Zhao, and Philip HS Torr. Lavt: Language-aware vision transformer for referring image segmentation. In CVPR, 2022. 3, 6, 7, 8 +[61] Linwei Ye, Mrigank Rochan, Zhi Liu, and Yang Wang. Cross-modal self-attention network for referring image segmentation. In CVPR, 2019. 3 +[62] Zicheng Zhang, Yi Zhu, Jianzhuang Liu, Xiaodan Liang, and Wei Ke. Coupalign: Coupling word-pixel with sentence-mask alignments for referring image segmentation. In NeurIPS, 2022. 3 +[63] Chong Zhou, Chen Change Loy, and Bo Dai. Extract free dense labels from clip. In ECCV, 2022. 1, 2, 7 +[64] Ziqin Zhou, Bowen Zhang, Yinjie Lei, Lingqiao Liu, and Yifan Liu. Zegclip: Towards adapting clip for zero-shot semantic segmentation. arXiv preprint arXiv:2212.03588, 2022.2 \ No newline at end of file diff --git a/2023/Zero-Shot Referring Image Segmentation With Global-Local Context Features/images.zip b/2023/Zero-Shot Referring Image Segmentation With Global-Local Context Features/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..99f6f64cb94f5c35da2001f2933196c7b4704414 --- /dev/null +++ b/2023/Zero-Shot Referring Image Segmentation With Global-Local Context Features/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b4d48a4fe773ad076f0a66349c20a2b29f6a5b473a7f58dddc4488251baf036 +size 614072 diff --git a/2023/Zero-Shot Referring Image Segmentation With Global-Local Context Features/layout.json b/2023/Zero-Shot Referring Image Segmentation With Global-Local Context Features/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..1887e44a658ad4116349b505d99cd0a122aa251b --- /dev/null +++ b/2023/Zero-Shot Referring Image Segmentation With Global-Local Context Features/layout.json @@ -0,0 +1,10368 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 57, + 103, + 536, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 103, + 536, + 121 + ], + "spans": [ + { + "bbox": [ + 57, + 103, + 536, + 121 + ], + "type": "text", + "content": "Zero-shot Referring Image Segmentation with Global-Local Context Features" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 154, + 141, + 232, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 141, + 232, + 156 + ], + "spans": [ + { + "bbox": [ + 154, + 141, + 232, + 156 + ], + "type": "text", + "content": "Seonghoon " + }, + { + "bbox": [ + 154, + 141, + 232, + 156 + ], + "type": "inline_equation", + "content": "\\mathrm{Yu}^{1}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 256, + 143, + 358, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 256, + 143, + 358, + 157 + ], + "spans": [ + { + "bbox": [ + 256, + 143, + 358, + 157 + ], + "type": "text", + "content": "Paul Hongsuck Seo2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 381, + 143, + 437, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 381, + 143, + 437, + 157 + ], + "spans": [ + { + "bbox": [ + 381, + 143, + 437, + 157 + ], + "type": "text", + "content": "Jeany Son1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 172, + 157, + 307, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 172, + 157, + 307, + 171 + ], + "spans": [ + { + "bbox": [ + 172, + 157, + 307, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 172, + 157, + 307, + 171 + ], + "type": "text", + "content": "AI Graduate School, 
GIST" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 332, + 158, + 421, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 332, + 158, + 421, + 171 + ], + "spans": [ + { + "bbox": [ + 332, + 158, + 421, + 171 + ], + "type": "text", + "content": "2Google Research" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 119, + 174, + 246, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 174, + 246, + 185 + ], + "spans": [ + { + "bbox": [ + 119, + 174, + 246, + 185 + ], + "type": "text", + "content": "seonghoon@gm.gist.ac.kr" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 269, + 174, + 359, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 269, + 174, + 359, + 185 + ], + "spans": [ + { + "bbox": [ + 269, + 174, + 359, + 185 + ], + "type": "text", + "content": "phseo@google.com" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 384, + 174, + 471, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 384, + 174, + 471, + 185 + ], + "spans": [ + { + "bbox": [ + 384, + 174, + 471, + 185 + ], + "type": "text", + "content": "jeany@gist.ac.kr" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "spans": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 238, + 290, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 238, + 290, + 490 + ], + "spans": [ + { + "bbox": [ + 46, + 238, + 290, + 490 + ], + "type": "text", + "content": "Referring image segmentation (RIS) aims to find a segmentation mask given a referring expression grounded to a region of the input image. Collecting labelled datasets for this task, however, is notoriously costly and labor-intensive. To overcome this issue, we propose a simple yet effective zero-shot referring image segmentation method by leveraging the pre-trained cross-modal knowledge from CLIP. In order to obtain segmentation masks grounded to the input text, we propose a mask-guided visual encoder that captures global and local contextual information of an input image. By utilizing instance masks obtained from off-the-shelf mask proposal techniques, our method is able to segment fine-detailed instance-level groundings. We also introduce a global-local text encoder where the global feature captures complex sentence-level semantics of the entire input expression while the local feature focuses on the target noun phrase extracted by a dependency parser. In our experiments, the proposed method outperforms several zero-shot baselines of the task and even the weakly supervised referring expression segmentation method with substantial margins. Our code is available at https://github.com/Seonghoon-Yu/Zero-shot-RIS." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 525, + 128, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 525, + 128, + 538 + ], + "spans": [ + { + "bbox": [ + 47, + 525, + 128, + 538 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 545, + 288, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 545, + 288, + 665 + ], + "spans": [ + { + "bbox": [ + 46, + 545, + 288, + 665 + ], + "type": "text", + "content": "Recent advances of deep learning has revolutionised computer vision and natural language processing, and addressed various tasks in the field of vision-and-language [4, 19, 27, 28, 36, 43, 50]. A key element in the recent success of the multi-modal models such as CLIP [43] is the contrastive image-text pre-training on a large set of image and text pairs. It has shown a remarkable zero-shot transferability on a wide range of tasks, such as object detection [9, 10, 13], semantic segmentation [7, 12, 59, 63], image captioning [40], visual question answering (VQA) [47] and so on." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 288, + 714 + ], + "type": "text", + "content": "Despite its good transferability of pre-trained multi-modal models, it is not straightforward to handle dense prediction tasks such as object detection and image segmentation. A pixel-level dense prediction task is challenging since there" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 215, + 430, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 215, + 430, + 224 + ], + "spans": [ + { + "bbox": [ + 307, + 215, + 430, + 224 + ], + "type": "text", + "content": "(a) Referring image segmentation task" + } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 310, + 233, + 366, + 289 + ], + "blocks": [ + { + "bbox": [ + 320, + 225, + 358, + 233 + ], + "lines": [ + { + "bbox": [ + 320, + 225, + 358, + 233 + ], + "spans": [ + { + "bbox": [ + 320, + 225, + 358, + 233 + ], + "type": "text", + "content": "Input image" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 410, + 225, + 441, + 234 + ], + "lines": [ + { + "bbox": [ + 410, + 225, + 441, + 234 + ], + "spans": [ + { + "bbox": [ + 410, + 225, + 441, + 234 + ], + "type": "text", + "content": "Input text" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 310, + 233, + 366, + 289 + ], + "lines": [ + { + "bbox": [ + 310, + 233, + 366, + 289 + ], + "spans": [ + { + "bbox": [ + 310, + 233, + 366, + 289 + ], + "type": "image", + "image_path": "1566abd5ebc5a9381de29c7ad4240a71b1407982ee1c649987e6d2bd794fe85f.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 369, + 243, + 484, + 251 + ], + "lines": [ + { + "bbox": [ + 369, + 243, + 484, + 251 + ], + "spans": [ + { + "bbox": [ + 369, + 243, + 484, + 251 + ], + "type": "text", + "content": "\"a cat is lying on the seat of the scooter\"" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 403, + 258, + 449, + 266 + ], + "lines": [ + { + "bbox": [ + 403, + 258, + 449, + 266 + ], + "spans": [ + { + "bbox": [ + 403, + 258, + 449, + 266 + ], + "type": "text", + "content": "\"the bottom cat\"" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 326, + 305, + 373, + 312 + ], + "lines": [ + { + "bbox": [ + 326, + 305, + 373, + 312 + ], + "spans": [ + { + "bbox": [ + 326, + 305, + 373, + 312 + ], + "type": "text", + 
"content": "Local-context" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 486, + 233, + 542, + 289 + ], + "blocks": [ + { + "bbox": [ + 376, + 273, + 476, + 281 + ], + "lines": [ + { + "bbox": [ + 376, + 273, + 476, + 281 + ], + "spans": [ + { + "bbox": [ + 376, + 273, + 476, + 281 + ], + "type": "text", + "content": "\"a scooter with two cats sitting on\"" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 493, + 225, + 535, + 233 + ], + "lines": [ + { + "bbox": [ + 493, + 225, + 535, + 233 + ], + "spans": [ + { + "bbox": [ + 493, + 225, + 535, + 233 + ], + "type": "text", + "content": "Output mask" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 486, + 233, + 542, + 289 + ], + "lines": [ + { + "bbox": [ + 486, + 233, + 542, + 289 + ], + "spans": [ + { + "bbox": [ + 486, + 233, + 542, + 289 + ], + "type": "image", + "image_path": "845f9db0e94f532cc292e82afcc911a6a0642a2ee247df769d8d528e523bc19a.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 468, + 304, + 539, + 312 + ], + "lines": [ + { + "bbox": [ + 468, + 304, + 539, + 312 + ], + "spans": [ + { + "bbox": [ + 468, + 304, + 539, + 312 + ], + "type": "text", + "content": "Global-Local context" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 294, + 406, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 294, + 406, + 303 + ], + "spans": [ + { + "bbox": [ + 308, + 294, + 406, + 303 + ], + "type": "text", + "content": "(b) Global-Local context in RIS" + } + ] + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 341, + 337, + 359, + 357 + ], + "blocks": [ + { + "bbox": [ + 328, + 318, + 372, + 325 + ], + "lines": [ + { + "bbox": [ + 328, + 318, + 372, + 325 + ], + "spans": [ + { + "bbox": [ + 328, + 318, + 372, + 325 + ], + "type": "text", + "content": "a cat is lying on" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 316, + 326, + 369, + 333 + ], + "lines": [ + { + "bbox": [ + 316, + 326, + 369, + 333 + ], + "spans": [ + { + "bbox": [ + 316, + 326, + 369, + 333 + ], + "type": "text", + "content": "the seat of the scod" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 341, + 337, + 359, + 357 + ], + "lines": [ + { + "bbox": [ + 341, + 337, + 359, + 357 + ], + "spans": [ + { + "bbox": [ + 341, + 337, + 359, + 357 + ], + "type": "image", + "image_path": "a29450ea7923b69ad64fef1514291153faf7e39ea55008fce3ecdcef6ba549b8.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 317, + 365, + 340, + 391 + ], + "blocks": [ + { + "bbox": [ + 317, + 365, + 340, + 391 + ], + "lines": [ + { + "bbox": [ + 317, + 365, + 340, + 391 + ], + "spans": [ + { + "bbox": [ + 317, + 365, + 340, + 391 + ], + "type": "image", + "image_path": "178bc5b70cd7d3de40222d1dfbb2bf377146b8f9c8606ad03ed9f872450cbae3.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 364, + 365, + 380, + 389 + ], + "blocks": [ + { + "bbox": [ + 364, + 365, + 380, + 389 + ], + "lines": [ + { + "bbox": [ + 364, + 365, + 380, + 389 + ], + "spans": [ + { + "bbox": [ + 364, + 365, + 380, + 389 + ], 
+ "type": "image", + "image_path": "1ee188e3d920ecfb3a8b26ae7b9c96712b7d816fc6ce10f5b982ef6191de3586.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 394, + 336, + 457, + 391 + ], + "blocks": [ + { + "bbox": [ + 402, + 304, + 452, + 312 + ], + "lines": [ + { + "bbox": [ + 402, + 304, + 452, + 312 + ], + "spans": [ + { + "bbox": [ + 402, + 304, + 452, + 312 + ], + "type": "text", + "content": "Global-context" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 402, + 318, + 450, + 326 + ], + "lines": [ + { + "bbox": [ + 402, + 318, + 450, + 326 + ], + "spans": [ + { + "bbox": [ + 402, + 318, + 450, + 326 + ], + "type": "text", + "content": "a cat is lying on" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 391, + 326, + 460, + 333 + ], + "lines": [ + { + "bbox": [ + 391, + 326, + 460, + 333 + ], + "spans": [ + { + "bbox": [ + 391, + 326, + 460, + 333 + ], + "type": "text", + "content": "the seat of the scooter" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 394, + 336, + 457, + 391 + ], + "lines": [ + { + "bbox": [ + 394, + 336, + 457, + 391 + ], + "spans": [ + { + "bbox": [ + 394, + 336, + 457, + 391 + ], + "type": "image", + "image_path": "d399ed7c9d810758449eb86e4fbbc0063b1a527981df09b0b9af26650ba47630.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 473, + 337, + 533, + 392 + ], + "blocks": [ + { + "bbox": [ + 477, + 319, + 528, + 327 + ], + "lines": [ + { + "bbox": [ + 477, + 319, + 528, + 327 + ], + "spans": [ + { + "bbox": [ + 477, + 319, + 528, + 327 + ], + "type": "text", + "content": "a cat is lying on" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 470, + 327, + 536, + 335 + ], + "lines": [ + { + "bbox": [ + 470, + 327, + 536, + 335 + ], + "spans": [ + { + "bbox": [ + 470, + 327, + 536, + 335 + ], + "type": "text", + "content": "the seat of the scooter" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 473, + 337, + 533, + 392 + ], + "lines": [ + { + "bbox": [ + 473, + 337, + 533, + 392 + ], + "spans": [ + { + "bbox": [ + 473, + 337, + 533, + 392 + ], + "type": "image", + "image_path": "23985f799d7872410bfd8292adda80207ee84a87eda2daa6213395e3b0dd9f7b.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 408, + 547, + 453 + ], + "lines": [ + { + "bbox": [ + 304, + 408, + 547, + 453 + ], + "spans": [ + { + "bbox": [ + 304, + 408, + 547, + 453 + ], + "type": "text", + "content": "Figure 1. Illustrations of the task of referring image segmentation and motivations of global-local context features. To find the grounded mask given an expression, we need to understand the relations between the objects as well as their semantics." + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_caption" + } + ], + "index": 39 + }, + { + "bbox": [ + 304, + 474, + 547, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 474, + 547, + 557 + ], + "spans": [ + { + "bbox": [ + 304, + 474, + 547, + 557 + ], + "type": "text", + "content": "is a substantial gap between the image-level contrastive pretraining task and the pixel-level downstream task such as semantic segmentation. 
There have been several attempts to reduce gap between two tasks [44, 54, 63], but these works aim to fine-tune the model consequently requiring task-specific dense annotations, which is notoriously labor-intensive and costly." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 304, + 558, + 548, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 558, + 548, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 558, + 548, + 714 + ], + "type": "text", + "content": "Referring image segmentation is a task to find the specific region in an image given a natural language text describing the region, and it is well-known as one of challenging vision- and language tasks. Collecting annotations for this task is even more challenging as the task requires to collect precise referring expression of the target region as well as its dense mask annotation. Recently, a weakly-supervised referring image segmentation method [48] is proposed to overcome this issue. However, it still requires high-level text expression annotations pairing with images for the target datasets and the performance of the method is far from that of the supervised methods. To tackle this issue, in this paper, we focus on zero-shot transferring from the pre-trained knowledge" + } + ] + } + ], + "index": 42 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "19456" + } + ] + } + ], + "index": 43 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 258, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 258, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 258, + 83 + ], + "type": "text", + "content": "of CLIP to the task of referring image segmentation." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 84, + 288, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 84, + 288, + 300 + ], + "spans": [ + { + "bbox": [ + 46, + 84, + 288, + 300 + ], + "type": "text", + "content": "Moreover, this task is challenging because it requires high-level understanding of language and comprehensive understanding of an image, as well as a dense instance-level prediction. There have been several works for zero-shot semantic segmentation [7, 12, 59, 63], but they cannot be directly extended to the zero-shot referring image segmentation task because it has different characteristics. 
Specifically, the semantic segmentation task does not need to distinguish instances, but the referring image segmentation task should be able to predict an instance-level segmentation mask. In addition, among multiple instances of the same class, only one instance described by the expression must be selected. For example, in Figure 1, there are two cats in the input image. If the input text is given by \"a cat is lying on the seat of the scooter\", the cat with the green mask is the proper output. To find this correct mask, we need to understand the relation between the objects (i.e. \"lying on the seat\") as well as their semantics (i.e. \"cat\", \"scooter\")." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 300, + 288, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 300, + 288, + 503 + ], + "spans": [ + { + "bbox": [ + 46, + 300, + 288, + 503 + ], + "type": "text", + "content": "In this paper, we propose a new baseline of zero-shot referring image segmentation task using a pre-trained model from CLIP, where global and local contexts of an image and an expression are handled in a consistent way. In order to localize an object mask region in an image given a textual referring expression, we propose a mask-guided visual encoder that captures global and local context information of an image given a mask. We also present a global-local textual encoder where the local-context is captured by a target noun phrase and the global context is captured by a whole sentence of the expressions. By combining features in two different context levels, our method is able to understand a comprehensive knowledge as well as a specific trait of the target object. Note that, although our method does not require any additional training on CLIP model, it outperforms all baselines and the weakly supervised referring image segmentation method with a big margin." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 59, + 505, + 280, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 505, + 280, + 516 + ], + "spans": [ + { + "bbox": [ + 59, + 505, + 280, + 516 + ], + "type": "text", + "content": "Our main contributions can be summarised as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 59, + 525, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 59, + 525, + 287, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 525, + 287, + 585 + ], + "spans": [ + { + "bbox": [ + 59, + 525, + 287, + 585 + ], + "type": "text", + "content": "- We propose a new task of zero-shot referring image segmentation based on CLIP without any additional training. To the best of our knowledge, this is the first work to study the zero-shot referring image segmentation task." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 59, + 595, + 287, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 595, + 287, + 655 + ], + "spans": [ + { + "bbox": [ + 59, + 595, + 287, + 655 + ], + "type": "text", + "content": "- We present a visual encoder and a textual encoder that integrates global and local contexts of images and sentences, respectively. Although the modalities of two encoders are different, our visual and textual features are dealt in a consistent way." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 59, + 665, + 287, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 665, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 59, + 665, + 287, + 712 + ], + "type": "text", + "content": "- The proposed global-local context features take full advantage of CLIP to capture the target object semantics as well as the relations between the objects in both visual and textual modalities." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 317, + 72, + 545, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 72, + 545, + 120 + ], + "spans": [ + { + "bbox": [ + 317, + 72, + 545, + 120 + ], + "type": "text", + "content": "- Our method consistently shows outstanding results compared to several baseline methods, and also outperforms the weakly supervised referring image segmentation method with substantial margins." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 133, + 392, + 146 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 133, + 392, + 146 + ], + "spans": [ + { + "bbox": [ + 306, + 133, + 392, + 146 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 155, + 547, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 155, + 547, + 406 + ], + "spans": [ + { + "bbox": [ + 304, + 155, + 547, + 406 + ], + "type": "text", + "content": "Zero-shot Transfer. Classical zero-shot learning aims to predict unseen classes that have not seen before by transferring the knowledge trained on the seen classes. Early works [3, 14, 34] leverage the pre-trained word embedding [5, 39] of class names or attributes and perform zero-shot prediction via mapping between visual representations of images and this word embedding. Recently, CLIP [43] and ALIGNN [19] shed a new light on the zero-shot learning via large-scale image-text pre-training. They show the successive results on various downstream tasks via zero-shot knowledge transfer, such as image captioning [40], video action localization [51], image-text retrieval [1] and so on. Contrary to classical zero-shot learning, zero-shot transfer has an advantage of avoiding fine-tuning the pre-trained model on the task-specific dataset, where collecting datasets is time-consuming. There have been several works that apply CLIP encoders directly with tiny architectural modification without additional training for semantic segmentation [63], referring expression comprehension [49], phrase localization [25] and object localization [17]. Our work is also lying on the line of this research field." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 426, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 426, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 426, + 547, + 713 + ], + "type": "text", + "content": "Zero-shot Dense Prediction Tasks. Very recently, with the success of pre-training models using large-scale image-text pairs, there have been several attempts to deal with dense prediction tasks with CLIP, e.g. object detection [9, 10, 13, 24, 30, 45], semantic segmentation [22, 29, 37, 42, 44, 58, 59, 63, 64] and so on. These dense prediction tasks, however, are challenging since CLIP learns image-level features not pixel-level fine-grained features. 
In order to handle this issue, ViLD [13] introduces a method which crop the image to contain only the bounding box region, and then extract the visual features of cropped regions using CLIP to classify the unseen objects. This approach is applied in a wide range of dense prediction tasks which are demanded the zero-shot transfer ability of CLIP [7, 9, 10, 12, 49, 59]. While this method only considers the cropped area, there are several methods [25, 63] to consider the global context in the image, not only just the cropped region. Adapting CLIP [25] proposed the phrase localization method by modifying CLIP to generate high-resolution spatial feature maps using superpixels. MaskCLIP [63] modifies the image encoder of CLIP by transforming the value embedding layer and the last linear layer into two " + }, + { + "bbox": [ + 304, + 426, + 547, + 713 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 304, + 426, + 547, + 713 + ], + "type": "text", + "content": " convolutional layers to handle pixel-level predictions. In this work, we focus on extracting both global and local context visual features with CLIP." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "19457" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 68, + 547, + 228 + ], + "blocks": [ + { + "bbox": [ + 49, + 68, + 547, + 228 + ], + "lines": [ + { + "bbox": [ + 49, + 68, + 547, + 228 + ], + "spans": [ + { + "bbox": [ + 49, + 68, + 547, + 228 + ], + "type": "image", + "image_path": "616e1a9316f9da7ca1627269ffaee1c21ebbda64d8d1632898fb35145bf1b376.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 236, + 546, + 269 + ], + "lines": [ + { + "bbox": [ + 46, + 236, + 546, + 269 + ], + "spans": [ + { + "bbox": [ + 46, + 236, + 546, + 269 + ], + "type": "text", + "content": "Figure 2. Overall framework of our global-local CLIP. Given an image and an expression as inputs, we extract global-local context visual features using mask proposals, and also we extract a global-local context textual feature. After computing the cosine similarity scores between all global-local context visual features and a global-local context textual feature, we choose the mask with the highest score." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 290, + 289, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 290, + 289, + 625 + ], + "spans": [ + { + "bbox": [ + 47, + 290, + 289, + 625 + ], + "type": "text", + "content": "Referring Image Segmentation. Referring image segmentation aims to segment a target object in an image given a natural linguistic expression introduced by [18]. There have been several fully-supervised methods for this task, where images and expressions are used as an input, and the target mask is given for training [2, 20, 33, 55, 60, 62]. Most of works [6, 11, 23, 60, 61] focuses on how to fuse those two features in different modalities extracted from independent encoders. 
Early works [26, 32] extract multi-modal features by simply concatenating visual and textual features and feed them into the segmentation networks [35] to predict dense segmentation masks. There have been two branches of works fusing cross-modal features; an attention based encoder fusion [11, 57, 60] and a cross-modal decoder fusion based on a Transformer decoder [6, 54, 61]. Recently, a CLIP-based approach, which learns separated image and text transformer using a contrastive pre-training, has been proposed [54]. Those fully supervised referring image segmentation methods show good performances in general, but they require dense annotations for target masks and comprehensive expressions describing the target object. To address this problem, TSEG [48] proposed a weakly-supervised referring image segmentation method which learns the segmentation model using text-based image-level supervisions. However, this method still requires high-level referring expression annotations with images for specific datasets. Therefore, we propose a new baseline for zero-shot referring image segmentation without any training or supervisions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 643, + 102, + 655 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 643, + 102, + 655 + ], + "spans": [ + { + "bbox": [ + 47, + 643, + 102, + 655 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 666, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 666, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 666, + 288, + 713 + ], + "type": "text", + "content": "In this section, we present the proposed method for zero-shot referring image segmentation in detail. We first show an overall framework of the proposed method (3.1), and then discuss the detailed methods for extracting visual features" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 305, + 289, + 545, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 289, + 545, + 313 + ], + "spans": [ + { + "bbox": [ + 305, + 289, + 545, + 313 + ], + "type": "text", + "content": "(3.2) and textual features (3.3) to encode global and local contextual information." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 323, + 422, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 323, + 422, + 335 + ], + "spans": [ + { + "bbox": [ + 306, + 323, + 422, + 335 + ], + "type": "text", + "content": "3.1. Overall Framework" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 342, + 547, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 342, + 547, + 413 + ], + "spans": [ + { + "bbox": [ + 304, + 342, + 547, + 413 + ], + "type": "text", + "content": "To solve the task of referring image segmentation, which aims to predict the target region grounded to the text description, it is essential to learn image and text representations in a shared embedding space. To this end, we adopt CLIP to leverage the pre-trained cross-modal features for images and natural language." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 415, + 548, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 415, + 548, + 688 + ], + "spans": [ + { + "bbox": [ + 304, + 415, + 548, + 688 + ], + "type": "text", + "content": "Our framework consists of two parts, as shown in Fig. 2: (1) a global-local visual encoder for visual representation, and (2) a global-local natural language encoder for referring expression representation. Given a set of mask proposals generated by an unsupervised mask generator [52, 53], we first extract two visual features, at the global-context and local-context levels, for each mask proposal, and then combine them into a single visual feature. Our global-context visual features comprehensively represent the masked area as well as the surrounding region, while the local-context visual features capture the representation of the specific masked region. This plays a key role in the referring image segmentation task because we need to focus on a small, specific target region using a comprehensive expression of the target. At the same time, given a sentence expressing the target, our textual representation is extracted by the CLIP text encoder. In order to understand a holistic expression of the target as well as to focus on the target object itself, we first extract a key noun phrase from the sentence using the dependency parser provided by spaCy [16], and then combine a global sentence feature and a local target noun phrase feature. Note that our visual and text encoders are designed to handle both global-context and local-context information in a consistent way." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 689, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 547, + 713 + ], + "type": "text", + "content": "Since our method is built on CLIP, where the visual and textual features are embedded in a common embedding" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "19458" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 65, + 70, + 161, + 337 + ], + "blocks": [ + { + "bbox": [ + 65, + 70, + 161, + 337 + ], + "lines": [ + { + "bbox": [ + 65, + 70, + 161, + 337 + ], + "spans": [ + { + "bbox": [ + 65, + 70, + 161, + 337 + ], + "type": "image", + "image_path": "8a1d0440dc97753c1d5e4d1c3617b6fc3c94d70f956ae0dae518057008c5d6aa.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 345, + 289, + 378 + ], + "lines": [ + { + "bbox": [ + 46, + 345, + 289, + 378 + ], + "spans": [ + { + "bbox": [ + 46, + 345, + 289, + 378 + ], + "type": "text", + "content": "Figure 3. Detailed illustration of our mask-guided global-context visual encoders in ResNet and ViT architectures: (a) Masked attention pooling in ResNet, (b) Token masking in ViT."
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 168, + 70, + 276, + 337 + ], + "blocks": [ + { + "bbox": [ + 168, + 70, + 276, + 337 + ], + "lines": [ + { + "bbox": [ + 168, + 70, + 276, + 337 + ], + "spans": [ + { + "bbox": [ + 168, + 70, + 276, + 337 + ], + "type": "image", + "image_path": "41270556ea24f805230e9b1fdf4d566cd914203d9d021aeb35626cfe26ce26af.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 399, + 288, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 399, + 288, + 460 + ], + "spans": [ + { + "bbox": [ + 46, + 399, + 288, + 460 + ], + "type": "text", + "content": "space, we can formulate the objective of our zero-shot image referring segmentation task as follows. Given inputs of an image " + }, + { + "bbox": [ + 46, + 399, + 288, + 460 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 46, + 399, + 288, + 460 + ], + "type": "text", + "content": " and a referring expression " + }, + { + "bbox": [ + 46, + 399, + 288, + 460 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 46, + 399, + 288, + 460 + ], + "type": "text", + "content": ", our method finds the mask that has the maximum similarity between its visual feature and the given textual feature among all mask proposals:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 480, + 288, + 499 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 480, + 288, + 499 + ], + "spans": [ + { + "bbox": [ + 107, + 480, + 288, + 499 + ], + "type": "interline_equation", + "content": "\\hat {m} = \\arg \\max _ {m \\in M (I)} \\operatorname {s i m} (\\mathbf {t}, \\mathbf {f} _ {m}), \\tag {1}", + "image_path": "8dcacc4181c76a30edc20b87b03652937d5e985a2f3ee4e0442f0e7880096963.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 508, + 288, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 508, + 288, + 556 + ], + "spans": [ + { + "bbox": [ + 46, + 508, + 288, + 556 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 508, + 288, + 556 + ], + "type": "inline_equation", + "content": "\\mathrm{sim}(\\cdot, \\cdot)" + }, + { + "bbox": [ + 46, + 508, + 288, + 556 + ], + "type": "text", + "content": " is a cosine similarity, " + }, + { + "bbox": [ + 46, + 508, + 288, + 556 + ], + "type": "inline_equation", + "content": "\\mathbf{t}" + }, + { + "bbox": [ + 46, + 508, + 288, + 556 + ], + "type": "text", + "content": " is the proposed global-local textual feature for a referring expression " + }, + { + "bbox": [ + 46, + 508, + 288, + 556 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 46, + 508, + 288, + 556 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 508, + 288, + 556 + ], + "type": "inline_equation", + "content": "\\mathbf{f}" + }, + { + "bbox": [ + 46, + 508, + 288, + 556 + ], + "type": "text", + "content": " is the mask-guided global-local visual feature, and " + }, + { + "bbox": [ + 46, + 508, + 288, + 556 + ], + "type": "inline_equation", + "content": "M(I)" + }, + { + "bbox": [ + 46, + 508, + 288, + 556 + ], + "type": "text", + "content": " is a mask proposal set for a given image " + }, + { + "bbox": [ + 46, + 508, + 288, + 556 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 46, + 508, + 288, + 556 + ], + "type": "text", + 
"content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 563, + 266, + 576 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 563, + 266, + 576 + ], + "spans": [ + { + "bbox": [ + 47, + 563, + 266, + 576 + ], + "type": "text", + "content": "3.2. Mask-guided Global-local Visual Features" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 582, + 288, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 288, + 652 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 288, + 652 + ], + "type": "text", + "content": "To segment the target region related to the referring expression, it is essential to understand a global relationship between multiple objects in the image as well as local semantic information of the target. In this section, we demonstrate how to extract global and local-context features using CLIP, and how to fuse them." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 653, + 289, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 289, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 289, + 714 + ], + "type": "text", + "content": "Since CLIP is designed to learn image-level representation, it is not well-suited for a pixel-level dense prediction such as an image segmentation. To overcome the limitation of using CLIP, we decompose the task into two sub-tasks: mask proposal generation and masked image-text matching." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 72, + 547, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 133 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 133 + ], + "type": "text", + "content": "In order to generate mask proposals, we use the off-the-shelf mask extractor [53] which is the unsupervised instance-level mask generation model. By using mask proposals explicitly, our method can handle fine-detailed instance-level segmentation masks with CLIP." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 146, + 547, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 146, + 547, + 253 + ], + "spans": [ + { + "bbox": [ + 304, + 146, + 547, + 253 + ], + "type": "text", + "content": "Global-context Visual Features. For each mask proposals, we first extract global-context visual features using the CLIP pre-trained model. The original visual features from CLIP, however, is designed to generate one single feature vector to describe the whole image. To tackle this issue, we modify a visual encoder from CLIP to extract features that contain information from not only the masked region but also surrounding regions to understand relationships between multiple objects." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 254, + 547, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 254, + 547, + 338 + ], + "spans": [ + { + "bbox": [ + 304, + 254, + 547, + 338 + ], + "type": "text", + "content": "In this paper, we use two different architectures for the visual encoder as in CLIP: ResNet [15] and Vision Transformer (ViT) [8]. 
For the visual encoder with the ResNet architecture, we denote a visual feature extractor without a pooling layer as " + }, + { + "bbox": [ + 304, + 254, + 547, + 338 + ], + "type": "inline_equation", + "content": "\\phi_{\\mathrm{f}}" + }, + { + "bbox": [ + 304, + 254, + 547, + 338 + ], + "type": "text", + "content": " and its attention pooling layer as " + }, + { + "bbox": [ + 304, + 254, + 547, + 338 + ], + "type": "inline_equation", + "content": "\\phi_{\\mathrm{att}}" + }, + { + "bbox": [ + 304, + 254, + 547, + 338 + ], + "type": "text", + "content": ". Then the visual feature, " + }, + { + "bbox": [ + 304, + 254, + 547, + 338 + ], + "type": "inline_equation", + "content": "\\mathbf{f}" + }, + { + "bbox": [ + 304, + 254, + 547, + 338 + ], + "type": "text", + "content": ", using the visual encoder of CLIP, " + }, + { + "bbox": [ + 304, + 254, + 547, + 338 + ], + "type": "inline_equation", + "content": "\\phi_{\\mathrm{CLIP}}" + }, + { + "bbox": [ + 304, + 254, + 547, + 338 + ], + "type": "text", + "content": ", can be expressed as follows:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 367, + 344, + 545, + 357 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 344, + 545, + 357 + ], + "spans": [ + { + "bbox": [ + 367, + 344, + 545, + 357 + ], + "type": "interline_equation", + "content": "\\mathbf {f} = \\phi_ {\\mathrm {C L I P}} (I) = \\phi_ {\\mathrm {a t t}} \\left(\\phi_ {\\mathrm {f}} (I)\\right), \\tag {2}", + "image_path": "094164c2f4447b2d0357b984fc9e17e0be00791863e8ad352d9ceaaf9a82dcdf.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 363, + 545, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 363, + 545, + 422 + ], + "spans": [ + { + "bbox": [ + 304, + 363, + 545, + 422 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 363, + 545, + 422 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 304, + 363, + 545, + 422 + ], + "type": "text", + "content": " is a given image. Similarly, since ViT has multiple multi-head attention layers, we divide this visual encoder into two parts: last " + }, + { + "bbox": [ + 304, + 363, + 545, + 422 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 363, + 545, + 422 + ], + "type": "text", + "content": " layers and the rest. We denote the former one by " + }, + { + "bbox": [ + 304, + 363, + 545, + 422 + ], + "type": "inline_equation", + "content": "\\phi_{\\mathrm{att}}" + }, + { + "bbox": [ + 304, + 363, + 545, + 422 + ], + "type": "text", + "content": ", and the later one by " + }, + { + "bbox": [ + 304, + 363, + 545, + 422 + ], + "type": "inline_equation", + "content": "\\phi_{\\mathrm{f}}" + }, + { + "bbox": [ + 304, + 363, + 545, + 422 + ], + "type": "text", + "content": " for ViT architectures based on CLIP." 
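One possible way to realize the phi_f / phi_att split of Eq. (2) is sketched below for the ResNet-50 visual encoder of the open-source CLIP reference implementation (github.com/openai/CLIP), assuming that implementation keeps its attention pooling in a submodule named `attnpool`; everything before it then plays the role of phi_f. This is a hedged illustration rather than the authors' code, and the analogous ViT split would instead cut the transformer at its last k blocks.

```python
import torch
import torch.nn as nn
import clip  # pip install git+https://github.com/openai/CLIP.git


@torch.no_grad()
def split_resnet_forward(visual: nn.Module, image: torch.Tensor):
    """Return (phi_f(I), phi_att(phi_f(I))) for a CLIP ModifiedResNet visual encoder."""
    attnpool = visual.attnpool
    visual.attnpool = nn.Identity()   # phi_f: backbone without the pooling layer
    try:
        feature_map = visual(image)   # (B, C, H', W') spatial feature map
    finally:
        visual.attnpool = attnpool    # always restore the original module
    pooled = attnpool(feature_map)    # phi_att: attention pooling -> (B, D)
    return feature_map, pooled


if __name__ == "__main__":
    model, _ = clip.load("RN50", device="cpu")
    dummy = torch.randn(1, 3, 224, 224)          # stands in for a preprocessed image
    fmap, f = split_resnet_forward(model.visual, dummy)
    print(fmap.shape, f.shape)                   # e.g. (1, 2048, 7, 7) and (1, 1024)
```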
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 423, + 545, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 423, + 545, + 447 + ], + "spans": [ + { + "bbox": [ + 304, + 423, + 545, + 447 + ], + "type": "text", + "content": "Then given an image " + }, + { + "bbox": [ + 304, + 423, + 545, + 447 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 304, + 423, + 545, + 447 + ], + "type": "text", + "content": " and a mask " + }, + { + "bbox": [ + 304, + 423, + 545, + 447 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 304, + 423, + 545, + 447 + ], + "type": "text", + "content": ", our global-context visual feature is defined as follows:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 376, + 453, + 545, + 468 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 376, + 453, + 545, + 468 + ], + "spans": [ + { + "bbox": [ + 376, + 453, + 545, + 468 + ], + "type": "interline_equation", + "content": "\\mathbf {f} _ {m} ^ {G} = \\phi_ {\\text {a t t}} \\left(\\phi_ {f} (I) \\odot \\bar {m}\\right), \\tag {3}", + "image_path": "74e6c03f73bd222ed561246edf03935f996d6d548333fe5bb484376799b4d7f9.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 474, + 545, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 474, + 545, + 521 + ], + "spans": [ + { + "bbox": [ + 304, + 474, + 545, + 521 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 474, + 545, + 521 + ], + "type": "inline_equation", + "content": "\\bar{m}" + }, + { + "bbox": [ + 304, + 474, + 545, + 521 + ], + "type": "text", + "content": " is the resized mask scaled to the size of the feature map, and " + }, + { + "bbox": [ + 304, + 474, + 545, + 521 + ], + "type": "inline_equation", + "content": "\\odot" + }, + { + "bbox": [ + 304, + 474, + 545, + 521 + ], + "type": "text", + "content": " is a Hadamard product operation. We illustrate more details of this masking strategy for each architecture of CLIP in Section 4.1 and Figure 3." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 521, + 545, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 545, + 604 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 545, + 604 + ], + "type": "text", + "content": "We refer to it as the global context visual feature, because the entire image is passed through the encoder and the feature map at the last layer contains the holistic information about the image. Although we use mask proposals to obtain the features only on masked regions on the feature map, these features already have comprehensive information about the scene." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 619, + 547, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 619, + 547, + 692 + ], + "spans": [ + { + "bbox": [ + 304, + 619, + 547, + 692 + ], + "type": "text", + "content": "Local-context Visual Features. To obtain local-context visual features given a mask proposal, we first mask the image and then crop the image to obtain a new image surrounding only an area of the mask proposal. 
After cropping and masking the image, it is passed to the visual encoder of CLIP to extract our local-context visual feature " + }, + { + "bbox": [ + 304, + 619, + 547, + 692 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_m^L" + }, + { + "bbox": [ + 304, + 619, + 547, + 692 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 369, + 696, + 545, + 711 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 696, + 545, + 711 + ], + "spans": [ + { + "bbox": [ + 369, + 696, + 545, + 711 + ], + "type": "interline_equation", + "content": "\\mathbf {f} _ {m} ^ {L} = \\phi_ {\\mathrm {C L I P}} \\left(\\mathcal {T} _ {\\text {c r o p}} (I \\odot m)\\right), \\tag {4}", + "image_path": "bd1467b02da48a62aae382422619248706d424b1fea6711487af691b4275d19c.jpg" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "19459" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 133 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 133 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 72, + 288, + 133 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_{crop}(\\cdot)" + }, + { + "bbox": [ + 46, + 72, + 288, + 133 + ], + "type": "text", + "content": " denotes a cropping operation. This approach is commonly used in zero-shot semantic segmentation methods [7,59]. Since this feature focuses on the masked region in the image where irrelevant regions are removed, it concentrates only on the target object itself." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 146, + 287, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 146, + 287, + 205 + ], + "spans": [ + { + "bbox": [ + 46, + 146, + 287, + 205 + ], + "type": "text", + "content": "Global-local Context Visual features. We aggregate global- and local-context features over masked regions to obtain one single visual feature that describe a representation of masked regions of the image. 
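The two feature extractors of Eqs. (3) and (4), together with the weighted combination introduced in Eq. (5) just below, can be sketched as follows. Here `phi_f`, `phi_att` and `clip_encode_image` are assumed callables (for instance the encoder split shown earlier plus CLIP's standard image encoder), and the resizing and cropping details are simplified, so this is an illustration of the idea rather than the authors' implementation.

```python
import torch
import torch.nn.functional as F


def global_context_feature(image, mask, phi_f, phi_att):
    """Eq. (3): pool only the feature-map cells covered by the resized mask."""
    fmap = phi_f(image)                                             # (1, C, H', W')
    m = F.interpolate(mask[None, None].float(), size=fmap.shape[-2:], mode="nearest")
    return phi_att(fmap * m)                                        # (1, D)


def local_context_feature(image, mask, clip_encode_image):
    """Eq. (4): mask the image, crop the mask's bounding box, then encode the crop."""
    masked = image * mask[None, None].float()                       # zero out irrelevant pixels
    ys, xs = torch.nonzero(mask, as_tuple=True)
    crop = masked[..., int(ys.min()):int(ys.max()) + 1, int(xs.min()):int(xs.max()) + 1]
    crop = F.interpolate(crop, size=(224, 224), mode="bilinear", align_corners=False)
    return clip_encode_image(crop)                                  # (1, D)


def global_local_visual_feature(image, mask, phi_f, phi_att, clip_encode_image, alpha=0.95):
    """Eq. (5): convex combination of the global- and local-context features."""
    f_g = global_context_feature(image, mask, phi_f, phi_att)
    f_l = local_context_feature(image, mask, clip_encode_image)
    return alpha * f_g + (1.0 - alpha) * f_l


if __name__ == "__main__":
    # Toy check with stand-in encoders (random projections instead of real CLIP modules).
    phi_f = lambda x: torch.randn(1, 2048, 7, 7)
    phi_att = lambda f: f.mean(dim=(2, 3))[:, :512]
    encode = lambda x: torch.randn(1, 512)
    image = torch.randn(1, 3, 224, 224)
    mask = torch.zeros(224, 224, dtype=torch.bool)
    mask[60:160, 80:200] = True
    print(global_local_visual_feature(image, mask, phi_f, phi_att, encode).shape)  # (1, 512)
```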
The global-local context visual feature is computed as follows:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 113, + 210, + 287, + 225 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 210, + 287, + 225 + ], + "spans": [ + { + "bbox": [ + 113, + 210, + 287, + 225 + ], + "type": "interline_equation", + "content": "\\mathbf {f} _ {m} = \\alpha \\mathbf {f} _ {m} ^ {G} + (1 - \\alpha) \\mathbf {f} _ {m} ^ {L}, \\tag {5}", + "image_path": "6d33f24e7471ea6921fd24e66a1bf42ad6b4fa37e270a8065825b4618bfc5df9.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 231, + 288, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 231, + 288, + 314 + ], + "spans": [ + { + "bbox": [ + 46, + 231, + 288, + 314 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 231, + 288, + 314 + ], + "type": "inline_equation", + "content": "\\alpha \\in [0,1]" + }, + { + "bbox": [ + 46, + 231, + 288, + 314 + ], + "type": "text", + "content": " is a constant parameter, " + }, + { + "bbox": [ + 46, + 231, + 288, + 314 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 46, + 231, + 288, + 314 + ], + "type": "text", + "content": " is a mask proposal, " + }, + { + "bbox": [ + 46, + 231, + 288, + 314 + ], + "type": "inline_equation", + "content": "\\mathbf{f}^G" + }, + { + "bbox": [ + 46, + 231, + 288, + 314 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 231, + 288, + 314 + ], + "type": "inline_equation", + "content": "\\mathbf{f}^L" + }, + { + "bbox": [ + 46, + 231, + 288, + 314 + ], + "type": "text", + "content": " are global-context and local-context visual features in Eq. (3) and Eq. (4), respectively. As in Eq. (1), the score for each mask proposal is then obtained by computing similarity between our global-local context visual features and the textual feature of the expression described in the next section." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 320, + 208, + 332 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 320, + 208, + 332 + ], + "spans": [ + { + "bbox": [ + 47, + 320, + 208, + 332 + ], + "type": "text", + "content": "3.3. Global-local Textual Features" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 339, + 287, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 339, + 287, + 399 + ], + "spans": [ + { + "bbox": [ + 46, + 339, + 287, + 399 + ], + "type": "text", + "content": "Similar to the visual features, it is important to understand a holistic meaning as well as the target object noun in given expressions. 
Given a referring expression " + }, + { + "bbox": [ + 46, + 339, + 287, + 399 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 46, + 339, + 287, + 399 + ], + "type": "text", + "content": ", we extract a global sentence feature, " + }, + { + "bbox": [ + 46, + 339, + 287, + 399 + ], + "type": "inline_equation", + "content": "\\mathbf{t}^G" + }, + { + "bbox": [ + 46, + 339, + 287, + 399 + ], + "type": "text", + "content": ", using the pre-trained CLIP text encoder, " + }, + { + "bbox": [ + 46, + 339, + 287, + 399 + ], + "type": "inline_equation", + "content": "\\psi_{\\mathrm{CLIP}}" + }, + { + "bbox": [ + 46, + 339, + 287, + 399 + ], + "type": "text", + "content": ", as follows:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 403, + 287, + 418 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 403, + 287, + 418 + ], + "spans": [ + { + "bbox": [ + 132, + 403, + 287, + 418 + ], + "type": "interline_equation", + "content": "\\mathbf {t} ^ {G} = \\psi_ {\\mathrm {C L I P}} (T). \\tag {6}", + "image_path": "e73919db40dcf3aec4ad39183c45c0f9b4f543dfb485529e08b46bbd6c11b807.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 423, + 287, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 423, + 287, + 495 + ], + "spans": [ + { + "bbox": [ + 46, + 423, + 287, + 495 + ], + "type": "text", + "content": "Although the CLIP text encoder can extract the textual representation aligning with the image-level representation, it is hard to focus on the target noun in the expression because the expression of this task is formed as a complex sentence containing multiple clauses, e.g. \"a dark brown leather sofa behind a foot stool that has a laptop computer on it\"." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 495, + 287, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 495, + 287, + 590 + ], + "spans": [ + { + "bbox": [ + 46, + 495, + 287, + 590 + ], + "type": "text", + "content": "To address this problem, we exploit a dependency parsing using spaCy [16] to find the target noun phrase, " + }, + { + "bbox": [ + 46, + 495, + 287, + 590 + ], + "type": "inline_equation", + "content": "\\mathrm{NP}(T)" + }, + { + "bbox": [ + 46, + 495, + 287, + 590 + ], + "type": "text", + "content": ", given the text expression " + }, + { + "bbox": [ + 46, + 495, + 287, + 590 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 46, + 495, + 287, + 590 + ], + "type": "text", + "content": ". To find the target noun phrase, we first find all noun phrases in the expression, and then select the target noun phrase that contains the root noun of the sentence. After identifying the target noun phrase in the input sentence, we extract the local-context textual feature from the CLIP textual encoder:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 123, + 596, + 287, + 610 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 596, + 287, + 610 + ], + "spans": [ + { + "bbox": [ + 123, + 596, + 287, + 610 + ], + "type": "interline_equation", + "content": "\\mathbf {t} ^ {L} = \\psi_ {\\mathrm {C L I P}} (\\mathrm {N P} (T)). 
\\tag {7}", + "image_path": "4ed180c42b6990899b032baf3a88fbd9c46069bd63d95cc0fbaea0c564e023ce.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 616, + 287, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 616, + 287, + 652 + ], + "spans": [ + { + "bbox": [ + 46, + 616, + 287, + 652 + ], + "type": "text", + "content": "Finally, our global-local context textual feature is computed by a weighted sum of the global and local textual features described in Eq. (6) and Eq. (7) as follows:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 116, + 656, + 287, + 671 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 656, + 287, + 671 + ], + "spans": [ + { + "bbox": [ + 116, + 656, + 287, + 671 + ], + "type": "interline_equation", + "content": "\\mathbf {t} = \\beta \\mathbf {t} ^ {G} + (1 - \\beta) \\mathbf {t} ^ {L}, \\tag {8}", + "image_path": "835ff8e4dcf6e97e174b28018a1f270c5dade339ed283aac8133192c45665b17.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 677, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 677, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\beta \\in [0,1]" + }, + { + "bbox": [ + 46, + 677, + 287, + 714 + ], + "type": "text", + "content": " is a constant parameter, " + }, + { + "bbox": [ + 46, + 677, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{t}^G" + }, + { + "bbox": [ + 46, + 677, + 287, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 677, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{t}^L" + }, + { + "bbox": [ + 46, + 677, + 287, + 714 + ], + "type": "text", + "content": " are global sentence and local noun-phrase textual features, respectively." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 72, + 441, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 441, + 85 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 441, + 85 + ], + "type": "text", + "content": "4. Implementation Details" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 91, + 547, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 91, + 547, + 163 + ], + "spans": [ + { + "bbox": [ + 304, + 91, + 547, + 163 + ], + "type": "text", + "content": "We use unsupervised instance segmentation methods, FreeSOLO [53], to obtain mask proposals, and the shorter size of an input image is set to 800. For CLIP, the size of an image is set to " + }, + { + "bbox": [ + 304, + 91, + 547, + 163 + ], + "type": "inline_equation", + "content": "224 \\times 224" + }, + { + "bbox": [ + 304, + 91, + 547, + 163 + ], + "type": "text", + "content": ". The number of masking layers, " + }, + { + "bbox": [ + 304, + 91, + 547, + 163 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 91, + 547, + 163 + ], + "type": "text", + "content": " in ViT is set to 3. 
We set " + }, + { + "bbox": [ + 304, + 91, + 547, + 163 + ], + "type": "inline_equation", + "content": "\\alpha = 0.85" + }, + { + "bbox": [ + 304, + 91, + 547, + 163 + ], + "type": "text", + "content": " for RefCOCOg, 0.95 for RefCOCO and RefCOCO+, and " + }, + { + "bbox": [ + 304, + 91, + 547, + 163 + ], + "type": "inline_equation", + "content": "\\beta = 0.5" + }, + { + "bbox": [ + 304, + 91, + 547, + 163 + ], + "type": "text", + "content": " for all datasets." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 171, + 528, + 184 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 171, + 528, + 184 + ], + "spans": [ + { + "bbox": [ + 305, + 171, + 528, + 184 + ], + "type": "text", + "content": "4.1. Masking in Global-context Visual Encoder" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 190, + 547, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 190, + 547, + 237 + ], + "spans": [ + { + "bbox": [ + 304, + 190, + 547, + 237 + ], + "type": "text", + "content": "We use both ResNet-50 and ViT-B/32 architectures for the CLIP visual encoder. Masking strategies of the global-context visual encoder for these two architecture are mostly similar but have small differences, described next." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 253, + 547, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 253, + 547, + 421 + ], + "spans": [ + { + "bbox": [ + 304, + 253, + 547, + 421 + ], + "type": "text", + "content": "Masked Attention Pooling in ResNet [15]. In a ResNet-based visual encoder of the original CLIP, a global average pooling layer is replaced by an attention pooling layer. This attention pooling layer has the same architecture as the multi-head attention in a Transformer. A query of the attention pooling layer is computed by a global average pooling operation onto the feature maps extracted by the ResNet backbone. A key and a value of the attention pooling layer is given by a flattened feature map. In our masked attention pooling, we mask the feature map using a given mask before computing query, key and value. After masking feature maps, we compute query, key and value, and then they are fed into the multi-head attention layer. The detailed illustration of our masked attention pooling in ResNet is shown in Figure 3a." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 436, + 547, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 436, + 547, + 628 + ], + "spans": [ + { + "bbox": [ + 304, + 436, + 547, + 628 + ], + "type": "text", + "content": "Token Masking in ViT [8]. Following ViT, we divide an image into grid patches, and embed patches to a linear layer with positional embeddings to get tokens, and then process those tokens with a series of Transformer layer. To capture global-context of images, we mask tokens in only the last " + }, + { + "bbox": [ + 304, + 436, + 547, + 628 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 436, + 547, + 628 + ], + "type": "text", + "content": " Transformer layers. The tokens are reshaped and masked by a given mask proposal, and then flattened and applied to the subsequent Transformer layer. As ViT has a class token (CLS), we use the final output feature from this CLS token as our global-context visual representation. The detailed method of our token masking in ViT is also shown in Figure 3b. 
In our experiments, we use ViT-B/32 architecture for the backbone of our ViT-based visual encoder, and we apply a token masking to the last 3 layers in the visual encoder. We show the performances with respect to the location of token masking layers in the supplementary materials." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 639, + 388, + 653 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 639, + 388, + 653 + ], + "spans": [ + { + "bbox": [ + 306, + 639, + 388, + 653 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 658, + 427, + 670 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 658, + 427, + 670 + ], + "spans": [ + { + "bbox": [ + 306, + 658, + 427, + 670 + ], + "type": "text", + "content": "5.1. Datasets and Metrics" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "type": "text", + "content": "We evaluate our method on RefCOCO [41], RefCOCO+ [41] and RefCOCOg [21, 38], where the images and masks in MS-COCO [31] dataset are used to annotate" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "19460" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 59, + 123, + 535, + 350 + ], + "blocks": [ + { + "bbox": [ + 46, + 70, + 547, + 114 + ], + "lines": [ + { + "bbox": [ + 46, + 70, + 547, + 114 + ], + "spans": [ + { + "bbox": [ + 46, + 70, + 547, + 114 + ], + "type": "text", + "content": "Table 1. Comparison with Zero-shot RIS baseline methods on three standard benchmark datasets. U: The UMD partition. G: The Google partition. All baseline methods use FreeSOLO as the mask proposal network. † denotes that the model is initialized with the ImageNet pre-trained weights and trained on RIS datasets. FreeSOLO upper-bound is computed between the GT mask and the maximum overlapped FreeSOLO mask with the GT mask." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 59, + 123, + 535, + 350 + ], + "lines": [ + { + "bbox": [ + 59, + 123, + 535, + 350 + ], + "spans": [ + { + "bbox": [ + 59, + 123, + 535, + 350 + ], + "type": "table", + "html": "
MetricMethodsVisual EncoderRefCOCORefCOCO+RefCOCOg
valtest Atest Bvaltest Atest Bval(U)test(U)val(G)
oIoUSupervised SoTA method [60]72.7375.8268.7962.1468.3855.1061.2462.0960.50
Zero-Shot Baselines
Grad-CAMResNet-5014.0215.0713.4914.4614.9714.0412.5112.8112.86
Score mapResNet-5019.8719.3120.2220.3719.6520.7518.8819.1619.15
Region tokenViT-B/3221.7120.3122.6322.6120.9123.4625.5225.3825.29
CroppingResNet-5022.3620.4922.6923.9522.0323.4928.2027.6427.47
CroppingViT-B/3222.7321.1123.0824.0922.4223.9328.6927.5127.70
Global-Local CLIP (ours)ResNet-5024.5823.3824.3525.8724.6125.6130.0729.8329.45
Global-Local CLIP (ours)ViT-B/3224.8823.6124.6626.1624.9025.8331.1130.9630.69
FreeSOLO upper-bound-42.0842.5243.5242.1742.5243.8048.8148.9648.49
mIoUZero-Shot Baselines
Grad-CAMResNet-5014.2215.9313.1814.8015.8713.7812.4713.1613.30
Score mapResNet-5021.3220.9621.5721.6121.1722.3020.0720.4320.63
Region tokenViT-B/3223.4322.0724.6224.5122.6425.3727.5727.3427.69
CroppingResNet-5024.3122.3724.6626.3123.9425.6931.2730.8730.78
CroppingViT-B/3224.8322.5825.7226.3324.0626.4631.8830.9431.06
Global-Local CLIP (ours)ResNet-5026.7024.9926.4828.2226.5427.8633.0233.1232.79
Global-Local CLIP (ours)ViT-B/3226.2024.9426.5627.8025.6427.8433.5233.6733.61
FreeSOLO upper-bound-48.2546.6250.4348.2846.6250.6252.4452.9152.76
Weakly-supervised method
TSEG [48]ViT-S/16†25.95--22.62--23.41--
", + "image_path": "47368bb0bceecea861e0a5fe7b3dd85554b0d951f89010e0123ceeb39a8f0cd4.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 49, + 366, + 180, + 441 + ], + "blocks": [ + { + "bbox": [ + 49, + 366, + 180, + 441 + ], + "lines": [ + { + "bbox": [ + 49, + 366, + 180, + 441 + ], + "spans": [ + { + "bbox": [ + 49, + 366, + 180, + 441 + ], + "type": "table", + "html": "
MethodTrain datasetoIoU on PhraseCut
AllUnseen
CRISRefCOCO15.5313.75
RefCOCO+16.3014.62
RefCOCOg16.2413.88
LAVTRefCOCO16.6814.43
RefCOCO+16.6413.49
RefCOCOg16.0513.48
OursN/A23.6422.98
", + "image_path": "af79deb05cb5a8b857a14e275374b1e1eac6817510a35a9ee5f9423acbcac241.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 182, + 366, + 279, + 441 + ], + "blocks": [ + { + "bbox": [ + 182, + 366, + 279, + 441 + ], + "lines": [ + { + "bbox": [ + 182, + 366, + 279, + 441 + ], + "spans": [ + { + "bbox": [ + 182, + 366, + 279, + 441 + ], + "type": "image", + "image_path": "0ce096b4aec5fe9d6429feeca993b26f9dc691311bb2d6e48e649f2f9863ac2b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 445, + 288, + 477 + ], + "lines": [ + { + "bbox": [ + 47, + 445, + 288, + 477 + ], + "spans": [ + { + "bbox": [ + 47, + 445, + 288, + 477 + ], + "type": "text", + "content": "Figure 4. Comparisons to supervised methods in zero-shot setting on PhraseCut (left), and in few-shot setting on RefCOCOg (right). Unseen denotes a subset with classes that are not seen in RefCOCO." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 495, + 288, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 495, + 288, + 614 + ], + "spans": [ + { + "bbox": [ + 46, + 495, + 288, + 614 + ], + "type": "text", + "content": "the ground-truth of the referring image segmentation task. RefCOCO, RefCOCO+ and RefCOCOg have 19,994, 19,992 and 26,711 images with 142,210, 141,564 and 104,560 referring expressions, respectively. RefCOCO and RefCOCO+ have shorter expressions and an average of 1.6 nouns and 3.6 words are included in one expression, while RefCOCOg expresses more complex relations with longer sentences and has an average of about 2.8 nouns and 8.4 words. The detailed statistics of those datasets are demonstrated in our supplementary materials." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "text", + "content": "For the evaluation metrics, we use the overall Intersection over Union (oIoU) and the mean Intersection over Union (mIoU) which are the common metrics for the referring image segmentation task. The oIoU is measured by the total area of intersection divided by the total area of union, where the total area is computed by accumulating over all examples. In our ablation study, we use oIoUs since most of supervised RIS methods [6, 23] adopt it. We also report the mIoUs as" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 323, + 397, + 526, + 459 + ], + "blocks": [ + { + "bbox": [ + 305, + 366, + 545, + 388 + ], + "lines": [ + { + "bbox": [ + 305, + 366, + 545, + 388 + ], + "spans": [ + { + "bbox": [ + 305, + 366, + 545, + 388 + ], + "type": "text", + "content": "Table 2. oIoU results of our method and the baselines using COCO instance GT masks. We use a ViT-B/32 model for a visual encoder." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 323, + 397, + 526, + 459 + ], + "lines": [ + { + "bbox": [ + 323, + 397, + 526, + 459 + ], + "spans": [ + { + "bbox": [ + 323, + 397, + 526, + 459 + ], + "type": "table", + "html": "
MethodRefCOCORefCOCO+RefCOCOg
Grad-CAM18.3218.1421.24
Score map23.9725.5028.11
Region token35.5938.1340.19
Cropping36.3242.0747.42
Ours37.0542.5951.01
", + "image_path": "85b9e510817bf3557e89da49217ff040a4503b3c33a18c9410fd741e918655f3.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 318, + 499, + 533, + 552 + ], + "blocks": [ + { + "bbox": [ + 305, + 468, + 545, + 491 + ], + "lines": [ + { + "bbox": [ + 305, + 468, + 545, + 491 + ], + "spans": [ + { + "bbox": [ + 305, + 468, + 545, + 491 + ], + "type": "text", + "content": "Table 3. oIoU results with different context-level features on the val split of RefCOCOg. We use a ViT-B/32 model for a visual encoder." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 318, + 499, + 533, + 552 + ], + "lines": [ + { + "bbox": [ + 318, + 499, + 533, + 552 + ], + "spans": [ + { + "bbox": [ + 318, + 499, + 533, + 552 + ], + "type": "table", + "html": "
Encoder VariantsTextual features
GlobalLocalGlobal-Local
Visual \nfeaturesGlobal27.0327.3727.60
Local28.6925.2329.48
Global-Local30.1827.9431.11
", + "image_path": "876759a51b8f1a597312be4e95a7de6a849a91dae299012f9866cca5a0b7ef48.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 570, + 545, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 570, + 545, + 594 + ], + "spans": [ + { + "bbox": [ + 305, + 570, + 545, + 594 + ], + "type": "text", + "content": "in [48], which computes the average IoU across all examples while considering the object sizes." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 601, + 372, + 613 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 601, + 372, + 613 + ], + "spans": [ + { + "bbox": [ + 306, + 601, + 372, + 613 + ], + "type": "text", + "content": "5.2. Baselines" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 620, + 547, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 620, + 547, + 667 + ], + "spans": [ + { + "bbox": [ + 305, + 620, + 547, + 667 + ], + "type": "text", + "content": "We modify some baseline methods extracting dense predictions from CLIP into zero-shot RIS task to compare with our framework, and use FreeSOLO [53] as a mask generator in all baselines." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 677, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 677, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 677, + 547, + 713 + ], + "type": "text", + "content": "- Grad-CAM: The first baseline is utilizing gradient-based activation map based on Grad-CAM [46] which has been verified in the prior work [17]. After obtaining the activa" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "19461" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 67, + 79, + 128, + 140 + ], + "blocks": [ + { + "bbox": [ + 86, + 70, + 107, + 78 + ], + "lines": [ + { + "bbox": [ + 86, + 70, + 107, + 78 + ], + "spans": [ + { + "bbox": [ + 86, + 70, + 107, + 78 + ], + "type": "text", + "content": "Image" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 67, + 79, + 128, + 140 + ], + "lines": [ + { + "bbox": [ + 67, + 79, + 128, + 140 + ], + "spans": [ + { + "bbox": [ + 67, + 79, + 128, + 140 + ], + "type": "image", + "image_path": "175ec71baec5767f30f4da75d8358cc443024c51dc4780ec22d5a79bdb451c73.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 132, + 99, + 170, + 108 + ], + "lines": [ + { + "bbox": [ + 132, + 99, + 170, + 108 + ], + "spans": [ + { + "bbox": [ + 132, + 99, + 170, + 108 + ], + "type": "text", + "content": "Expression:" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 140, + 109, + 267, + 117 + ], + "lines": [ + { + "bbox": [ + 140, + 109, + 267, + 117 + ], + "spans": [ + { + "bbox": [ + 140, + 109, + 267, + 117 + ], + "type": "text", + "content": "the banana the person is holding" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 123, + 141, + 160, + 148 + ], + "lines": [ + { + "bbox": [ + 123, + 141, + 160, + 148 + ], + "spans": 
[ + { + "bbox": [ + 123, + 141, + 160, + 148 + ], + "type": "text", + "content": "Local visual" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 67, + 149, + 113, + 194 + ], + "blocks": [ + { + "bbox": [ + 86, + 141, + 97, + 148 + ], + "lines": [ + { + "bbox": [ + 86, + 141, + 97, + 148 + ], + "spans": [ + { + "bbox": [ + 86, + 141, + 97, + 148 + ], + "type": "text", + "content": "GT" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 67, + 149, + 113, + 194 + ], + "lines": [ + { + "bbox": [ + 67, + 149, + 113, + 194 + ], + "spans": [ + { + "bbox": [ + 67, + 149, + 113, + 194 + ], + "type": "image", + "image_path": "59bcf0e50b884b1cc9e410e95f689b8a0e0ef98b998d456350f68a6d85a6c57c.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 118, + 149, + 163, + 194 + ], + "blocks": [ + { + "bbox": [ + 118, + 149, + 163, + 194 + ], + "lines": [ + { + "bbox": [ + 118, + 149, + 163, + 194 + ], + "spans": [ + { + "bbox": [ + 118, + 149, + 163, + 194 + ], + "type": "image", + "image_path": "b79efdc9aef4bdd6cea7434cf89bcb992bfa031c993f159d593661738218379c.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 170, + 149, + 216, + 194 + ], + "blocks": [ + { + "bbox": [ + 171, + 141, + 214, + 148 + ], + "lines": [ + { + "bbox": [ + 171, + 141, + 214, + 148 + ], + "spans": [ + { + "bbox": [ + 171, + 141, + 214, + 148 + ], + "type": "text", + "content": "Global visual" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 170, + 149, + 216, + 194 + ], + "lines": [ + { + "bbox": [ + 170, + 149, + 216, + 194 + ], + "spans": [ + { + "bbox": [ + 170, + 149, + 216, + 194 + ], + "type": "image", + "image_path": "34f4f27f3a778b3fbe4a15c39d0954a877e63fb00293b0bf1be62da800064c68.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 223, + 149, + 268, + 194 + ], + "blocks": [ + { + "bbox": [ + 225, + 140, + 265, + 148 + ], + "lines": [ + { + "bbox": [ + 225, + 140, + 265, + 148 + ], + "spans": [ + { + "bbox": [ + 225, + 140, + 265, + 148 + ], + "type": "text", + "content": "Global-Local" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 223, + 149, + 268, + 194 + ], + "lines": [ + { + "bbox": [ + 223, + 149, + 268, + 194 + ], + "spans": [ + { + "bbox": [ + 223, + 149, + 268, + 194 + ], + "type": "image", + "image_path": "54814498ca74ac1fd2495ddf3c8a74115c921e8dcb790e4479c8484932d955e5.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 67, + 206, + 128, + 267 + ], + "blocks": [ + { + "bbox": [ + 86, + 198, + 107, + 205 + ], + "lines": [ + { + "bbox": [ + 86, + 198, + 107, + 205 + ], + "spans": [ + { + "bbox": [ + 86, + 198, + 107, + 205 + ], + "type": "text", + "content": "Image" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 67, + 206, + 128, + 267 + ], + "lines": [ + { + "bbox": [ + 67, + 206, + 128, + 267 + ], + "spans": [ + { + "bbox": [ + 67, + 206, + 128, + 267 + ], + "type": "image", + "image_path": "8dba6ad77e3b2a9dc43c6ec0d74e28331d5380c6cc456c8723e59e9e1c0b4759.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": 
"image_body" + }, + { + "bbox": [ + 132, + 222, + 170, + 230 + ], + "lines": [ + { + "bbox": [ + 132, + 222, + 170, + 230 + ], + "spans": [ + { + "bbox": [ + 132, + 222, + 170, + 230 + ], + "type": "text", + "content": "Expression:" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 140, + 231, + 262, + 247 + ], + "lines": [ + { + "bbox": [ + 140, + 231, + 262, + 247 + ], + "spans": [ + { + "bbox": [ + 140, + 231, + 262, + 247 + ], + "type": "text", + "content": "a green bicycle ridden by a man in a black windbreaker" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 86, + 269, + 97, + 275 + ], + "lines": [ + { + "bbox": [ + 86, + 269, + 97, + 275 + ], + "spans": [ + { + "bbox": [ + 86, + 269, + 97, + 275 + ], + "type": "text", + "content": "GT" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 67, + 277, + 111, + 321 + ], + "blocks": [ + { + "bbox": [ + 67, + 277, + 111, + 321 + ], + "lines": [ + { + "bbox": [ + 67, + 277, + 111, + 321 + ], + "spans": [ + { + "bbox": [ + 67, + 277, + 111, + 321 + ], + "type": "image", + "image_path": "60c7fe2b38e6fe473564eceab8aa34e9d80bdaeec8881270a1d8de660b904858.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 331, + 288, + 364 + ], + "lines": [ + { + "bbox": [ + 46, + 331, + 288, + 364 + ], + "spans": [ + { + "bbox": [ + 46, + 331, + 288, + 364 + ], + "type": "text", + "content": "Figure 5. Qualitative results with different levels of visual features. COCO instance GT masks are used as mask proposals to validate the effect of the global-local context visual features." + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 119, + 277, + 164, + 321 + ], + "blocks": [ + { + "bbox": [ + 122, + 269, + 160, + 276 + ], + "lines": [ + { + "bbox": [ + 122, + 269, + 160, + 276 + ], + "spans": [ + { + "bbox": [ + 122, + 269, + 160, + 276 + ], + "type": "text", + "content": "Local visual" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 119, + 277, + 164, + 321 + ], + "lines": [ + { + "bbox": [ + 119, + 277, + 164, + 321 + ], + "spans": [ + { + "bbox": [ + 119, + 277, + 164, + 321 + ], + "type": "image", + "image_path": "5da1f89587b0963d59efff3c90e8212d3c46fce91c0dbf41b445923fa81b8cc1.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 171, + 277, + 216, + 321 + ], + "blocks": [ + { + "bbox": [ + 172, + 269, + 214, + 276 + ], + "lines": [ + { + "bbox": [ + 172, + 269, + 214, + 276 + ], + "spans": [ + { + "bbox": [ + 172, + 269, + 214, + 276 + ], + "type": "text", + "content": "Global visual" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 171, + 277, + 216, + 321 + ], + "lines": [ + { + "bbox": [ + 171, + 277, + 216, + 321 + ], + "spans": [ + { + "bbox": [ + 171, + 277, + 216, + 321 + ], + "type": "image", + "image_path": "a3a590e289940f97c228a5acbfef851aa08b50e856c05a0e352cac07442bfa86.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 223, + 277, + 268, + 321 + ], + "blocks": [ + { + "bbox": [ + 225, + 268, + 266, + 276 + ], + "lines": [ + { + "bbox": [ + 225, + 268, + 266, + 276 + ], + 
"spans": [ + { + "bbox": [ + 225, + 268, + 266, + 276 + ], + "type": "text", + "content": "Global-Local" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 223, + 277, + 268, + 321 + ], + "lines": [ + { + "bbox": [ + 223, + 277, + 268, + 321 + ], + "spans": [ + { + "bbox": [ + 223, + 277, + 268, + 321 + ], + "type": "image", + "image_path": "07e6c57a984bdebd860e3e01c1404e23b6827ecdcfbae57a653560c07aa2766f.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "bbox": [ + 55, + 386, + 288, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 386, + 288, + 422 + ], + "spans": [ + { + "bbox": [ + 55, + 386, + 288, + 422 + ], + "type": "text", + "content": "tion maps using the similarity score of image-text pairs, we mask the maps and aggregate scores for all mask proposals, and select the mask with the highest score." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 47, + 431, + 289, + 714 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 47, + 431, + 289, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 431, + 289, + 574 + ], + "spans": [ + { + "bbox": [ + 47, + 431, + 289, + 574 + ], + "type": "text", + "content": "- **Score Map:** The second baseline is the method extracting a dense score map as in MaskCLIP [63]. As in MaskCLIP, to obtain dense score maps without pooling, a value linear layer and the last layer in the attention pooling are transformed into two consecutive " + }, + { + "bbox": [ + 47, + 431, + 289, + 574 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 47, + 431, + 289, + 574 + ], + "type": "text", + "content": " convolution layers. The feature map extracted from ResNet is forwarded to those two layers to get language-compatible dense image feature map, and then compute a cosine similarity with CLIP's textual feature. After obtaining a score map, we project mask proposals to a score map. The scores in the mask area are averaged and then we select the mask with the maximum score." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 47, + 584, + 288, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 584, + 288, + 668 + ], + "spans": [ + { + "bbox": [ + 47, + 584, + 288, + 668 + ], + "type": "text", + "content": "- Region Token in ViT: The third baseline is a method used in Adapting CLIP [25]. Similar to Adapting CLIP, we use region tokens for each mask proposal for all Transformer layers in CLIP's visual encoder instead of using superpixels. We finally compute the cosine similarity between each class token of a mask proposal and CLIP's textual feature, and then choose the mask with the highest score." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 47, + 677, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 288, + 714 + ], + "type": "text", + "content": "- Cropping: The last baseline is our local-context visual features described in Section 3.2. 
Cropping and masking is a commonly used approach utilizing CLIP for extracting" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 308, + 83, + 354, + 127 + ], + "blocks": [ + { + "bbox": [ + 322, + 73, + 344, + 80 + ], + "lines": [ + { + "bbox": [ + 322, + 73, + 344, + 80 + ], + "spans": [ + { + "bbox": [ + 322, + 73, + 344, + 80 + ], + "type": "text", + "content": "Image" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 308, + 83, + 354, + 127 + ], + "lines": [ + { + "bbox": [ + 308, + 83, + 354, + 127 + ], + "spans": [ + { + "bbox": [ + 308, + 83, + 354, + 127 + ], + "type": "image", + "image_path": "d089c8e983c72f3e3a84cfdc2f7e0e6570f7c5077619bd9faaae69c9bfe4f760.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 129, + 348, + 137 + ], + "lines": [ + { + "bbox": [ + 309, + 129, + 348, + 137 + ], + "spans": [ + { + "bbox": [ + 309, + 129, + 348, + 137 + ], + "type": "text", + "content": "Expression:" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_caption" + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 356, + 82, + 403, + 127 + ], + "blocks": [ + { + "bbox": [ + 374, + 72, + 386, + 80 + ], + "lines": [ + { + "bbox": [ + 374, + 72, + 386, + 80 + ], + "spans": [ + { + "bbox": [ + 374, + 72, + 386, + 80 + ], + "type": "text", + "content": "GT" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 356, + 82, + 403, + 127 + ], + "lines": [ + { + "bbox": [ + 356, + 82, + 403, + 127 + ], + "spans": [ + { + "bbox": [ + 356, + 82, + 403, + 127 + ], + "type": "image", + "image_path": "c826d8a656f514f40ec0182748be048e09bceaad87e61a349f05ebf5274e128d.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 403, + 82, + 450, + 127 + ], + "blocks": [ + { + "bbox": [ + 408, + 72, + 443, + 80 + ], + "lines": [ + { + "bbox": [ + 408, + 72, + 443, + 80 + ], + "spans": [ + { + "bbox": [ + 408, + 72, + 443, + 80 + ], + "type": "text", + "content": "Local text" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 403, + 82, + 450, + 127 + ], + "lines": [ + { + "bbox": [ + 403, + 82, + 450, + 127 + ], + "spans": [ + { + "bbox": [ + 403, + 82, + 450, + 127 + ], + "type": "image", + "image_path": "693e8c61517c4226c11239327aa7bb5a086b8732a1c16208cafb2c46de142f76.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 451, + 83, + 498, + 127 + ], + "blocks": [ + { + "bbox": [ + 455, + 72, + 492, + 80 + ], + "lines": [ + { + "bbox": [ + 455, + 72, + 492, + 80 + ], + "spans": [ + { + "bbox": [ + 455, + 72, + 492, + 80 + ], + "type": "text", + "content": "Global text" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 451, + 83, + 498, + 127 + ], + "lines": [ + { + "bbox": [ + 451, + 83, + 498, + 127 + ], + "spans": [ + { + "bbox": [ + 451, + 83, + 498, + 127 + ], + "type": "image", + "image_path": "24d4d4286cc6ab8a9f49e5603053500f9ef0d3b1e82e7d36e267bf2f0f723f68.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 500, + 83, + 545, + 127 + ], + "blocks": [ + { + "bbox": [ + 500, + 72, + 542, + 80 + ], + "lines": [ + { + "bbox": [ + 500, + 72, + 542, + 80 + ], + 
"spans": [ + { + "bbox": [ + 500, + 72, + 542, + 80 + ], + "type": "text", + "content": "Global-Local" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 500, + 83, + 545, + 127 + ], + "lines": [ + { + "bbox": [ + 500, + 83, + 545, + 127 + ], + "spans": [ + { + "bbox": [ + 500, + 83, + 545, + 127 + ], + "type": "image", + "image_path": "49f44335a1d20a198480e16c451552af1187a0ee38d981a4fdbb05a3a5908a28.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + } + ], + "index": 39 + }, + { + "type": "image", + "bbox": [ + 350, + 129, + 358, + 137 + ], + "blocks": [ + { + "bbox": [ + 350, + 129, + 358, + 137 + ], + "lines": [ + { + "bbox": [ + 350, + 129, + 358, + 137 + ], + "spans": [ + { + "bbox": [ + 350, + 129, + 358, + 137 + ], + "type": "image", + "image_path": "a14079ffe6982d3c747e9bea2fbf2aec856b788958d3798b3ca83079635f0001.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 359, + 129, + 432, + 137 + ], + "lines": [ + { + "bbox": [ + 359, + 129, + 432, + 137 + ], + "spans": [ + { + "bbox": [ + 359, + 129, + 432, + 137 + ], + "type": "text", + "content": "guy in wheelchair" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_caption" + } + ], + "index": 41 + }, + { + "type": "image", + "bbox": [ + 308, + 140, + 354, + 186 + ], + "blocks": [ + { + "bbox": [ + 308, + 140, + 354, + 186 + ], + "lines": [ + { + "bbox": [ + 308, + 140, + 354, + 186 + ], + "spans": [ + { + "bbox": [ + 308, + 140, + 354, + 186 + ], + "type": "image", + "image_path": "621bc8c4868298223080137b6ceabe78f8cab2af98288a579c6015cb4d613000.jpg" + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 188, + 348, + 195 + ], + "lines": [ + { + "bbox": [ + 309, + 188, + 348, + 195 + ], + "spans": [ + { + "bbox": [ + 309, + 188, + 348, + 195 + ], + "type": "text", + "content": "Expression:" + } + ] + } + ], + "index": 48, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 350, + 188, + 381, + 196 + ], + "lines": [ + { + "bbox": [ + 350, + 188, + 381, + 196 + ], + "spans": [ + { + "bbox": [ + 350, + 188, + 381, + 196 + ], + "type": "text", + "content": "a woman" + } + ] + } + ], + "index": 49, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 352, + 197, + 386, + 204 + ], + "lines": [ + { + "bbox": [ + 352, + 197, + 386, + 204 + ], + "spans": [ + { + "bbox": [ + 352, + 197, + 386, + 204 + ], + "type": "text", + "content": "umbrella" + } + ] + } + ], + "index": 50, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 306, + 213, + 545, + 236 + ], + "lines": [ + { + "bbox": [ + 306, + 213, + 545, + 236 + ], + "spans": [ + { + "bbox": [ + 306, + 213, + 545, + 236 + ], + "type": "text", + "content": "Figure 6. Qualitative results with different levels of textual features using COCO Instance GT mask proposals." 
+ } + ] + } + ], + "index": 51, + "angle": 0, + "type": "image_caption" + } + ], + "index": 43 + }, + { + "type": "image", + "bbox": [ + 356, + 140, + 402, + 186 + ], + "blocks": [ + { + "bbox": [ + 356, + 140, + 402, + 186 + ], + "lines": [ + { + "bbox": [ + 356, + 140, + 402, + 186 + ], + "spans": [ + { + "bbox": [ + 356, + 140, + 402, + 186 + ], + "type": "image", + "image_path": "16f9ecf60c777341519c67a95f51f5bf77fa161a4bc7aec0ec76dd83dfc10b5f.jpg" + } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_body" + } + ], + "index": 44 + }, + { + "type": "image", + "bbox": [ + 403, + 140, + 449, + 186 + ], + "blocks": [ + { + "bbox": [ + 403, + 140, + 449, + 186 + ], + "lines": [ + { + "bbox": [ + 403, + 140, + 449, + 186 + ], + "spans": [ + { + "bbox": [ + 403, + 140, + 449, + 186 + ], + "type": "image", + "image_path": "e9f6d407e67b0869c4afe74bee37216f3551102fb5d448873ec78b0396564757.jpg" + } + ] + } + ], + "index": 45, + "angle": 0, + "type": "image_body" + } + ], + "index": 45 + }, + { + "type": "image", + "bbox": [ + 451, + 140, + 498, + 186 + ], + "blocks": [ + { + "bbox": [ + 451, + 140, + 498, + 186 + ], + "lines": [ + { + "bbox": [ + 451, + 140, + 498, + 186 + ], + "spans": [ + { + "bbox": [ + 451, + 140, + 498, + 186 + ], + "type": "image", + "image_path": "286426ff765b1c17f783baa567b5fc5c4f9d116f6f071d4e6afdcfb261e43c91.jpg" + } + ] + } + ], + "index": 46, + "angle": 0, + "type": "image_body" + } + ], + "index": 46 + }, + { + "type": "image", + "bbox": [ + 499, + 140, + 545, + 186 + ], + "blocks": [ + { + "bbox": [ + 499, + 140, + 545, + 186 + ], + "lines": [ + { + "bbox": [ + 499, + 140, + 545, + 186 + ], + "spans": [ + { + "bbox": [ + 499, + 140, + 545, + 186 + ], + "type": "image", + "image_path": "a4f41c652470c5b8489327c2338666d59e5502f0b47c446bf3de4a255db73e34.jpg" + } + ] + } + ], + "index": 47, + "angle": 0, + "type": "image_body" + } + ], + "index": 47 + }, + { + "bbox": [ + 313, + 256, + 545, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 256, + 545, + 292 + ], + "spans": [ + { + "bbox": [ + 313, + 256, + 545, + 292 + ], + "type": "text", + "content": "mask or box region feature in a range of zero-shot dense prediction tasks [7, 9, 13, 49, 59]. Therefore, we consider cropping as one of the zero-shot RIS baselines." + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 306, + 307, + 362, + 319 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 307, + 362, + 319 + ], + "spans": [ + { + "bbox": [ + 306, + 307, + 362, + 319 + ], + "type": "text", + "content": "5.3. Results" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 304, + 325, + 547, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 325, + 547, + 517 + ], + "spans": [ + { + "bbox": [ + 304, + 325, + 547, + 517 + ], + "type": "text", + "content": "Main Results. We report referring image segmentation performances of our global-local CLIP and other baselines on RefCOCO, RefCOCO+ and RefCOCOg in terms of IoU and mIoU metrics in Table 1. For a fair comparison, all methods including baselines use FreeSOLO [53] mask proposals to produce the final output mask. The experimental results show that our method outperforms other baseline methods with substantial margins. Our method also surpasses the weakly supervised referring image segmentation method (TSEG) [48] in terms of mIoU1. 
We also show upper-bound performances of using FreeSOLO, where the scores are computed by the IoU between ground-truth masks and its max-overlapped mask proposal. Although there is still a gap compared to the fully-supervised referring image segmentation methods, our method improves performance significantly compared to the baselines with the same upper-bound." + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 304, + 531, + 547, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 531, + 547, + 686 + ], + "spans": [ + { + "bbox": [ + 304, + 531, + 547, + 686 + ], + "type": "text", + "content": "Zero-shot Evaluation on Unseen Domain. To verify the effectiveness of our method in a more practical setting, we report the zero-shot evaluation results with SoTA supervised methods [54, 60] on the test split of PhraseCut [56] in Figure 4 (left). Note that, RefCOCO contains expressions for only 80 salient object classes, whereas PhraseCut covers a variety of additional visual concepts i.e. 1272 categories in the test set. Our method outperforms both supervised methods, even though our models were never trained under RIS supervision. When evaluated on a subset of classes that are not seen in the RefCOCO datasets (Unseen column), the supervised methods show significant performance degradation, whereas our method works robustly on this subset." + } + ] + } + ], + "index": 55 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "type": "text", + "content": "1We only compare mIoU scores with TSEG since it reports only mIoU scores in the paper." + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "19462" + } + ] + } + ], + "index": 57 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 61, + 70, + 534, + 220 + ], + "blocks": [ + { + "bbox": [ + 61, + 70, + 534, + 220 + ], + "lines": [ + { + "bbox": [ + 61, + 70, + 534, + 220 + ], + "spans": [ + { + "bbox": [ + 61, + 70, + 534, + 220 + ], + "type": "image", + "image_path": "b8fb3126971edeccdc43fd85ba0718c00bb0a1c780e4a477b816262e833f3ebc.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 48, + 228, + 544, + 239 + ], + "lines": [ + { + "bbox": [ + 48, + 228, + 544, + 239 + ], + "spans": [ + { + "bbox": [ + 48, + 228, + 544, + 239 + ], + "type": "text", + "content": "Figure 7. Qualitative results of our method with the several baselines. Note that all methods use mask proposals generated by FreeSOLO." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 260, + 289, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 260, + 289, + 438 + ], + "spans": [ + { + "bbox": [ + 47, + 260, + 289, + 438 + ], + "type": "text", + "content": "Comparison to supervised methods in few-shot Setting. 
We also compare our model to two supervised RIS methods [54, 60] in a few-shot learning setting, where the training set includes " + }, + { + "bbox": [ + 47, + 260, + 289, + 438 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 47, + 260, + 289, + 438 + ], + "type": "text", + "content": " instances for each of 80 classes in RefCOCO" + }, + { + "bbox": [ + 47, + 260, + 289, + 438 + ], + "type": "inline_equation", + "content": "^2" + }, + { + "bbox": [ + 47, + 260, + 289, + 438 + ], + "type": "text", + "content": ". Note that the supervised methods use additional forms of supervision in training, whereas our method does not require any form of training or additional supervision; thus this setting is even disadvantageous to our method. Figure 4 (right) shows oIoU while varying " + }, + { + "bbox": [ + 47, + 260, + 289, + 438 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 47, + 260, + 289, + 438 + ], + "type": "text", + "content": " on RefCOCOg. The results clearly show that our method outperforms both supervised methods with large margins when " + }, + { + "bbox": [ + 47, + 260, + 289, + 438 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 47, + 260, + 289, + 438 + ], + "type": "text", + "content": " is small, and the gaps narrow as " + }, + { + "bbox": [ + 47, + 260, + 289, + 438 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 47, + 260, + 289, + 438 + ], + "type": "text", + "content": " gets larger (64 and 256 for LAVT [60] and CRIS [54], respectively). Note that it covers about " + }, + { + "bbox": [ + 47, + 260, + 289, + 438 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 47, + 260, + 289, + 438 + ], + "type": "text", + "content": " of the training set when " + }, + { + "bbox": [ + 47, + 260, + 289, + 438 + ], + "type": "inline_equation", + "content": "k = 64" + }, + { + "bbox": [ + 47, + 260, + 289, + 438 + ], + "type": "text", + "content": " and the same trends hold for both RefCOCO and RefCOCO+." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 449, + 141, + 462 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 449, + 141, + 462 + ], + "spans": [ + { + "bbox": [ + 47, + 449, + 141, + 462 + ], + "type": "text", + "content": "5.4. Ablation Study" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 469, + 288, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 469, + 288, + 601 + ], + "spans": [ + { + "bbox": [ + 46, + 469, + 288, + 601 + ], + "type": "text", + "content": "Effects of Mask Quality. To show the impact of the proposed method without considering the mask quality of the mask generators, we evaluate the performance of our method and the baselines with COCO instance GT masks in Table 2. Our approach has demonstrated superior performance compared to all baselines and has shown a performance improvement of over " + }, + { + "bbox": [ + 46, + 469, + 288, + 601 + ], + "type": "inline_equation", + "content": "3.5\\%" + }, + { + "bbox": [ + 46, + 469, + 288, + 601 + ], + "type": "text", + "content": ", particularly on RefCOCOg which includes longer expressions. We believe that our method performs well on challenging examples that involve complex expressions, such as those with multiple clauses, which require an understanding of both the language and the scene." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 621, + 287, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 621, + 287, + 682 + ], + "spans": [ + { + "bbox": [ + 47, + 621, + 287, + 682 + ], + "type": "text", + "content": "Effects of Global-Local Context Features. We also study the effects of global-local context features in both visual and textual modalities and show the results in Table 3. For this analysis, we use RefCOCOg as it contains more complex expressions with multiple clauses. Among all combinations" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 260, + 547, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 260, + 547, + 283 + ], + "spans": [ + { + "bbox": [ + 306, + 260, + 547, + 283 + ], + "type": "text", + "content": "of two modalities, using both global-local context features in the visual and textual domains leads to the best performance." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 301, + 547, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 301, + 547, + 505 + ], + "spans": [ + { + "bbox": [ + 305, + 301, + 547, + 505 + ], + "type": "text", + "content": "Qualitative Analysis. We demonstrate several results that support the effectiveness of our global-local context visual features in Figure 5. To show this effect more clearly, we use COCO instance GT masks as mask proposals. When using only local-context visual features, the predicted mask tends to focus on the instance that shares the same class as the target object. However, when using only global-context visual features, the predicted mask tends to capture the context of the expression but may focus on a different object class. By combining global and local context, our method successfully finds the target mask. We also demonstrate the effectiveness of our global-local context textual features in Figure 6. Furthermore, we compare the qualitative results of our method with baseline methods in Figure 7. Our proposed global-local CLIP outperforms the baseline methods in identifying the target object by taking into account the global context of the image and expression." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 517, + 379, + 530 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 517, + 379, + 530 + ], + "spans": [ + { + "bbox": [ + 306, + 517, + 379, + 530 + ], + "type": "text", + "content": "6. Conclusion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 537, + 547, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 537, + 547, + 647 + ], + "spans": [ + { + "bbox": [ + 305, + 537, + 547, + 647 + ], + "type": "text", + "content": "In this paper, we propose a simple yet effective zero-shot referring image segmentation framework focusing on transferring knowledges from image-text cross-modal representations of CLIP. To tackle the difficulty of the referring image segmentation task, we propose global-local context encodings to compute similarities between images and expressions, where both target object semantics and relations between the objects are dealt in a unified framework. The proposed method significantly outperforms all baseline methods and weakly supervised method as well." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 658, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 658, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 658, + 547, + 713 + ], + "type": "text", + "content": "Acknowledgement. This work was supported by the IITP grants (No.2019-0-01842, No.2021-0-02068, No.2022-0-00926) funded by MSIT, the ISTD program (No.20018334) funded by MOTIE, and the GIST-MIT Research Collaboration grant funded by GIST, Korea." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 693, + 287, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 693, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 693, + 287, + 713 + ], + "type": "text", + "content": "2we use object classes in RefCOCO GT annotation. This is to cover all salient objects in the dataset during the few-shot training." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "19463" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "type": "text", + "content": "[1] Alberto Baldrati, Marco Bertini, Tiberio Uricchio, and Alberto Del Bimbo. Effective conditioned and composed image retrieval combining clip-based features. In CVPR, 2022. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 125, + 287, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 125, + 287, + 158 + ], + "spans": [ + { + "bbox": [ + 53, + 125, + 287, + 158 + ], + "type": "text", + "content": "[2] Bo Chen, Zhiwei Hu, Zhilong Ji, Jinfeng Bai, and Wangmeng Zuo. Position-aware contrastive alignment for referring image segmentation. arXiv preprint arXiv:2212.13419, 2022. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 159, + 288, + 202 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 159, + 288, + 202 + ], + "spans": [ + { + "bbox": [ + 54, + 159, + 288, + 202 + ], + "type": "text", + "content": "[3] Shiming Chen, Ziming Hong, Yang Liu, Guo-Sen Xie, Baigui Sun, Hao Li, Qinmu Peng, Ke Lu, and Xinge You. Transzero: Attribute-guided transformer for zero-shot learning. In AAAI, 2022. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 205, + 288, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 205, + 288, + 247 + ], + "spans": [ + { + "bbox": [ + 53, + 205, + 288, + 247 + ], + "type": "text", + "content": "[4] Yen-Chun Chen, Linjie Li, Licheng Yu, Ahmed El Kholy, Faisal Ahmed, Zhe Gan, Yu Cheng, and Jingjing Liu. Uniter: Universal image-text representation learning. In ECCV, 2020. 
1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 250, + 288, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 250, + 288, + 282 + ], + "spans": [ + { + "bbox": [ + 53, + 250, + 288, + 282 + ], + "type": "text", + "content": "[5] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In ACL, 2019. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 285, + 288, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 285, + 288, + 316 + ], + "spans": [ + { + "bbox": [ + 53, + 285, + 288, + 316 + ], + "type": "text", + "content": "[6] Henghui Ding, Chang Liu, Suchen Wang, and Xudong Jiang. Vision-language transformer and query generation for referring segmentation. In ICCV, 2021. 3, 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 318, + 288, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 318, + 288, + 350 + ], + "spans": [ + { + "bbox": [ + 53, + 318, + 288, + 350 + ], + "type": "text", + "content": "[7] Jian Ding, Nan Xue, Gui-Song Xia, and Dengxin Dai. Decoupling zero-shot semantic segmentation. In CVPR, 2022. 1, 2, 5, 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 353, + 288, + 417 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 353, + 288, + 417 + ], + "spans": [ + { + "bbox": [ + 53, + 353, + 288, + 417 + ], + "type": "text", + "content": "[8] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2020. 4, 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 419, + 288, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 419, + 288, + 462 + ], + "spans": [ + { + "bbox": [ + 53, + 419, + 288, + 462 + ], + "type": "text", + "content": "[9] Yu Du, Fangyun Wei, Zihe Zhang, Miaojing Shi, Yue Gao, and Guoqi Li. Learning to prompt for open-vocabulary object detection with vision-language model. In CVPR, 2022. 1, 2, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 464, + 288, + 507 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 464, + 288, + 507 + ], + "spans": [ + { + "bbox": [ + 48, + 464, + 288, + 507 + ], + "type": "text", + "content": "[10] Chengjian Feng, Yujie Zhong, Zequn Jie, Xiangxiang Chu, Haibing Ren, Xiaolin Wei, Weidi Xie, and Lin Ma. Promptdet: Expand your detector vocabulary with uncurated images. In ECCV, 2022. 1, 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 510, + 288, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 510, + 288, + 542 + ], + "spans": [ + { + "bbox": [ + 48, + 510, + 288, + 542 + ], + "type": "text", + "content": "[11] Guang Feng, Zhiwei Hu, Lihe Zhang, and Huchuan Lu. Encoder fusion network with co-attention embedding for referring image segmentation. In CVPR, 2021. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 544, + 288, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 544, + 288, + 576 + ], + "spans": [ + { + "bbox": [ + 48, + 544, + 288, + 576 + ], + "type": "text", + "content": "[12] Golnaz Ghiasi, Xiuye Gu, Yin Cui, and Tsung-Yi Lin. 
Scaling open-vocabulary image segmentation with image-level labels. In ECCV, 2022. 1, 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 578, + 288, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 578, + 288, + 610 + ], + "spans": [ + { + "bbox": [ + 48, + 578, + 288, + 610 + ], + "type": "text", + "content": "[13] Xiuye Gu, Tsung-Yi Lin, Weicheng Kuo, and Yin Cui. Open-vocabulary object detection via vision and language knowledge distillation. In ICML, 2022. 1, 2, 7" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 613, + 288, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 288, + 643 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 288, + 643 + ], + "type": "text", + "content": "[14] Zongyan Han, Zhenyong Fu, Shuo Chen, and Jian Yang. Contrastive embedding for generalized zero-shot learning. In CVPR, 2021. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 646, + 288, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 646, + 288, + 677 + ], + "spans": [ + { + "bbox": [ + 48, + 646, + 288, + 677 + ], + "type": "text", + "content": "[15] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 4, 5" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 680, + 288, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 288, + 712 + ], + "type": "text", + "content": "[16] Matthew Honnibal and Mark Johnson. An improved non-monotonic transition system for dependency parsing. In EMNLP, 2015. 3, 5" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[17] Hsuan-An Hsia, Che-Hsien Lin, Bo-Han Kung, Jhao-Ting Chen, Daniel Stanley Tan, Jun-Cheng Chen, and Kai-Lung Hua. Clipcam: A simple baseline for zero-shot text-guided object and action localization. In ICASSP, 2022. 2, 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 118, + 547, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 118, + 547, + 148 + ], + "spans": [ + { + "bbox": [ + 308, + 118, + 547, + 148 + ], + "type": "text", + "content": "[18] Ronghang Hu, Marcus Rohrbach, and Trevor Darrell. Segmentation from natural language expressions. In ECCV, 2016. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 150, + 545, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 150, + 545, + 194 + ], + "spans": [ + { + "bbox": [ + 308, + 150, + 545, + 194 + ], + "type": "text", + "content": "[19] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021. 
1, 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 195, + 545, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 195, + 545, + 227 + ], + "spans": [ + { + "bbox": [ + 308, + 195, + 545, + 227 + ], + "type": "text", + "content": "[20] Ya Jing, Tao Kong, Wei Wang, Liang Wang, Lei Li, and Tieniu Tan. Locate then segment: A strong pipeline for referring image segmentation. In CVPR, 2021. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 228, + 545, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 228, + 545, + 259 + ], + "spans": [ + { + "bbox": [ + 308, + 228, + 545, + 259 + ], + "type": "text", + "content": "[21] Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. Referitagame: Referring to objects in photographs of natural scenes. In EMNLP, 2014. 5" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 261, + 545, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 261, + 545, + 293 + ], + "spans": [ + { + "bbox": [ + 308, + 261, + 545, + 293 + ], + "type": "text", + "content": "[22] Kwanyoung Kim, Yujin Oh, and Jong Chul Ye. Zegot: Zero-shot segmentation through optimal transport of text prompts. arXiv preprint arXiv:2301.12171, 2023. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 294, + 545, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 294, + 545, + 327 + ], + "spans": [ + { + "bbox": [ + 308, + 294, + 545, + 327 + ], + "type": "text", + "content": "[23] Namyup Kim, Dongwon Kim, Cuiling Lan, Wenjun Zeng, and Suha Kwak. Restr: Convolution-free referring image segmentation using transformers. In CVPR, 2022. 3, 6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 328, + 545, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 328, + 545, + 370 + ], + "spans": [ + { + "bbox": [ + 308, + 328, + 545, + 370 + ], + "type": "text", + "content": "[24] Weicheng Kuo, Yin Cui, Xiuye Gu, AJ Piergiovanni, and Anelia Angelova. F-vlm: Open-vocabulary object detection upon frozen vision and language models. arXiv preprint arXiv:2209.15639, 2022. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 372, + 545, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 372, + 545, + 403 + ], + "spans": [ + { + "bbox": [ + 308, + 372, + 545, + 403 + ], + "type": "text", + "content": "[25] Jiahao Li, Greg Shakhnarovich, and Raymond A Yeh. Adapting clip for phrase localization without further training. arXiv preprint arXiv:2204.03647, 2022. 2, 7" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 405, + 545, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 405, + 545, + 446 + ], + "spans": [ + { + "bbox": [ + 308, + 405, + 545, + 446 + ], + "type": "text", + "content": "[26] Ruiyu Li, Kaican Li, Yi-Chun Kuo, Michelle Shu, Xiaojuan Qi, Xiaoyong Shen, and Jiaya Jia. Referring image segmentation via recurrent refinement networks. In CVPR, 2018. 3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 449, + 545, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 449, + 545, + 492 + ], + "spans": [ + { + "bbox": [ + 308, + 449, + 545, + 492 + ], + "type": "text", + "content": "[27] Wei Li, Can Gao, Guocheng Niu, Xinyan Xiao, Hao Liu, Jiachen Liu, Hua Wu, and Haifeng Wang. 
Unimo: Towards unified-modal understanding and generation via cross-modal contrastive learning. In ACL, 2021. 1" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 494, + 545, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 494, + 545, + 536 + ], + "spans": [ + { + "bbox": [ + 308, + 494, + 545, + 536 + ], + "type": "text", + "content": "[28] Xiujun Li, Xi Yin, Chunyuan Li, Pengchuan Zhang, Xiaowei Hu, Lei Zhang, Lijuan Wang, Houdong Hu, Li Dong, Furu Wei, et al. Oscar: Object-semantics aligned pre-training for vision-language tasks. In ECCV, 2020. 1" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 537, + 545, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 537, + 545, + 590 + ], + "spans": [ + { + "bbox": [ + 308, + 537, + 545, + 590 + ], + "type": "text", + "content": "[29] Feng Liang, Bichen Wu, Xiaoliang Dai, Kunpeng Li, Yinan Zhao, Hang Zhang, Peizhao Zhang, Peter Vajda, and Diana Marculescu. Open-vocabulary semantic segmentation with mask-adapted clip. arXiv preprint arXiv:2210.04150, 2022. 2" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 308, + 592, + 545, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 592, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 308, + 592, + 545, + 635 + ], + "type": "text", + "content": "[30] Chuang Lin, Peize Sun, Yi Jiang, Ping Luo, Lizhen Qu, Gholamreza Haffari, Zehuan Yuan, and Jianfei Cai. Learning object-language alignments for open-vocabulary object detection. arXiv preprint arXiv:2211.14843, 2022. 2" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 308, + 637, + 545, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 637, + 545, + 679 + ], + "spans": [ + { + "bbox": [ + 308, + 637, + 545, + 679 + ], + "type": "text", + "content": "[31] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dálár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In ECCV, 2014. 5" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 308, + 681, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 681, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 681, + 545, + 713 + ], + "type": "text", + "content": "[32] Chenxi Liu, Zhe Lin, Xiaohui Shen, Jimei Yang, Xin Lu, and Alan Yuille. Recurrent multimodal interaction for referring image segmentation. In ICCV, 2017. 3" + } + ] + } + ], + "index": 33 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "19464" + } + ] + } + ], + "index": 35 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 288, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 288, + 116 + ], + "type": "text", + "content": "[33] Jiang Liu, Hui Ding, Zhaowei Cai, Yuting Zhang, Ravi Kumar Satzoda, Vijay Mahadevan, and R Manmatha. Polyformer: Referring image segmentation as sequential polygon generation. arXiv preprint arXiv:2302.07387, 2023. 
3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 119, + 287, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 119, + 287, + 152 + ], + "spans": [ + { + "bbox": [ + 49, + 119, + 287, + 152 + ], + "type": "text", + "content": "[34] Lu Liu, Tianyi Zhou, Guodong Long, Jing Jiang, Xuanyi Dong, and Chengqi Zhang. Isometric propagation network for generalized zero-shot learning. In ICLR, 2020. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 153, + 288, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 153, + 288, + 185 + ], + "spans": [ + { + "bbox": [ + 48, + 153, + 288, + 185 + ], + "type": "text", + "content": "[35] Jonathan Long, Evan Shelhamer, and Trevor Darrell. Fully convolutional networks for semantic segmentation. In CVPR, 2015. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 188, + 288, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 188, + 288, + 220 + ], + "spans": [ + { + "bbox": [ + 48, + 188, + 288, + 220 + ], + "type": "text", + "content": "[36] Jiasen Lu, Dhruv Batra, Devi Parikh, and Stefan Lee. Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. In NeurlPS, 2019. 1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 223, + 287, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 223, + 287, + 266 + ], + "spans": [ + { + "bbox": [ + 48, + 223, + 287, + 266 + ], + "type": "text", + "content": "[37] Huaishao Luo, Junwei Bao, Youzheng Wu, Xiaodong He, and Tianrui Li. Segclip: Patch aggregation with learnable centers for open-vocabulary semantic segmentation. arXiv preprint arXiv:2211.14813, 2022. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 269, + 288, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 269, + 288, + 312 + ], + "spans": [ + { + "bbox": [ + 48, + 269, + 288, + 312 + ], + "type": "text", + "content": "[38] Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L Yuille, and Kevin Murphy. Generation and comprehension of unambiguous object descriptions. In CVPR, 2016. 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 315, + 288, + 347 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 315, + 288, + 347 + ], + "spans": [ + { + "bbox": [ + 48, + 315, + 288, + 347 + ], + "type": "text", + "content": "[39] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Corrado, and Jeff Dean. Distributed representations of words and phrases and their compositionality. In NeurIPS, 2013. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 350, + 288, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 350, + 288, + 381 + ], + "spans": [ + { + "bbox": [ + 48, + 350, + 288, + 381 + ], + "type": "text", + "content": "[40] Ron Mokady, Amir Hertz, and Amit H Bermano. Clipcap: Clip prefix for image captioning. arXiv preprint arXiv:2111.09734, 2021. 1, 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 384, + 288, + 416 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 384, + 288, + 416 + ], + "spans": [ + { + "bbox": [ + 48, + 384, + 288, + 416 + ], + "type": "text", + "content": "[41] Varun K Nagaraja, Vlad I Morariu, and Larry S Davis. Modeling context between objects for referring expression understanding. In ECCV, 2016. 
5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 418, + 288, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 418, + 288, + 461 + ], + "spans": [ + { + "bbox": [ + 48, + 418, + 288, + 461 + ], + "type": "text", + "content": "[42] Prashant Pandey, Mustafa Chasmai, Monish Natarajan, and Brejesh Lall. A language-guided benchmark for weakly supervised open vocabulary semantic segmentation. arXiv preprint arXiv:2302.14163, 2023. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 464, + 288, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 464, + 288, + 518 + ], + "spans": [ + { + "bbox": [ + 48, + 464, + 288, + 518 + ], + "type": "text", + "content": "[43] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021. 1, 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 521, + 288, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 521, + 288, + 564 + ], + "spans": [ + { + "bbox": [ + 48, + 521, + 288, + 564 + ], + "type": "text", + "content": "[44] Yongming Rao, Wenliang Zhao, Guangyi Chen, Yansong Tang, Zheng Zhu, Guan Huang, Jie Zhou, and Jiwen Lu. Denseclip: Language-guided dense prediction with context-aware prompting. In CVPR, 2022. 1, 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 567, + 288, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 567, + 288, + 620 + ], + "spans": [ + { + "bbox": [ + 48, + 567, + 288, + 620 + ], + "type": "text", + "content": "[45] Hanoona Abdul Rasheed, Muhammad Maaz, Muhammad Uzair Khattak, Salman Khan, and Fahad Khan. Bridging the gap between object and image-level representations for open-vocabulary detection. In NeurIPS, 2022. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 624, + 288, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 288, + 666 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 288, + 666 + ], + "type": "text", + "content": "[46] Ramprasaath R Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. Gradcam: Visual explanations from deep networks via gradient-based localization. In ICCV, 2017. 6" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 670, + 288, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 288, + 712 + ], + "type": "text", + "content": "[47] Sheng Shen, Liunian Harold Li, Hao Tan, Mohit Bansal, Anna Rohrbach, Kai-Wei Chang, Zhewei Yao, and Kurt Keutzer. How much can clip benefit vision-and-language tasks? In ICLR, 2021. 1" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 706 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 307, + 73, + 547, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 547, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 547, + 106 + ], + "type": "text", + "content": "[48] Robin Strudel, Ivan Laptev, and Cordelia Schmid. Weakly-supervised segmentation of referring expressions. arXiv preprint arXiv:2205.04725, 2022. 
1, 3, 6, 7" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 107, + 547, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 547, + 149 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 547, + 149 + ], + "type": "text", + "content": "[49] Sanjay Subramanian, William Merrill, Trevor Darrell, Matt Gardner, Sameer Singh, and Anna Rohrbach. Reclip: A strong zero-shot baseline for referring expression comprehension. In ACL, 2022. 2, 7" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 152, + 547, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 547, + 183 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 547, + 183 + ], + "type": "text", + "content": "[50] Hao Tan and Mohit Bansal. Lxmert: Learning cross-modality encoder representations from transformers. In EMNLP, 2019. 1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 185, + 547, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 185, + 547, + 217 + ], + "spans": [ + { + "bbox": [ + 308, + 185, + 547, + 217 + ], + "type": "text", + "content": "[51] Mengmeng Wang, Jiazheng Xing, and Yong Liu. Actionclip: A new paradigm for video action recognition. arXiv preprint arXiv:2109.08472, 2021. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 219, + 547, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 219, + 547, + 251 + ], + "spans": [ + { + "bbox": [ + 308, + 219, + 547, + 251 + ], + "type": "text", + "content": "[52] Xudong Wang, Rohit Girdhar, Stella X Yu, and Ishan Misra. Cut and learn for unsupervised object detection and instance segmentation. arXiv preprint arXiv:2301.11320, 2023. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 252, + 547, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 252, + 547, + 294 + ], + "spans": [ + { + "bbox": [ + 308, + 252, + 547, + 294 + ], + "type": "text", + "content": "[53] Xinlong Wang, Zhiding Yu, Shalini De Mello, Jan Kautz, Anima Anandkumar, Chunhua Shen, and Jose M Alvarez. Freesolo: Learning to segment objects without annotations. In CVPR, 2022. 3, 4, 5, 6, 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 297, + 546, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 297, + 546, + 328 + ], + "spans": [ + { + "bbox": [ + 308, + 297, + 546, + 328 + ], + "type": "text", + "content": "[54] Zhaoqing Wang, Yu Lu, Qiang Li, Xunqiang Tao, Yandong Guo, Mingming Gong, and Tongliang Liu. Cris: Clip-driven referring image segmentation. In CVPR, 2022. 1, 3, 7, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 330, + 547, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 330, + 547, + 371 + ], + "spans": [ + { + "bbox": [ + 308, + 330, + 547, + 371 + ], + "type": "text", + "content": "[55] Zhichao Wei, Xiaohao Chen, Mingqiang Chen, and Siyu Zhu. Learning aligned cross-modal representations for referring image segmentation. arXiv preprint arXiv:2301.06429, 2023. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 374, + 547, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 374, + 547, + 406 + ], + "spans": [ + { + "bbox": [ + 308, + 374, + 547, + 406 + ], + "type": "text", + "content": "[56] Chenyun Wu, Zhe Lin, Scott Cohen, Trung Bui, and Subhransu Maji. Phrasescut: Language-based image segmentation in the wild. 
In CVPR, 2020. 7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 408, + 546, + 440 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 408, + 546, + 440 + ], + "spans": [ + { + "bbox": [ + 308, + 408, + 546, + 440 + ], + "type": "text", + "content": "[57] Jianzong Wu, Xiangtai Li, Xia Li, Henghui Ding, Yunhai Tong, and Dacheng Tao. Towards robust referring image segmentation. arXiv preprint arXiv:2209.09554, 2022. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 441, + 546, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 441, + 546, + 474 + ], + "spans": [ + { + "bbox": [ + 308, + 441, + 546, + 474 + ], + "type": "text", + "content": "[58] Mengde Xu, Zheng Zhang, Fangyun Wei, Han Hu, and Xiang Bai. Side adapter network for open-vocabulary semantic segmentation. arXiv preprint arXiv:2302.12242, 2023. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 475, + 547, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 475, + 547, + 518 + ], + "spans": [ + { + "bbox": [ + 308, + 475, + 547, + 518 + ], + "type": "text", + "content": "[59] Mengde Xu, Zheng Zhang, Fangyun Wei, Yutong Lin, Yue Cao, Han Hu, and Xiang Bai. A simple baseline for open-vocabulary semantic segmentation with pre-trained vision-language model. In ECCV, 2022. 1, 2, 5, 7" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 520, + 547, + 562 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 520, + 547, + 562 + ], + "spans": [ + { + "bbox": [ + 308, + 520, + 547, + 562 + ], + "type": "text", + "content": "[60] Zhao Yang, Jiaqi Wang, Yansong Tang, Kai Chen, Hengshuang Zhao, and Philip HS Torr. Lavt: Language-aware vision transformer for referring image segmentation. In CVPR, 2022. 3, 6, 7, 8" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 564, + 547, + 595 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 564, + 547, + 595 + ], + "spans": [ + { + "bbox": [ + 308, + 564, + 547, + 595 + ], + "type": "text", + "content": "[61] Linwei Ye, Mrigank Rochan, Zhi Liu, and Yang Wang. Cross-modal self-attention network for referring image segmentation. In CVPR, 2019. 3" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 597, + 546, + 639 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 597, + 546, + 639 + ], + "spans": [ + { + "bbox": [ + 308, + 597, + 546, + 639 + ], + "type": "text", + "content": "[62] Zicheng Zhang, Yi Zhu, Jianzhuang Liu, Xiaodan Liang, and Wei Ke. Coupalign: Coupling word-pixel with sentence-mask alignments for referring image segmentation. In NeurIPS, 2022. 3" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 308, + 642, + 546, + 663 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 642, + 546, + 663 + ], + "spans": [ + { + "bbox": [ + 308, + 642, + 546, + 663 + ], + "type": "text", + "content": "[63] Chong Zhou, Chen Change Loy, and Bo Dai. Extract free dense labels from clip. In ECCV, 2022. 1, 2, 7" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 308, + 665, + 547, + 706 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 665, + 547, + 706 + ], + "spans": [ + { + "bbox": [ + 308, + 665, + 547, + 706 + ], + "type": "text", + "content": "[64] Ziqin Zhou, Bowen Zhang, Yinjie Lei, Lingqiao Liu, and Yifan Liu. Zegclip: Towards adapting clip for zero-shot semantic segmentation. 
arXiv preprint arXiv:2212.03588, 2022.2" + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "text", + "content": "19465" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/Zero-Shot Text-to-Parameter Translation for Game Character Auto-Creation/120ba89b-8ae1-4464-85a9-dac434567f6d_content_list.json b/2023/Zero-Shot Text-to-Parameter Translation for Game Character Auto-Creation/120ba89b-8ae1-4464-85a9-dac434567f6d_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..19d56d2b1549309dda8da04afddee757dfe722cb --- /dev/null +++ b/2023/Zero-Shot Text-to-Parameter Translation for Game Character Auto-Creation/120ba89b-8ae1-4464-85a9-dac434567f6d_content_list.json @@ -0,0 +1,1472 @@ +[ + { + "type": "text", + "text": "Zero-Shot Text-to-Parameter Translation for Game Character Auto-Creation", + "text_level": 1, + "bbox": [ + 94, + 130, + 875, + 150 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Rui Zhao $^{1}$ , Wei Li $^{2}$ , Zhipeng Hu $^{1}$ , Lincheng Li $^{1*}$ , Zhengxia Zou $^{3*}$ , Zhenwei Shi $^{3}$ , Changjie Fan $^{1}$ , $^{1}$ Netease Fuxi AI Lab, $^{2}$ Nankai University, $^{3}$ Beihang University", + "bbox": [ + 96, + 179, + 869, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{zhaorui10, zphu, lilincheng, fanchangjie}@corp.netease.com,", + "bbox": [ + 220, + 219, + 746, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "liwei@dbis.nankai.edu.cn, {zhengxiazou, zhenweishi}@buaa.edu.cn", + "bbox": [ + 204, + 236, + 759, + 251 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/db582dc6bc97988279dcf18cad7287276d66ea2a9cad30bf6860b11a081bd6d4.jpg", + "image_caption": [ + "Figure 1. Game characters created by the proposed text-to-parameter translation (T2P) given different text prompts. The front view and three side views are shown for each character." + ], + "image_footnote": [], + "bbox": [ + 78, + 286, + 895, + 512 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 580, + 313, + 597 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent popular Role-Playing Games (RPGs) saw the great success of character auto-creation systems. The bone-driven face model controlled by continuous parameters (like the position of bones) and discrete parameters (like the hairstyles) makes it possible for users to personalize and customize in-game characters. Previous in-game character auto-creation systems are mostly image-driven, where facial parameters are optimized so that the rendered character looks similar to the reference face photo. This paper proposes a novel text-to-parameter translation method (T2P) to achieve zero-shot text-driven game character auto-creation. With our method, users can create a vivid in-game character with arbitrary text description without using any reference photo or editing hundreds of parameters manually. 
In our method, taking the power of large-scale pre-trained multi-modal CLIP and neural rendering, T2P searches both continuous facial parameters and discrete facial parame", + "bbox": [ + 75, + 614, + 472, + 872 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ters in a unified framework. Due to the discontinuous parameter representation, previous methods have difficulty in effectively learning discrete facial parameters. T2P, to our best knowledge, is the first method that can handle the optimization of both discrete and continuous parameters. Experimental results show that T2P can generate high-quality and vivid game characters with given text prompts. T2P outperforms other SOTA text-to-3D generation methods on both objective evaluations and subjective evaluations.", + "bbox": [ + 500, + 582, + 893, + 719 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 500, + 748, + 632, + 763 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Role-Playing Games (RPGs) are praised by gamers for providing immersive experiences. Some of the recent popular RPGs, like Grand Theft Auto Online1 and Naraka2, have opened up character customization systems to players. In such systems, in-game characters are bone-driven and controlled by continuous parameters, like the position,", + "bbox": [ + 496, + 773, + 893, + 864 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation.", + "bbox": [ + 236, + 1, + 807, + 18 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Except for this watermark, it is identical to the accepted version;", + "bbox": [ + 325, + 16, + 722, + 30 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 292, + 31, + 753, + 45 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding Authors.", + "bbox": [ + 94, + 886, + 230, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1https://www.rockstargames.com/GTAOnline", + "bbox": [ + 517, + 875, + 759, + 887 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "2http://www.narakathegame.com", + "bbox": [ + 519, + 887, + 694, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "21013", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "rotation, scale of each bone, and discrete parameters, like the hairstyle, beard styles, make-ups, and other facial elements. By manually adjusting these parameters, players can control the appearance of the characters in the game according to their personal preferences, rather than using predefined character templates. However, it is cumbersome and time-consuming for users to manually adjust hundreds of parameters - usually taking up to hours to create a character that matches their expectations.", + "bbox": [ + 75, + 90, + 470, + 227 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To automatically create in-game characters, the method named Face-to-parameter translation (F2P) was recently proposed to automatically create game characters based on a single input face image [38]. 
F2P and its variants [39, 41] have been successfully used in recent RPGs like Narake and Justice, and virtual meeting platform Yaotai. Recent 3D face reconstruction methods [2, 7, 26, 33, 42-44] can also be adapted to create game characters. However, all the above-mentioned methods require reference face photos for auto-creation. Users may take time to search, download and upload suitable photos for their expected game characters. Compared with images, text prompts are more flexible and time-saving for game character auto-creation. A very recent work AvatarCLIP [10] achieved text-driven avatar auto-creation and animation. It optimizes implicit neural networks to generate characters. However, the created characters are controlled by implicit parameters, which lack explicit physical meanings, thus manually adjusting them needs extra designs. This will be inconvenient for players or game developers to further fine-tune the created game characters as they want.", + "bbox": [ + 75, + 229, + 470, + 547 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address the above problems, we propose text-to-parameter translation (T2P) to tackle the in-game character auto-creation task based on arbitrary text prompts. T2P takes the power of large-scale pre-trained CLIP to achieve zero-shot text-driven character creation and utilizes neural rendering to make the rendering of in-game characters differentiable to accelerate the parameters optimization. Previous works like F2Ps give up controlling discrete facial parameters due to the problem of discontinuous parameter gradients. To our best knowledge, the proposed T2P is the first method that can handle both continuous and discrete facial parameters optimization in a unified framework to create vivid in-game characters. F2P is also the first text-driven automatic character creation suitable for game environments.", + "bbox": [ + 75, + 550, + 468, + 776 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our method consists of a pre-training stage and a text-to-parameter translation stage. In the pre-training stage, we first train an imitator to imitate the rendering behavior of the game engine to make the parameter searching pipeline end-to-end differentiable. We also pre-train a translator to translate the CLIP image embeddings of random game characters to their facial parameters. Then at the text-to-parameter translation stage, on one hand, we fine-tune the translator", + "bbox": [ + 75, + 780, + 470, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "on un-seen CLIP text embeddings to predict continuous parameters given text prompt rather than images, on the other hand, discrete parameters are evolutionally searched. Finally, the game engine takes in the facial parameters and creates the in-game characters which correspond to the text prompt described, as shown in Fig 1. Objective evaluations and subjective evaluations both indicate our method outperforms other SOTA zero-shot text-to-3D methods.", + "bbox": [ + 496, + 90, + 890, + 210 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our contributions are summarized as follows:", + "text_level": 1, + "bbox": [ + 517, + 212, + 821, + 226 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) We propose a novel text-to-parameter translation method for zero-shot in-game character auto-creation. 
To the best of our knowledge, we are the first to study text-driven character creation ready for game environments.", + "2) The proposed T2P can optimize both continuous and discrete parameters in a unified framework, unlike earlier methods giving up controlling difficult-to-learn discrete parameters.", + "3) The proposed text-driven auto-creation paradigm is flexible and friendly for users, and the predicted physically meaningful facial parameters enable players or game developers to further finetune the game character as they want." + ], + "bbox": [ + 496, + 227, + 890, + 409 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 424, + 640, + 440 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Parametric Character Auto-Creation", + "text_level": 1, + "bbox": [ + 500, + 450, + 820, + 465 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Character auto-creation has been an emerging research topic because of its significance in role-playing games, augmented reality, and metaverses. Some methods on this topic are recently proposed. Tied Output Synthesis (TOS) learns to predict a set of binary facial parameters to control the graphical engine to generate a character that looks like the human in input photo [49]. Face-to-Parameter translation (F2P) is proposed to optimize a set of continuous facial parameters to minimize the distance between the generated game character's face and the input photo [38]. In F2P's following works [39, 41], the framework is improved to achieve fast and robust character creation. The PockerFace-Gan is proposed to decouple the expression features and identity features in order to generate expression-less game characters [40]. Borovikov et al. applies domain engineering and predict the facial parameters in a global-local way, considering the face as a hierarchical ensemble of general facial structure and local facial regions [3]. These methods all need reference photos to create characters, while we aim at creating characters based on text input.", + "bbox": [ + 496, + 473, + 890, + 776 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2.3D Face Reconstruction", + "text_level": 1, + "bbox": [ + 500, + 786, + 718, + 801 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D face reconstruction also aims to generate a 3D face given single or multi-view 2D facial images. 3D morphable model (3DMM) [1] and its variants [2,6,9,12,19] are representative methods in the literature. They first parameterize a 3D face mesh data and then optimize it to match the facial identity, expression, and texture of given reference im", + "bbox": [ + 496, + 810, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "21014", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/1ee822a8851306695398626c8ce3763d3ac4c4340965dcaa2c95796d87cbee04.jpg", + "image_caption": [ + "Figure 2. An overview of the proposed T2P. $E_{I}$ and $E_{T}$ denote the CLIP image encoder and text encoder, respectively. An imitator is trained to mimic the game engine and achieve differentiable rendering. A translator is pre-trained to translate the CLIP image embeddings to continuous facial parameters. When creating game characters given text prompts, T2P searches continuous facial parameters by fine-tuning the translator and searches discrete facial parameters by the evolution search. 
Finally, the facial parameters are fed into the game engine to render the in-game characters." + ], + "image_footnote": [], + "bbox": [ + 83, + 92, + 893, + 361 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "ages. Taking advantage of deep Convolutional Neural Networks (CNNs), high-level image representations are used to improve the predicting of the morphable model coefficients [7, 13, 44]. The recently proposed MeInGame firstly reconstructs the face as a 3DMM model and then transfers the face to game mesh keeping their topology [20]. It also predicts texture map and lighting coefficients from input images to improve the outlook of the game mesh.", + "bbox": [ + 75, + 465, + 472, + 587 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. Zero-Shot Text-Driven Generation", + "text_level": 1, + "bbox": [ + 76, + 603, + 380, + 618 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Zero-shot content generation is recently made possible by the powerful multimodel representation and generalization capabilities of CLIP [32]. Combining the CLIP with variational autoencoder or diffusion model, DALL-E [31], DALL-E 2 [31] and Imagen [30] achieved high-quality zero-shot text-to-image synthesis, and sparked widespread discussion. Text-driven image translation and manipulation, and human image generation are also explored [8, 15, 16, 18, 25, 48, 50-52]. Taking advantage of CLIP, zero-shot text-driven 3D object generation and manipulation methods made rapid advances [5, 14, 17, 23, 37, 46]. The most recently proposed Dreamfusion uses Imagen to supervise the Neural Radiance Fields network (NeRF) [24] to generate 3D object [27]. The most related work to ours named AvatarCLIP was recently proposed to achieve zero-shot text-driven 3D avatar generation and animation [10]. Given a text prompt, AvatarCLIP first generates a coarse shape by code-book-based retrieval, guided by CLIP. Then the coarse", + "bbox": [ + 75, + 628, + 472, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "shape is used to initialize a NeuS network [47] to generate the implicit representation. Finally, the implicit 3D avatar is optimized to sculpt fine geometry and generate texture. This method treats the 3D human generation as a NeuS optimization process. However, the implicit representation makes it difficult to implement in games and unfriendly to user interaction. As a comparison, our created bone-driven game characters are controlled by explicit parameters with physical meanings. This enables players and game developers to further edit the created characters according to their needs.", + "bbox": [ + 496, + 465, + 893, + 618 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 500, + 630, + 591, + 646 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Fig. 2 shows an overview of the proposed T2P. We first train an imitator to simulate the game engine and pretrain a translator to translate the CLIP image embeddings to continuous facial parameters. Then, to achieve text-to-parameter translation, given the text prompts, we fine-tune the translator to predict continuous parameters and combine the evolution search to optimize discrete parameters.", + "bbox": [ + 496, + 655, + 893, + 763 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. 
Imitator", + "text_level": 1, + "bbox": [ + 500, + 771, + 602, + 786 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We train a neural imitator to mimic the behavior of the game engine in order to differentiate the rendering of in-game characters. It takes in continuous facial parameters $\\pmb{x}$ and renders the front view of the game character $\\pmb{y}$ . Different from the F2P [38] taking a similar generator network architecture of DC-GAN [29], we add a positional encoder at the input-end of the renderer to improve the facial param-", + "bbox": [ + 496, + 794, + 893, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "21015", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/9ff6fbf9c3690ff6d7095235fccf21ef72ee04810507a995b9e68ba29091debe.jpg", + "image_caption": [ + "Figure 3. The architecture of our translator. The translator contains a set of transformer encoder layers, several learnable tokens, a fine-tuning head, and a prediction head. The translator is firstly pre-trained on CLIP image embeddings and then fine-tuned on CLIP text embeddings to predict continuous facial parameters. When fine-tuning the translator, only the parameters of the fine-tuning head are updated." + ], + "image_footnote": [], + "bbox": [ + 83, + 90, + 460, + 189 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "eters parsing on complex textures and geometry. We treat the imitator training as a regression problem to minimize the pixel-wise distance between the images rendered by the game engine and the imitator. To avoid the blurry rendered pixels, we use L1 loss as the loss function to train the imitator:", + "bbox": [ + 75, + 325, + 468, + 416 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {G} (\\boldsymbol {x}) = E _ {\\boldsymbol {x} \\sim u (\\boldsymbol {x})} \\left\\{\\left| \\left| \\boldsymbol {y} - \\hat {\\boldsymbol {y}} \\right| \\right| _ {1} \\right\\} \\tag {1} \\\\ = E _ {\\boldsymbol {x} \\sim u (\\boldsymbol {x})} \\left\\{\\left| \\left| G (\\boldsymbol {x}) - \\operatorname {E n g i n e} (\\boldsymbol {x}) \\right| \\right| _ {1} \\right\\}, \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 117, + 425, + 468, + 462 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $G(\\pmb{x})$ and $\\text{Engine}(\\pmb{x})$ represent the image rendered by the imitator and game engine, respectively.", + "bbox": [ + 76, + 472, + 468, + 503 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To prepare the training data, we randomly sample 170K continuous facial parameters $\\pmb{x}$ from a multidimensional uniform distribution $u(\\pmb{x})$ . We feed these parameters into the game engine to render out the facial images. Then these facial parameters and image pairs are split into 80% and 20% for training and validation.", + "bbox": [ + 76, + 503, + 468, + 594 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Continuous Parameters Searching", + "text_level": 1, + "bbox": [ + 76, + 604, + 375, + 619 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We aim to train a translator to predict continuous facial parameters based on CLIP text embeddings. To reduce the learning difficulty, we first pre-train the translator on CLIP image embeddings and then fine-tune it on text CLIP embeddings. 
The main reason is that text-parameter pairs are expensive to collect, while image-parameter pairs can be infinitely generated with the game engine.", + "bbox": [ + 75, + 627, + 468, + 733 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We take the randomly sampled facial parameters and rendered image pairs mentioned in section 3.1 as training data. The rendered images are fed into the CLIP image encoder to collect image embeddings. Then we build a translator $F$ based on a transformer encoder, and train it to map the image embeddings $e_{I}$ into facial parameters $x$ , as shown in Fig. 3. The object function is defined as the L1 reconstruction loss between the true facial parameters and the predicted ones:", + "bbox": [ + 75, + 733, + 468, + 868 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {F} \\left(\\boldsymbol {e} _ {I}, \\boldsymbol {x}\\right) = E _ {e _ {I} \\sim u \\left(\\boldsymbol {e} _ {I}\\right)} \\left\\{\\left| \\left| F \\left(\\boldsymbol {e} _ {I}\\right) - \\hat {\\boldsymbol {x}} \\right| \\right| _ {1} \\right\\}. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 130, + 882, + 468, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "When T2P creates game characters given text prompts, there is no image embeddings available. Though the CLIP is trained to pull the text and image pairs close to each other in the embedding space, there are still gaps between the two modalities. We, therefore, fine-tune the translator to fit the input text embeddings. Inspired by the recent prompt tuning study [53], we fix the parameters of the transformer and fine-tune a tiny tuner head. The translator is trained to map the text embeddings $e_{T}$ to facial parameters $x$ . Then the facial parameters are fed into the imitator to render the image of the game character. The fine-tuning object function is to minimize the cosine distance between the given text embeddings $e_{T}$ and the image embeddings of the rendered image:", + "bbox": [ + 496, + 90, + 890, + 303 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {C L I P} \\left(\\boldsymbol {e} _ {T}, \\boldsymbol {x}\\right) = 1 - \\cos \\left(\\boldsymbol {e} _ {T}, E _ {I} (G (\\boldsymbol {x}))\\right) \\tag {3} \\\\ = 1 - \\cos \\left(\\boldsymbol {e} _ {T}, E _ {I} \\left(G \\left(F \\left(\\boldsymbol {e} _ {T}\\right)\\right)\\right), \\right. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 537, + 314, + 890, + 349 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $E_{I}$ is the CLIP image encoder. The parameters of the fine-tuned head $w$ are iteratively updated as follows,", + "bbox": [ + 498, + 361, + 890, + 391 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nw \\leftarrow w - \\eta_ {t} \\frac {\\partial \\mathcal {L} _ {C L I P}}{\\partial w}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 617, + 401, + 890, + 431 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\eta_{t}$ is the learning rate at $t$ th iteration. 
We follow the snapshot ensembles [11] and set the learning rate using the cosine annealing schedule with warm restarts (SGDR) [22] to encourage the translator to converge to and escape from local minima:", + "bbox": [ + 496, + 441, + 890, + 516 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\eta_ {t} = \\eta_ {\\min } + \\frac {1}{2} \\left(\\eta_ {\\max } - \\eta_ {\\min }\\right) \\left(1 + \\cos \\left(\\frac {N _ {t}}{N} \\pi\\right)\\right), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 524, + 525, + 890, + 555 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\eta_{min}$ , $\\eta_{max}$ , and $\\eta_t$ denote the minimum, maximum, and current learning rate, respectively. $N$ denotes the number of iterations between two warm restarts, and $N_t$ denotes the number of iterations since the last restart. Each time the $N_t$ equals $N$ , the current iteration is called a snapshot point, and we save the predicted facial parameters at this point. These facial parameters are then used to initialize the first population of the evolution search.", + "bbox": [ + 496, + 565, + 890, + 686 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Discrete Parameters Searching", + "text_level": 1, + "bbox": [ + 500, + 695, + 772, + 710 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In the bone-driven face model, besides continuous facial parameters controlling its bones, discrete facial elements (like the hairstyle, beard styles, and make-up) are also important. However, these elements are difficult for the imitator to learn, because they are discrete and highly changeable. Unlike previous methods that ignore discrete parameters during optimization, we propose to evolutionally search them by directly interacting with the game engine. Evolutionary algorithms have been widely used in reinforcement learning and neural architecture search [21, 36], where the objective function can be optimized without using any gradient information.", + "bbox": [ + 496, + 719, + 890, + 898 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "21016", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/c8eec65ad6ffdac643645c7ac82ebc49997794290b8d614c918f835ac3c6a09b.jpg", + "image_caption": [ + "Figure 4. Game characters created by the proposed T2P given the text prompt \"monkey\". The first five game characters are created by the translator at different fine-tuning iterations. The last one is created by the evolution search, adding a discrete facial element, a beard." + ], + "image_footnote": [], + "bbox": [ + 101, + 88, + 867, + 220 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Here we perform a text-driven evolution search to find the optimum discrete facial parameters. The initial generation contains random initialized discrete parameters as well as the continuous facial parameters predicted by the translator. To impose supervision on 3D views, we render out two images for each game character, one for front view $y_{front}$ and one for side view $y_{side}$ . 
The facial parameters are scored by the CLIP model as follows,", + "bbox": [ + 75, + 284, + 472, + 405 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} S _ {C L I P} = \\alpha \\cos \\left(E _ {T} (T), E _ {I} \\left(\\boldsymbol {y} _ {\\text {f r o n t}}\\right)\\right) \\tag {6} \\\\ + (1 - \\alpha) \\cos \\left(E _ {T} \\left(T ^ {\\prime}\\right), E _ {I} (\\boldsymbol {y} _ {\\text {s i d e}})\\right), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 127, + 415, + 468, + 450 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\alpha$ is the weight coefficient, $T$ is the given text prompt, $T'$ is the automatically adjusted text prompt for the side view, $E_{T}$ is the CLIP text encoder and $E_{I}$ is the CLIP image encoder. Then $k$ random pairs of facial parameters are selected as parents to produce the next generation through crossover and mutation. For the crossover step, child $\\pmb{x}^c$ is generated by randomly choosing a value from parents $\\pmb{x}^f$ and $\\pmb{x}^m$ at each position $i$ ,", + "bbox": [ + 75, + 460, + 468, + 580 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nP \\left(x _ {i} ^ {c} = x _ {i} ^ {f}\\right) + P \\left(x _ {i} ^ {c} = x _ {i} ^ {m}\\right) = 1. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 158, + 589, + 468, + 609 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For the mutation step, each child parameter $\\pmb{x}^c$ is added randomly noise at multiple randomly selected position $i$ ,", + "bbox": [ + 75, + 618, + 468, + 650 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nx _ {i} ^ {c \\prime} = x _ {i} ^ {c} + n o i s e. \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 659, + 468, + 676 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The newly generated children's parameters together with the better ones of the parents' parameters are selected as the next generation and get involved in the looping selection, crossover, and mutation. The evolution process terminates until the CLIP score is converged.", + "bbox": [ + 75, + 686, + 468, + 763 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. Implementation Details", + "text_level": 1, + "bbox": [ + 76, + 771, + 294, + 787 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Network architecture. Our imitator consists of a positional encoder with four fully-connected layers and a generator with six transposed convolution layers. The generator is similar to DCGAN's generator [29], except that its Tanh activation of the output layer is removed to encourage a better convergence. The translator consists of eight Transformer encoder layers [45], each of them having eight", + "bbox": [ + 75, + 794, + 470, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "multi-attention heads, and sixteen input tokens. The first token is the CLIP embeddings and the other tokens are learnable. We concatenate a prediction head with one single fully-connected layer after the Transformer. The fine-tuning head of the translator is a three layers perceptron with a bottleneck architecture.", + "bbox": [ + 496, + 284, + 890, + 375 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training details. The imitator and translator are both trained using SGD optimizer [4]. We set the momentum to 0.9 and set the weight decay to 5e-4. 
For imitator pretraining, the learning rate is set to 1e-3 and is reduced to $0.98\\mathrm{x}$ per 30 epochs, and the training is stopped after 500 epochs. For translator pre-training, the learning rate is set to 1e-4 and is reduced to $0.1\\mathrm{x}$ at the 600th epoch and the training is stopped at the 1000th epoch. We randomly sample 170K facial parameters and corresponding rendered images of in-game characters pairs to train the imitator and translator. For translator fine-tuning, the minimum and maximum learning rates are set to $\\eta_{min} = 0$ and $\\eta_{max} = 1$ , respectively, and the number of iterations between two warm starts $N$ is set to 10 for the SGDR learning rate scheduler. Fine-tuning is stopped when the CLIP scores are no longer improved by more than 100 iterations.", + "bbox": [ + 496, + 378, + 892, + 621 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Evolution search. The facial parameters predicted by the translator at the last 5 snapshot points are selected as initial values. Each set of facial parameters contains 269 continuous parameters and 62 discrete parameters, and the initialized values of these discrete parameters are set to zeros, which means these facial elements do not appear at the beginning. These 5 sets of facial parameters together with 5 more random ones are the first population for the evolution search. We found that updating continuous parameters together with discrete parameters in the evolution search achieves better results. The number of selected pairs of parents is set to 10. The weight coefficient $\\alpha$ is set to 0.8. The crossover rate is set to 0.4 and the mutation rate is set to 0.05.", + "bbox": [ + 496, + 625, + 893, + 835 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Prompt engineering. To enhance the text prompts, we follow the CLIP [28] and adapt prompt ensembling to the given text prompts. We preset 12 template sentences, such as “{} head rendered in a game engine”, and then fill the", + "bbox": [ + 496, + 840, + 893, + 902 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "21017", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/564530c9162f8746800af5bfff816310c8e5d314bee28305f2a7a0a5c89fedfc.jpg", + "image_caption": [ + "Figure 5. In-game fictional characters created by the proposed T2P given different text prompts. The results in the first row are created by the translator. The results in the second row are created by the evolution search." + ], + "image_footnote": [], + "bbox": [ + 86, + 88, + 460, + 292 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "“{}” with the input text prompt. We calculate the CLIP text embeddings of the filled sentences and take their mean value as the input text embeddings for the translator and evolution search. For evolution search, we further add “side view of” to the template sentences when calculating the CLIP score of the rendered images of the side view.", + "bbox": [ + 75, + 385, + 468, + 476 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experimental Results and Analysis", + "text_level": 1, + "bbox": [ + 76, + 489, + 393, + 507 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Game Character Auto-Creation", + "text_level": 1, + "bbox": [ + 76, + 513, + 357, + 529 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Fig. 4 shows the game characters created by T2P given the text prompt \"monkey\". 
The first five images show the in-game characters created by the translator at different fin-tuning iterations. The in-game character gradually grows from a normal human face to look like a monkey. The evolution search further searches discrete facial elements and also slightly improves continuous parameters. The last image of Fig. 4 shows the evolution search adds a beard to the character to make it more vivid. In this process, the proposed T2P is enabled to search both continuous and discrete facial parameters to optimize the in-game character to be consistent with the given text prompt and vivid. Fig. 5 shows more results of fictional character creation. Results in the first row are controlled by continuous parameters, and results in the second row are added discrete facial elements.", + "bbox": [ + 75, + 537, + 468, + 763 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "T2P can create characters with animal heads, as shown in Fig. 4, fictional characters, as shown in Fig. 5, and celebrities, as shown in Fig. 6, and characters conditioned on compactied text prompts, as shown in Fig. 7. These results show the powerful zero-shot game character auto-creation ability of the proposed T2P. By inputting only a text prompt, T2P can generate a vivid character, which is more flexible and time-saving for players or game developers compared to manual customization.", + "bbox": [ + 75, + 765, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Comparison with Other Methods", + "text_level": 1, + "bbox": [ + 500, + 90, + 790, + 107 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We compare the proposed method with AvatarCLIP [10] and DreamFusion [27]. The comparison includes objective evaluations and subjective evaluations. Since DreamFusion is not open source yet, we use the community implementation version of it, named Stable-Dreamfusion1. This version uses the open-source stable diffusion model [34] to drive the 3D object generation. We only compare the heads generated by these methods. This may introduce unfairness, thus we will never claim superiority besides the head part.", + "bbox": [ + 496, + 113, + 890, + 250 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We feed 24 different text prompts into these two methods and our proposed T2P to generate characters respectively. Three examples are shown in Fig. 8. For objective evaluations, we compare the Inception Score [35], CLIP Ranking-1, and their speed (run on NVIDIA A30), as shown in Table 1. For each method, CLIP Ranking-1 calculates the ratio of its created characters ranked by CLIP as top-1 among the characters created by all three methods. The evaluation scores show the proposed T2P outperforms the other two methods and runs at a much faster speed.", + "bbox": [ + 496, + 250, + 890, + 402 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For subjective evaluations, we invite 20 volunteers to evaluate the generation results in terms of realistic degree and consistency with the given text. They are asked to focus on the heads and faces of the characters and score them from 1 to 5, where 1 is the worst and 5 is the best. The evaluation results are shown in Table 1. Evaluation results show our method consistently outperforms the other two methods. We also notice that AvatarCLIP performs good at celebrities generation, Dreamfusion is good at fictional characters generation, while our method performs better at both types, just as shown in Fig. 
8.", + "bbox": [ + 496, + 402, + 892, + 568 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Ablation Studies", + "text_level": 1, + "bbox": [ + 500, + 578, + 663, + 593 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We conduct ablation studies to analyze the importance of the proposed translator and evolution search. We run our framework with three settings, including 1) only evolution search 2) only translator and 3) both translator and evolution search. The details of these settings are as follows.", + "bbox": [ + 496, + 602, + 890, + 676 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) Evolution Search. The translator is removed from the framework and the evolution search is used to directly search both continuous and discrete facial parameters given text prompts.", + "2) Translator. The evolution search is abandoned, and the translator is fine-tuned to translate the given text prompts into continuous facial parameters and gives up controlling discrete parameters.", + "3) Full Implementation. Given text prompts, the translator is fine-tuned to predict continuous facial parameters. Then, the evolution search further searches discrete parameters and also improves the continuous ones.", + "Fig. 9 shows the CLIP scores increasing curves with the" + ], + "bbox": [ + 496, + 678, + 890, + 876 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "$^{1}$ https://github.com/ashawkey/stable-dreamfusion", + "bbox": [ + 517, + 886, + 781, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "21018", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/a8c0e18028430106ba80174d06514eb1b674748c6e98b8cc07b78c135af40bf5.jpg", + "image_caption": [ + "Figure 6. In-game celebrities created by the proposed T2P. This figure shows the front view and the side view for each character." + ], + "image_footnote": [], + "bbox": [ + 83, + 88, + 885, + 537 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/4ec08a6b83ab5c4f28db921e6451c37ba276e3e079a94127994f157808117282.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><thead><tr><th rowspan="2">Method</th><th colspan="3">Objective Evaluations</th><th colspan="2">Subjective Evaluations</th></tr>
<tr><th>Inception Score ↑</th><th>CLIP Ranking-1 ↑</th><th>Time ↓</th><th>Reality ↑</th><th>Consistency with Text ↑</th></tr></thead>
<tbody><tr><td>DreamFusion [27]</td><td>1.60 ± 0.12</td><td>16.67%</td><td>254.50 min</td><td>1.85 ± 1.02</td><td>2.23 ± 1.39</td></tr>
<tr><td>AvatarCLIP [10]</td><td>1.37 ± 0.31</td><td>16.67%</td><td>177.79 min</td><td>1.97 ± 0.53</td><td>2.14 ± 0.66</td></tr>
<tr><td>T2P (ours)</td><td>1.65 ± 0.21</td><td>66.66%</td><td>359.47 s</td><td>3.87 ± 0.47</td><td>3.34 ± 0.53</td></tr></tbody></table>
", + "bbox": [ + 89, + 574, + 879, + 667 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 1. Comparison results of DreamFusion, AvatarCLIP, and the proposed T2P in terms of objective and subjective evaluations.", + "bbox": [ + 99, + 674, + 866, + 688 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "T2P running in 300 seconds. The means and standard deviations are calculated based on 100 times repeat running driven by one text prompt. As shown in the figure, the full implementation of our method always outperforms the other two. The translator is optimized rapidly to find optimal continuous parameters but can not further improve the CLIP scores because of lacking discrete facial elements. Compared with the translator, the evolution search is quite slow but can reach a higher CLIP score. The full implementation of T2P takes advantage of both translator and evolution search and achieves fast and better optimization.", + "bbox": [ + 75, + 715, + 470, + 882 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We further test different settings of proposed T2P on 100", + "bbox": [ + 96, + 885, + 470, + 900 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "different text prompts to evaluate their performance. Table 2 shows the results. The first row is the result of directly using the pre-trained translator to predict continuous facial parameters, and the second row is the result of fine-tuning translator to predict parameters. The fine-tuned one can achieve a higher CLIP score, which indicates the necessity of fine-tuning. The CLIP scores of only using the evolution search and the full version of T2P are shown in the third and fourth rows, respectively. The full version of T2P achieves the highest CLIP score because it can search both continuous and discrete facial parameters to create better in-game characters.", + "bbox": [ + 496, + 715, + 890, + 897 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "21019", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/dec700e641a5a823ea484affc9a91737b1535d790683cc60be23c53e57b431f8.jpg", + "image_caption": [ + "Figure 7. In-game characters created by the proposed T2P given complicated prompts." + ], + "image_footnote": [], + "bbox": [ + 89, + 88, + 460, + 210 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/28c5b6378145d59a4612625316da84833e3b1c7d6f7aeeff04d4c8ecb709b524.jpg", + "image_caption": [ + "Figure 8. Comparison of AvatarCLIP, DreamFusion, and the proposed T2P. Each column shows the 3D characters created by these methods given the same text prompt." + ], + "image_footnote": [], + "bbox": [ + 89, + 258, + 460, + 547 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/881609561241c57c2c1c30fd174d2c335053c84df12dff839c84ce682b8e5e59.jpg", + "image_caption": [ + "Figure 9. Curves of CLIP scores increasing within 300s under three different module settings." + ], + "image_footnote": [], + "bbox": [ + 84, + 616, + 460, + 781 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4. 
Facial Parameter Interpolation", + "text_level": 1, + "bbox": [ + 76, + 845, + 344, + 864 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Since the generated characters are controlled by parameters with explicit physical meanings, users can further ad", + "bbox": [ + 76, + 869, + 468, + 902 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/7d44d56dfbba56c6d895f6cfad1234fd7b435a79f9030236a53e81d2e884a589.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><thead><tr><th>Translator</th><th>Evolution Search</th><th>CLIP Score</th></tr></thead>
<tbody><tr><td>fixed</td><td>×</td><td>27.29 ± 3.10</td></tr>
<tr><td>fine-tuned</td><td>×</td><td>34.85 ± 3.15</td></tr>
<tr><td>×</td><td>✓</td><td>35.31 ± 2.26</td></tr>
<tr><td>fine-tuned</td><td>✓</td><td>35.72 ± 2.70</td></tr></tbody></table>
", + "bbox": [ + 532, + 88, + 859, + 180 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 2. Results of ablation studies. Four versions of the proposed method are compared.", + "bbox": [ + 500, + 186, + 890, + 215 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/974c37c5e18a83288778f5010de30eea88e2d41f91d49c30caaa5dd7948ee776.jpg", + "image_caption": [ + "Figure 10. Examples of the facial parameter interpolation of game characters." + ], + "image_footnote": [], + "bbox": [ + 509, + 229, + 883, + 382 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "just the outlook of the characters as they want. One can also interpolate different facial parameters to create a new character, as shown in Fig. 10. The first row shows the interpolation between the monkey and Thanos, in which the new facial parameters are calculated as follows,", + "bbox": [ + 498, + 445, + 892, + 521 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {x} _ {\\text {n e w}} = \\beta \\boldsymbol {x} _ {\\text {m o n k e y}} + (1 - \\beta) \\boldsymbol {x} _ {\\text {T h a n o s}}, \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 563, + 532, + 890, + 549 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $\\beta$ is the interpolation coefficient decreasing from 1 to 0. The results in the second row of Fig. 10 show the interpolation between the monkey and Shrek. Besides, more than two characters can also be interpolated. We believe the benefits of the facial parameters controlling bone-driven game characters can give players a higher degree of freedom in character customization.", + "bbox": [ + 498, + 559, + 892, + 664 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 679, + 619, + 694 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We propose a novel method called \"text-to-parameter translation\" to create bone-driven in-game characters given text prompts. Our method achieves high-quality zero-shot creation of in-game characters and can search both continuous and discrete facial parameters in a unified framework. The proposed text-driven framework is flexible and time-saving for users, and the created bone-driven characters with physically meaningful facial parameters are convenient for users to further edit as they want. Experimental results show our method achieves high-quality and vivid zero-shot text-driven game character auto-creation and outperforms other SOTA text-to-3D generation methods in terms of objective evaluations, speed, and subjective evaluations.", + "bbox": [ + 496, + 704, + 892, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "21020", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Volker Blanz and Thomas Vetter. A morphable model for the synthesis of 3d faces. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, pages 187-194, 1999. 2", + "[2] James Booth, Epameinondas Antonakos, Stylianos Ploumpis, George Trigeorgis, Yannis Panagakis, and Stefanos Zafeiriou. 3d face morphable models\" in-the-wild\". In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 48-57, 2017. 
2", + "[3] Igor Borovikov, Karine Levonyan, Jon Rein, Pawel Wrotek, and Nitish Victor. Applied monocular reconstruction of parametric faces with domain engineering. arXiv preprint arXiv:2208.02935, 2022. 2", + "[4] Léon Bottou. Large-scale machine learning with stochastic gradient descent. In Proceedings of COMPSTAT'2010, pages 177-186. Springer, 2010. 5", + "[5] Zehranaz Canfes, M Furkan Atasoy, Alara Dirik, and Pinar Yanardag. Text and image guided 3d avatar generation and manipulation. arXiv preprint arXiv:2202.06079, 2022. 3", + "[6] Chen Cao, Yanlin Weng, Shun Zhou, Yiying Tong, and Kun Zhou. Facewarehouse: A 3d facial expression database for visual computing. IEEE Transactions on Visualization and Computer Graphics, 20(3):413-425, 2013. 2", + "[7] Pengfei Dou, Shishir K Shah, and Ioannis A Kakadiaris. End-to-end 3d face reconstruction with deep neural networks. In proceedings of the IEEE conference on computer vision and pattern recognition, pages 5908-5917, 2017. 2, 3", + "[8] Rinon Gal, Or Patashnik, Haggai Maron, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators. ACM Transactions on Graphics (TOG), 41(4):1-13, 2022. 3", + "[9] Thomas Gering, Andreas Morel-Forster, Clemens Blumer, Bernhard Egger, Marcel Luthi, Sandro Schonborn, and Thomas Vetter. Morphable face models—an open framework. In 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018), pages 75–82. IEEE, 2018. 2", + "[10] Fangzhou Hong, Mingyuan Zhang, Liang Pan, Zhongang Cai, Lei Yang, and Ziwei Liu. Avatarclip: Zero-shot text-driven generation and animation of 3d avatars. arXiv preprint arXiv:2205.08535, 2022. 2, 3, 6, 7", + "[11] Gao Huang, Yixuan Li, Geoff Pleiss, Zhuang Liu, John E Hopcroft, and Kilian Q Weinberger. Snapshot ensembles: Train 1, get m for free. arXiv preprint arXiv:1704.00109, 2017. 4", + "[12] Patrik Huber, Guosheng Hu, Rafael Tena, Pouria Mortazavian, P Koppen, William J Christmas, Matthias Ratsch, and Josef Kittler. A multiresolution 3d morphable face model and fitting framework. In Proceedings of the 11th international joint conference on computer vision, imaging and computer graphics theory and applications, 2016. 2", + "[13] Aaron S Jackson, Adrian Bulat, Vasileios Argyriou, and Georgios Tzimiropoulos. Large pose 3d face reconstruction from a single image via direct volumetric cnn regression. In Proceedings of the IEEE international conference on computer vision, pages 1031-1039, 2017. 3" + ], + "bbox": [ + 78, + 114, + 470, + 901 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[14] Ajay Jain, Ben Mildenhall, Jonathan T Barron, Pieter Abbeel, and Ben Poole. Zero-shot text-guided object generation with dream fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 867-876, 2022. 3", + "[15] Yuming Jiang, Ziqi Huang, Xingang Pan, Chen Change Loy, and Ziwei Liu. Talk-to-edit: Fine-grained facial editing via dialog. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 13799-13808, 2021. 3", + "[16] Yuming Jiang, Shuai Yang, Haonan Qju, Wayne Wu, Chen Change Loy, and Ziwei Liu. Text2human: Text-driven controllable human image generation. ACM Transactions on Graphics (TOG), 41(4):1-11, 2022. 3", + "[17] Nasir Khalid, Tianhao Xie, Eugene Belilovsky, and Tiberiu Popa. Clip-mesh: Generating textured meshes from text using pretrained image-text models. 
ACM Transactions on Graphics (TOG), Proc. SIGGRAPH Asia, 2022. 3", + "[18] Gwanghyun Kim and Jong Chul Ye. Diffusionclip: Text-guided image manipulation using diffusion models. 2021. 3", + "[19] Tianye Li, Timo Bolkart, Michael J Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4d scans. ACM Trans. Graph., 36(6):194-1, 2017. 2", + "[20] Jiangke Lin, Yi Yuan, and Zhengxia Zou. Meingame: Create a game character face from a single portrait. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 311-319, 2021. 3", + "[21] Yuqiao Liu, Yanan Sun, Bing Xue, Mengjie Zhang, Gary G Yen, and Kay Chen Tan. A survey on evolutionary neural architecture search. IEEE transactions on neural networks and learning systems, 2021. 4", + "[22] Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts. arXiv preprint arXiv:1608.03983, 2016. 4", + "[23] Oscar Michel, Roi Bar-On, Richard Liu, Sagie Benaim, and Rana Hanocka. Text2mesh: Text-driven neural stylization for meshes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13492-13502, 2022. 3", + "[24] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 3", + "[25] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2085–2094, 2021. 3", + "[26] Weilong Peng, Zhiyong Feng, Chao Xu, and Yong Su. Parametric t-spline face morphable model for detailed fitting in shape subspace. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6139-6147, 2017. 2", + "[27] Ben Poole, Ajay Jain, Jonathan T Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. arXiv preprint arXiv:2209.14988, 2022. 3, 6, 7" + ], + "bbox": [ + 501, + 92, + 890, + 901 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "21021", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[28] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pages 8748-8763. PMLR, 2021. 5", + "[29] Alec Radford, Luke Metz, and Soumith Chintala. Unsupervised representation learning with deep convolutional generative adversarial networks. arXiv preprint arXiv:1511.06434, 2015. 3, 5", + "[30] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022. 3", + "[31] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In International Conference on Machine Learning, pages 8821-8831. PMLR, 2021. 3", + "[32] Scott Reed, Zeynep Akata, Xinchen Yan, Lajanugen Logeswaran, Bernt Schiele, and Honglak Lee. Generative adversarial text to image synthesis. In International conference on machine learning, pages 1060-1069. PMLR, 2016. 3", + "[33] Elad Richardson, Matan Sela, Roy Or-El, and Ron Kimmel. 
Learning detailed face reconstruction from a single image. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1259–1268, 2017. 2", + "[34] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10684-10695, 2022. 6", + "[35] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. Advances in neural information processing systems, 29, 2016. 6", + "[36] Tim Salimans, Jonathan Ho, Xi Chen, Szymon Sidor, and Ilya Sutskever. Evolution strategies as a scalable alternative to reinforcement learning. arXiv preprint arXiv:1703.03864, 2017. 4", + "[37] Aditya Sanghi, Hang Chu, Joseph G Lambourne, Ye Wang, Chin-Yi Cheng, Marco Fumero, and Kamal Rahimi Malekshan. Clip-forge: Towards zero-shot text-to-shape generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18603-18613, 2022. 3", + "[38] Tianyang Shi, Yi Yuan, Changjie Fan, Zhengxia Zou, Zhenwei Shi, and Yong Liu. Face-to-parameter translation for game character auto-creation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 161–170, 2019. 2, 3", + "[39] Tianyang Shi, Zhengxia Zou, Zhenwei Shi, and Yi Yuan. Neural rendering for game character auto-creation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2020. 2", + "[40] Tianyang Shi, Zhengxia Zou, Xinhui Song, Zheng Song, Changjian Gu, Changjie Fan, and Yi Yuan. Neutral face" + ], + "bbox": [ + 78, + 90, + 470, + 901 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "game character auto-creation via pokerface-gan. In Proceedings of the 28th ACM International Conference on Multimedia, pages 3201–3209, 2020. 2", + "[41] Tianyang Shi, Zhengxia Zuo, Yi Yuan, and Changjie Fan. Fast and robust face-to-parameter translation for game character auto-creation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 1733–1740, 2020. 2", + "[42] Ayush Tewari, Michael Zollhofer, Hyeongwoo Kim, Pablo Garrido, Florian Bernard, Patrick Perez, and Christian Theobalt. Mofa: Model-based deep convolutional face autoencoder for unsupervised monocular reconstruction. In Proceedings of the IEEE International Conference on Computer Vision Workshops, pages 1274-1283, 2017. 2", + "[43] Anh Tuan Tran, Tal Hassner, Iacopo Masi, Eran Paz, Yuval Nirkin, and Gerard Medioni. Extreme 3d face reconstruction: Seeing through occlusions. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3935-3944, 2018. 2", + "[44] Anh Tuan Tran, Tal Hassner, Iacopo Masi, and Gerard Medioni. Regressing robust and discriminative 3d morphable models with a very deep neural network. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5163-5172, 2017. 2, 3", + "[45] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. 5", + "[46] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Clip-nerf: Text-and-image driven manipulation of neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3835-3844, 2022. 
3", + "[47] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. arXiv preprint arXiv:2106.10689, 2021. 3", + "[48] Tianyi Wei, Dongdong Chen, Wenbo Zhou, Jing Liao, Zhentao Tan, Lu Yuan, Weiming Zhang, and Nenghai Yu. Hairclip: Design your hair by text and reference image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18072-18081, 2022. 3", + "[49] Lior Wolf, Yaniv Taigman, and Adam Polyak. Unsupervised creation of parameterized avatars. In Proceedings of the IEEE International Conference on Computer Vision, pages 1530-1538, 2017. 2", + "[50] Weihao Xia, Yujiu Yang, Jing-Hao Xue, and Baoyuan Wu. Tedigan: Text-guided diverse face image generation and manipulation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2256-2265, 2021. 3", + "[51] Zipeng Xu, Tianwei Lin, Hao Tang, Fu Li, Dongliang He, Nicu Sebe, Radu Timofte, Luc Van Gool, and Errui Ding. Predict, prevent, and evaluate: Disentangled text-driven image manipulation empowered by pre-trained vision-language model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18229-18238, 2022. 3" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "21022", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[52] Yingchen Yu, Fangneng Zhan, Rongliang Wu, Jiahui Zhang, Shijian Lu, Miaomiao Cui, Xuansong Xie, Xian-Sheng Hua, and Chunyan Miao. Towards counterfactual image manipulation via clip. In Proceedings of the 30th ACM International Conference on Multimedia, pages 3637-3645, 2022. 3", + "[53] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. International Journal of Computer Vision, 130(9):2337-2348, 2022. 4" + ], + "bbox": [ + 78, + 90, + 468, + 215 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "21023", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2023/Zero-Shot Text-to-Parameter Translation for Game Character Auto-Creation/120ba89b-8ae1-4464-85a9-dac434567f6d_model.json b/2023/Zero-Shot Text-to-Parameter Translation for Game Character Auto-Creation/120ba89b-8ae1-4464-85a9-dac434567f6d_model.json new file mode 100644 index 0000000000000000000000000000000000000000..ad90eb3cd0985a906b8f0cddf26f83c7cec8c0ac --- /dev/null +++ b/2023/Zero-Shot Text-to-Parameter Translation for Game Character Auto-Creation/120ba89b-8ae1-4464-85a9-dac434567f6d_model.json @@ -0,0 +1,2136 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.002, + 0.808, + 0.02 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation." + }, + { + "type": "header", + "bbox": [ + 0.326, + 0.017, + 0.723, + 0.031 + ], + "angle": 0, + "content": "Except for this watermark, it is identical to the accepted version;" + }, + { + "type": "header", + "bbox": [ + 0.294, + 0.032, + 0.754, + 0.046 + ], + "angle": 0, + "content": "the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.096, + 0.131, + 0.877, + 0.151 + ], + "angle": 0, + "content": "Zero-Shot Text-to-Parameter Translation for Game Character Auto-Creation" + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.18, + 0.87, + 0.217 + ], + "angle": 0, + "content": "Rui Zhao\\(^{1}\\), Wei Li\\(^{2}\\), Zhipeng Hu\\(^{1}\\), Lincheng Li\\(^{1*}\\), Zhengxia Zou\\(^{3*}\\), Zhenwei Shi\\(^{3}\\), Changjie Fan\\(^{1}\\), \\(^{1}\\)Netease Fuxi AI Lab, \\(^{2}\\)Nankai University, \\(^{3}\\)Beihang University" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.22, + 0.747, + 0.235 + ], + "angle": 0, + "content": "{zhaorui10, zphu, lilincheng, fanchangjie}@corp.netease.com," + }, + { + "type": "text", + "bbox": [ + 0.205, + 0.237, + 0.76, + 0.252 + ], + "angle": 0, + "content": "liwei@dbis.nankai.edu.cn, {zhengxiazou, zhenweishi}@buaa.edu.cn" + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.287, + 0.896, + 0.513 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.521, + 0.895, + 0.549 + ], + "angle": 0, + "content": "Figure 1. Game characters created by the proposed text-to-parameter translation (T2P) given different text prompts. The front view and three side views are shown for each character." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.582, + 0.314, + 0.598 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.615, + 0.473, + 0.873 + ], + "angle": 0, + "content": "Recent popular Role-Playing Games (RPGs) saw the great success of character auto-creation systems. The bone-driven face model controlled by continuous parameters (like the position of bones) and discrete parameters (like the hairstyles) makes it possible for users to personalize and customize in-game characters. Previous in-game character auto-creation systems are mostly image-driven, where facial parameters are optimized so that the rendered character looks similar to the reference face photo. This paper proposes a novel text-to-parameter translation method (T2P) to achieve zero-shot text-driven game character auto-creation. With our method, users can create a vivid in-game character with arbitrary text description without using any reference photo or editing hundreds of parameters manually. In our method, taking the power of large-scale pre-trained multi-modal CLIP and neural rendering, T2P searches both continuous facial parameters and discrete facial parame" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.583, + 0.895, + 0.72 + ], + "angle": 0, + "content": "ters in a unified framework. Due to the discontinuous parameter representation, previous methods have difficulty in effectively learning discrete facial parameters. T2P, to our best knowledge, is the first method that can handle the optimization of both discrete and continuous parameters. Experimental results show that T2P can generate high-quality and vivid game characters with given text prompts. T2P outperforms other SOTA text-to-3D generation methods on both objective evaluations and subjective evaluations." + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.749, + 0.633, + 0.765 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.774, + 0.894, + 0.866 + ], + "angle": 0, + "content": "Role-Playing Games (RPGs) are praised by gamers for providing immersive experiences. 
Some of the recent popular RPGs, like Grand Theft Auto Online1 and Naraka2, have opened up character customization systems to players. In such systems, in-game characters are bone-driven and controlled by continuous parameters, like the position," + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.232, + 0.9 + ], + "angle": 0, + "content": "*Corresponding Authors." + }, + { + "type": "page_footnote", + "bbox": [ + 0.518, + 0.875, + 0.76, + 0.888 + ], + "angle": 0, + "content": "1https://www.rockstargames.com/GTAOnline" + }, + { + "type": "page_footnote", + "bbox": [ + 0.52, + 0.888, + 0.695, + 0.901 + ], + "angle": 0, + "content": "2http://www.narakathegame.com" + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.875, + 0.76, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "21013" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.471, + 0.228 + ], + "angle": 0, + "content": "rotation, scale of each bone, and discrete parameters, like the hairstyle, beard styles, make-ups, and other facial elements. By manually adjusting these parameters, players can control the appearance of the characters in the game according to their personal preferences, rather than using predefined character templates. However, it is cumbersome and time-consuming for users to manually adjust hundreds of parameters - usually taking up to hours to create a character that matches their expectations." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.23, + 0.471, + 0.548 + ], + "angle": 0, + "content": "To automatically create in-game characters, the method named Face-to-parameter translation (F2P) was recently proposed to automatically create game characters based on a single input face image [38]. F2P and its variants [39, 41] have been successfully used in recent RPGs like Narake and Justice, and virtual meeting platform Yaotai. Recent 3D face reconstruction methods [2, 7, 26, 33, 42-44] can also be adapted to create game characters. However, all the above-mentioned methods require reference face photos for auto-creation. Users may take time to search, download and upload suitable photos for their expected game characters. Compared with images, text prompts are more flexible and time-saving for game character auto-creation. A very recent work AvatarCLIP [10] achieved text-driven avatar auto-creation and animation. It optimizes implicit neural networks to generate characters. However, the created characters are controlled by implicit parameters, which lack explicit physical meanings, thus manually adjusting them needs extra designs. This will be inconvenient for players or game developers to further fine-tune the created game characters as they want." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.551, + 0.47, + 0.777 + ], + "angle": 0, + "content": "To address the above problems, we propose text-to-parameter translation (T2P) to tackle the in-game character auto-creation task based on arbitrary text prompts. T2P takes the power of large-scale pre-trained CLIP to achieve zero-shot text-driven character creation and utilizes neural rendering to make the rendering of in-game characters differentiable to accelerate the parameters optimization. Previous works like F2Ps give up controlling discrete facial parameters due to the problem of discontinuous parameter gradients. 
To our best knowledge, the proposed T2P is the first method that can handle both continuous and discrete facial parameters optimization in a unified framework to create vivid in-game characters. F2P is also the first text-driven automatic character creation suitable for game environments." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.781, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Our method consists of a pre-training stage and a text-to-parameter translation stage. In the pre-training stage, we first train an imitator to imitate the rendering behavior of the game engine to make the parameter searching pipeline end-to-end differentiable. We also pre-train a translator to translate the CLIP image embeddings of random game characters to their facial parameters. Then at the text-to-parameter translation stage, on one hand, we fine-tune the translator" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.212 + ], + "angle": 0, + "content": "on un-seen CLIP text embeddings to predict continuous parameters given text prompt rather than images, on the other hand, discrete parameters are evolutionally searched. Finally, the game engine takes in the facial parameters and creates the in-game characters which correspond to the text prompt described, as shown in Fig 1. Objective evaluations and subjective evaluations both indicate our method outperforms other SOTA zero-shot text-to-3D methods." + }, + { + "type": "title", + "bbox": [ + 0.519, + 0.213, + 0.822, + 0.227 + ], + "angle": 0, + "content": "Our contributions are summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.228, + 0.892, + 0.289 + ], + "angle": 0, + "content": "1) We propose a novel text-to-parameter translation method for zero-shot in-game character auto-creation. To the best of our knowledge, we are the first to study text-driven character creation ready for game environments." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.29, + 0.892, + 0.348 + ], + "angle": 0, + "content": "2) The proposed T2P can optimize both continuous and discrete parameters in a unified framework, unlike earlier methods giving up controlling difficult-to-learn discrete parameters." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.35, + 0.892, + 0.41 + ], + "angle": 0, + "content": "3) The proposed text-driven auto-creation paradigm is flexible and friendly for users, and the predicted physically meaningful facial parameters enable players or game developers to further finetune the game character as they want." + }, + { + "type": "list", + "bbox": [ + 0.498, + 0.228, + 0.892, + 0.41 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.425, + 0.642, + 0.441 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.451, + 0.821, + 0.466 + ], + "angle": 0, + "content": "2.1. Parametric Character Auto-Creation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.474, + 0.892, + 0.777 + ], + "angle": 0, + "content": "Character auto-creation has been an emerging research topic because of its significance in role-playing games, augmented reality, and metaverses. Some methods on this topic are recently proposed. Tied Output Synthesis (TOS) learns to predict a set of binary facial parameters to control the graphical engine to generate a character that looks like the human in input photo [49]. 
Face-to-Parameter translation (F2P) is proposed to optimize a set of continuous facial parameters to minimize the distance between the generated game character's face and the input photo [38]. In F2P's following works [39, 41], the framework is improved to achieve fast and robust character creation. The PockerFace-Gan is proposed to decouple the expression features and identity features in order to generate expression-less game characters [40]. Borovikov et al. applies domain engineering and predict the facial parameters in a global-local way, considering the face as a hierarchical ensemble of general facial structure and local facial regions [3]. These methods all need reference photos to create characters, while we aim at creating characters based on text input." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.787, + 0.72, + 0.802 + ], + "angle": 0, + "content": "2.2.3D Face Reconstruction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.892, + 0.901 + ], + "angle": 0, + "content": "3D face reconstruction also aims to generate a 3D face given single or multi-view 2D facial images. 3D morphable model (3DMM) [1] and its variants [2,6,9,12,19] are representative methods in the literature. They first parameterize a 3D face mesh data and then optimize it to match the facial identity, expression, and texture of given reference im" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "21014" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.084, + 0.093, + 0.895, + 0.362 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.369, + 0.895, + 0.442 + ], + "angle": 0, + "content": "Figure 2. An overview of the proposed T2P. \\( E_{I} \\) and \\( E_{T} \\) denote the CLIP image encoder and text encoder, respectively. An imitator is trained to mimic the game engine and achieve differentiable rendering. A translator is pre-trained to translate the CLIP image embeddings to continuous facial parameters. When creating game characters given text prompts, T2P searches continuous facial parameters by fine-tuning the translator and searches discrete facial parameters by the evolution search. Finally, the facial parameters are fed into the game engine to render the in-game characters." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.466, + 0.473, + 0.588 + ], + "angle": 0, + "content": "ages. Taking advantage of deep Convolutional Neural Networks (CNNs), high-level image representations are used to improve the predicting of the morphable model coefficients [7, 13, 44]. The recently proposed MeInGame firstly reconstructs the face as a 3DMM model and then transfers the face to game mesh keeping their topology [20]. It also predicts texture map and lighting coefficients from input images to improve the outlook of the game mesh." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.604, + 0.381, + 0.619 + ], + "angle": 0, + "content": "2.3. Zero-Shot Text-Driven Generation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.629, + 0.473, + 0.903 + ], + "angle": 0, + "content": "Zero-shot content generation is recently made possible by the powerful multimodel representation and generalization capabilities of CLIP [32]. Combining the CLIP with variational autoencoder or diffusion model, DALL-E [31], DALL-E 2 [31] and Imagen [30] achieved high-quality zero-shot text-to-image synthesis, and sparked widespread discussion. 
Text-driven image translation and manipulation, and human image generation are also explored [8, 15, 16, 18, 25, 48, 50-52]. Taking advantage of CLIP, zero-shot text-driven 3D object generation and manipulation methods made rapid advances [5, 14, 17, 23, 37, 46]. The most recently proposed Dreamfusion uses Imagen to supervise the Neural Radiance Fields network (NeRF) [24] to generate 3D object [27]. The most related work to ours named AvatarCLIP was recently proposed to achieve zero-shot text-driven 3D avatar generation and animation [10]. Given a text prompt, AvatarCLIP first generates a coarse shape by code-book-based retrieval, guided by CLIP. Then the coarse" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.466, + 0.895, + 0.619 + ], + "angle": 0, + "content": "shape is used to initialize a NeuS network [47] to generate the implicit representation. Finally, the implicit 3D avatar is optimized to sculpt fine geometry and generate texture. This method treats the 3D human generation as a NeuS optimization process. However, the implicit representation makes it difficult to implement in games and unfriendly to user interaction. As a comparison, our created bone-driven game characters are controlled by explicit parameters with physical meanings. This enables players and game developers to further edit the created characters according to their needs." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.631, + 0.593, + 0.647 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.656, + 0.894, + 0.765 + ], + "angle": 0, + "content": "Fig. 2 shows an overview of the proposed T2P. We first train an imitator to simulate the game engine and pretrain a translator to translate the CLIP image embeddings to continuous facial parameters. Then, to achieve text-to-parameter translation, given the text prompts, we fine-tune the translator to predict continuous parameters and combine the evolution search to optimize discrete parameters." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.772, + 0.603, + 0.787 + ], + "angle": 0, + "content": "3.1. Imitator" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.795, + 0.895, + 0.903 + ], + "angle": 0, + "content": "We train a neural imitator to mimic the behavior of the game engine in order to differentiate the rendering of in-game characters. It takes in continuous facial parameters \\( \\pmb{x} \\) and renders the front view of the game character \\( \\pmb{y} \\). Different from the F2P [38] taking a similar generator network architecture of DC-GAN [29], we add a positional encoder at the input-end of the renderer to improve the facial param-" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "21015" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.084, + 0.092, + 0.462, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.202, + 0.471, + 0.3 + ], + "angle": 0, + "content": "Figure 3. The architecture of our translator. The translator contains a set of transformer encoder layers, several learnable tokens, a fine-tuning head, and a prediction head. The translator is firstly pre-trained on CLIP image embeddings and then fine-tuned on CLIP text embeddings to predict continuous facial parameters. When fine-tuning the translator, only the parameters of the fine-tuning head are updated." 
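A minimal sketch of the imitator described above, written as illustrative PyTorch-style Python. The four fully-connected layers of the positional encoder, the six transposed-convolution layers and the removed output Tanh follow Secs. 3.1 and 3.4; the channel widths, the 4x4 starting feature map and the 256x256 output resolution are assumptions not stated in the source.

    import torch
    import torch.nn as nn

    class Imitator(nn.Module):
        """Differentiable stand-in for the game engine: continuous facial parameters -> front view."""
        def __init__(self, n_params=269, feat=512):        # 269 continuous parameters (Sec. 3.4); feat assumed
            super().__init__()
            # Positional encoder: four fully-connected layers applied to the parameter vector.
            self.pos_enc = nn.Sequential(
                nn.Linear(n_params, feat), nn.ReLU(inplace=True),
                nn.Linear(feat, feat), nn.ReLU(inplace=True),
                nn.Linear(feat, feat), nn.ReLU(inplace=True),
                nn.Linear(feat, feat * 4 * 4), nn.ReLU(inplace=True),
            )
            # DCGAN-style decoder with six transposed convolutions; the output Tanh is omitted.
            chans = [feat, 256, 128, 64, 32, 16]
            blocks = []
            for c_in, c_out in zip(chans[:-1], chans[1:]):
                blocks += [nn.ConvTranspose2d(c_in, c_out, 4, stride=2, padding=1),
                           nn.BatchNorm2d(c_out), nn.ReLU(inplace=True)]
            blocks.append(nn.ConvTranspose2d(chans[-1], 3, 4, stride=2, padding=1))   # 4x4 -> 256x256 RGB
            self.decoder = nn.Sequential(*blocks)

        def forward(self, x):                               # x: (B, n_params), assumed normalized to [0, 1]
            h = self.pos_enc(x).view(x.size(0), -1, 4, 4)
            return self.decoder(h)                          # (B, 3, 256, 256) rendered front view

Trained against engine renderings of the same parameter vectors with the L1 objective of Eq. (1), this network is what makes the downstream parameter search differentiable.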
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.327, + 0.47, + 0.417 + ], + "angle": 0, + "content": "eters parsing on complex textures and geometry. We treat the imitator training as a regression problem to minimize the pixel-wise distance between the images rendered by the game engine and the imitator. To avoid the blurry rendered pixels, we use L1 loss as the loss function to train the imitator:" + }, + { + "type": "equation", + "bbox": [ + 0.118, + 0.426, + 0.47, + 0.463 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {G} (\\boldsymbol {x}) = E _ {\\boldsymbol {x} \\sim u (\\boldsymbol {x})} \\left\\{\\left| \\left| \\boldsymbol {y} - \\hat {\\boldsymbol {y}} \\right| \\right| _ {1} \\right\\} \\tag {1} \\\\ = E _ {\\boldsymbol {x} \\sim u (\\boldsymbol {x})} \\left\\{\\left| \\left| G (\\boldsymbol {x}) - \\operatorname {E n g i n e} (\\boldsymbol {x}) \\right| \\right| _ {1} \\right\\}, \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.473, + 0.469, + 0.504 + ], + "angle": 0, + "content": "where \\( G(\\pmb{x}) \\) and \\( \\text{Engine}(\\pmb{x}) \\) represent the image rendered by the imitator and game engine, respectively." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.505, + 0.469, + 0.595 + ], + "angle": 0, + "content": "To prepare the training data, we randomly sample 170K continuous facial parameters \\( \\pmb{x} \\) from a multidimensional uniform distribution \\( u(\\pmb{x}) \\). We feed these parameters into the game engine to render out the facial images. Then these facial parameters and image pairs are split into 80% and 20% for training and validation." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.605, + 0.377, + 0.621 + ], + "angle": 0, + "content": "3.2. Continuous Parameters Searching" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.628, + 0.469, + 0.734 + ], + "angle": 0, + "content": "We aim to train a translator to predict continuous facial parameters based on CLIP text embeddings. To reduce the learning difficulty, we first pre-train the translator on CLIP image embeddings and then fine-tune it on text CLIP embeddings. The main reason is that text-parameter pairs are expensive to collect, while image-parameter pairs can be infinitely generated with the game engine." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.734, + 0.469, + 0.869 + ], + "angle": 0, + "content": "We take the randomly sampled facial parameters and rendered image pairs mentioned in section 3.1 as training data. The rendered images are fed into the CLIP image encoder to collect image embeddings. Then we build a translator \\( F \\) based on a transformer encoder, and train it to map the image embeddings \\( e_{I} \\) into facial parameters \\( x \\), as shown in Fig. 3. The object function is defined as the L1 reconstruction loss between the true facial parameters and the predicted ones:" + }, + { + "type": "equation", + "bbox": [ + 0.131, + 0.883, + 0.469, + 0.901 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {F} \\left(\\boldsymbol {e} _ {I}, \\boldsymbol {x}\\right) = E _ {e _ {I} \\sim u \\left(\\boldsymbol {e} _ {I}\\right)} \\left\\{\\left| \\left| F \\left(\\boldsymbol {e} _ {I}\\right) - \\hat {\\boldsymbol {x}} \\right| \\right| _ {1} \\right\\}. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.304 + ], + "angle": 0, + "content": "When T2P creates game characters given text prompts, there is no image embeddings available. 
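A rough illustration of the two pre-training objectives, Eq. (1) for the imitator and Eq. (2) for the translator. The names imitator, translator, clip_image_encoder and pair_loader are assumed to exist; the SGD settings follow Sec. 3.4, and the single shared loop is only for brevity (the paper pre-trains the two networks separately).

    import torch
    import torch.nn.functional as F

    opt_g = torch.optim.SGD(imitator.parameters(),   lr=1e-3, momentum=0.9, weight_decay=5e-4)
    opt_f = torch.optim.SGD(translator.parameters(), lr=1e-4, momentum=0.9, weight_decay=5e-4)

    for params, engine_img in pair_loader:                  # 170K sampled pairs, split 80/20 train/val
        # Eq. (1): L1 distance between the imitator's rendering and the game engine's rendering.
        loss_g = F.l1_loss(imitator(params), engine_img)
        opt_g.zero_grad(); loss_g.backward(); opt_g.step()

        # Eq. (2): the translator maps CLIP image embeddings of the rendering back to the parameters.
        with torch.no_grad():
            e_img = clip_image_encoder(engine_img)
        loss_f = F.l1_loss(translator(e_img), params)
        opt_f.zero_grad(); loss_f.backward(); opt_f.step()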
Though the CLIP is trained to pull the text and image pairs close to each other in the embedding space, there are still gaps between the two modalities. We, therefore, fine-tune the translator to fit the input text embeddings. Inspired by the recent prompt tuning study [53], we fix the parameters of the transformer and fine-tune a tiny tuner head. The translator is trained to map the text embeddings \\( e_{T} \\) to facial parameters \\( x \\). Then the facial parameters are fed into the imitator to render the image of the game character. The fine-tuning object function is to minimize the cosine distance between the given text embeddings \\( e_{T} \\) and the image embeddings of the rendered image:" + }, + { + "type": "equation", + "bbox": [ + 0.538, + 0.315, + 0.892, + 0.35 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {C L I P} \\left(\\boldsymbol {e} _ {T}, \\boldsymbol {x}\\right) = 1 - \\cos \\left(\\boldsymbol {e} _ {T}, E _ {I} (G (\\boldsymbol {x}))\\right) \\tag {3} \\\\ = 1 - \\cos \\left(\\boldsymbol {e} _ {T}, E _ {I} \\left(G \\left(F \\left(\\boldsymbol {e} _ {T}\\right)\\right)\\right), \\right. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.362, + 0.892, + 0.392 + ], + "angle": 0, + "content": "where \\(E_{I}\\) is the CLIP image encoder. The parameters of the fine-tuned head \\(w\\) are iteratively updated as follows," + }, + { + "type": "equation", + "bbox": [ + 0.618, + 0.402, + 0.891, + 0.433 + ], + "angle": 0, + "content": "\\[\nw \\leftarrow w - \\eta_ {t} \\frac {\\partial \\mathcal {L} _ {C L I P}}{\\partial w}, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.442, + 0.892, + 0.517 + ], + "angle": 0, + "content": "where \\(\\eta_{t}\\) is the learning rate at \\(t\\)th iteration. We follow the snapshot ensembles [11] and set the learning rate using the cosine annealing schedule with warm restarts (SGDR) [22] to encourage the translator to converge to and escape from local minima:" + }, + { + "type": "equation", + "bbox": [ + 0.525, + 0.526, + 0.892, + 0.556 + ], + "angle": 0, + "content": "\\[\n\\eta_ {t} = \\eta_ {\\min } + \\frac {1}{2} \\left(\\eta_ {\\max } - \\eta_ {\\min }\\right) \\left(1 + \\cos \\left(\\frac {N _ {t}}{N} \\pi\\right)\\right), \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.566, + 0.892, + 0.687 + ], + "angle": 0, + "content": "where \\(\\eta_{min}\\), \\(\\eta_{max}\\), and \\(\\eta_t\\) denote the minimum, maximum, and current learning rate, respectively. \\(N\\) denotes the number of iterations between two warm restarts, and \\(N_t\\) denotes the number of iterations since the last restart. Each time the \\(N_t\\) equals \\(N\\), the current iteration is called a snapshot point, and we save the predicted facial parameters at this point. These facial parameters are then used to initialize the first population of the evolution search." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.696, + 0.774, + 0.712 + ], + "angle": 0, + "content": "3.3. Discrete Parameters Searching" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.892, + 0.9 + ], + "angle": 0, + "content": "In the bone-driven face model, besides continuous facial parameters controlling its bones, discrete facial elements (like the hairstyle, beard styles, and make-up) are also important. However, these elements are difficult for the imitator to learn, because they are discrete and highly changeable. 
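The text-driven fine-tuning of Eqs. (3)-(5) can be sketched as below; e_text, translator, imitator, clip_image_encoder, tuner_head_params and max_iters are assumed names, gradients are taken to flow only into the tuner head as stated above, and everything else is illustrative.

    import math
    import torch
    import torch.nn.functional as F

    def sgdr_lr(n_t, n_restart, lr_min=0.0, lr_max=1.0):
        # Eq. (5): cosine annealing with warm restarts; the paper uses lr_min=0, lr_max=1, N=10.
        return lr_min + 0.5 * (lr_max - lr_min) * (1.0 + math.cos(math.pi * n_t / n_restart))

    snapshots = []                                          # parameters saved at the snapshot points
    for t in range(1, max_iters + 1):
        x = translator(e_text)                              # continuous facial parameters, shape (1, 269)
        e_img = clip_image_encoder(imitator(x))             # differentiable render + CLIP image embedding
        loss = 1.0 - F.cosine_similarity(e_text, e_img, dim=-1).mean()    # Eq. (3)
        loss.backward()
        lr = sgdr_lr(t % 10, 10)
        with torch.no_grad():
            for w in tuner_head_params:                     # Eq. (4): plain SGD step on the tuner head only
                w -= lr * w.grad
                w.grad = None
        if t % 10 == 0:                                     # a warm-restart point, i.e. a snapshot point
            snapshots.append(x.detach())

The last few snapshots later seed the first population of the evolution search.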
Unlike previous methods that ignore discrete parameters during optimization, we propose to evolutionally search them by directly interacting with the game engine. Evolutionary algorithms have been widely used in reinforcement learning and neural architecture search [21, 36], where the objective function can be optimized without using any gradient information." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "21016" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.102, + 0.089, + 0.868, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.23, + 0.894, + 0.259 + ], + "angle": 0, + "content": "Figure 4. Game characters created by the proposed T2P given the text prompt \"monkey\". The first five game characters are created by the translator at different fine-tuning iterations. The last one is created by the evolution search, adding a discrete facial element, a beard." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.285, + 0.473, + 0.406 + ], + "angle": 0, + "content": "Here we perform a text-driven evolution search to find the optimum discrete facial parameters. The initial generation contains random initialized discrete parameters as well as the continuous facial parameters predicted by the translator. To impose supervision on 3D views, we render out two images for each game character, one for front view \\( y_{front} \\) and one for side view \\( y_{side} \\). The facial parameters are scored by the CLIP model as follows," + }, + { + "type": "equation", + "bbox": [ + 0.128, + 0.416, + 0.47, + 0.452 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} S _ {C L I P} = \\alpha \\cos \\left(E _ {T} (T), E _ {I} \\left(\\boldsymbol {y} _ {\\text {f r o n t}}\\right)\\right) \\tag {6} \\\\ + (1 - \\alpha) \\cos \\left(E _ {T} \\left(T ^ {\\prime}\\right), E _ {I} (\\boldsymbol {y} _ {\\text {s i d e}})\\right), \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.461, + 0.47, + 0.581 + ], + "angle": 0, + "content": "where \\(\\alpha\\) is the weight coefficient, \\(T\\) is the given text prompt, \\(T'\\) is the automatically adjusted text prompt for the side view, \\(E_{T}\\) is the CLIP text encoder and \\(E_{I}\\) is the CLIP image encoder. Then \\(k\\) random pairs of facial parameters are selected as parents to produce the next generation through crossover and mutation. For the crossover step, child \\(\\pmb{x}^c\\) is generated by randomly choosing a value from parents \\(\\pmb{x}^f\\) and \\(\\pmb{x}^m\\) at each position \\(i\\)," + }, + { + "type": "equation", + "bbox": [ + 0.16, + 0.59, + 0.47, + 0.611 + ], + "angle": 0, + "content": "\\[\nP \\left(x _ {i} ^ {c} = x _ {i} ^ {f}\\right) + P \\left(x _ {i} ^ {c} = x _ {i} ^ {m}\\right) = 1. \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.619, + 0.47, + 0.651 + ], + "angle": 0, + "content": "For the mutation step, each child parameter \\( \\pmb{x}^c \\) is added randomly noise at multiple randomly selected position \\( i \\)," + }, + { + "type": "equation", + "bbox": [ + 0.209, + 0.66, + 0.469, + 0.677 + ], + "angle": 0, + "content": "\\[\nx _ {i} ^ {c \\prime} = x _ {i} ^ {c} + n o i s e. 
\\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.688, + 0.47, + 0.764 + ], + "angle": 0, + "content": "The newly generated children's parameters together with the better ones of the parents' parameters are selected as the next generation and get involved in the looping selection, crossover, and mutation. The evolution process terminates until the CLIP score is converged." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.772, + 0.295, + 0.789 + ], + "angle": 0, + "content": "3.4. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.795, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Network architecture. Our imitator consists of a positional encoder with four fully-connected layers and a generator with six transposed convolution layers. The generator is similar to DCGAN's generator [29], except that its Tanh activation of the output layer is removed to encourage a better convergence. The translator consists of eight Transformer encoder layers [45], each of them having eight" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.285, + 0.892, + 0.375 + ], + "angle": 0, + "content": "multi-attention heads, and sixteen input tokens. The first token is the CLIP embeddings and the other tokens are learnable. We concatenate a prediction head with one single fully-connected layer after the Transformer. The fine-tuning head of the translator is a three layers perceptron with a bottleneck architecture." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.379, + 0.893, + 0.622 + ], + "angle": 0, + "content": "Training details. The imitator and translator are both trained using SGD optimizer [4]. We set the momentum to 0.9 and set the weight decay to 5e-4. For imitator pretraining, the learning rate is set to 1e-3 and is reduced to \\(0.98\\mathrm{x}\\) per 30 epochs, and the training is stopped after 500 epochs. For translator pre-training, the learning rate is set to 1e-4 and is reduced to \\(0.1\\mathrm{x}\\) at the 600th epoch and the training is stopped at the 1000th epoch. We randomly sample 170K facial parameters and corresponding rendered images of in-game characters pairs to train the imitator and translator. For translator fine-tuning, the minimum and maximum learning rates are set to \\(\\eta_{min} = 0\\) and \\(\\eta_{max} = 1\\), respectively, and the number of iterations between two warm starts \\(N\\) is set to 10 for the SGDR learning rate scheduler. Fine-tuning is stopped when the CLIP scores are no longer improved by more than 100 iterations." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.625, + 0.895, + 0.836 + ], + "angle": 0, + "content": "Evolution search. The facial parameters predicted by the translator at the last 5 snapshot points are selected as initial values. Each set of facial parameters contains 269 continuous parameters and 62 discrete parameters, and the initialized values of these discrete parameters are set to zeros, which means these facial elements do not appear at the beginning. These 5 sets of facial parameters together with 5 more random ones are the first population for the evolution search. We found that updating continuous parameters together with discrete parameters in the evolution search achieves better results. The number of selected pairs of parents is set to 10. The weight coefficient \\(\\alpha\\) is set to 0.8. The crossover rate is set to 0.4 and the mutation rate is set to 0.05." 
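A compact sketch of the evolution search from Sec. 3.3 with the settings of Sec. 3.4. Here clip_score is assumed to render the front and side views through the game engine and return the score of Eq. (6); reading the crossover rate as a per-position swap probability, perturbing discrete entries with the same Gaussian noise as continuous ones, and the fixed generation count are simplifications of the description above.

    import random
    import numpy as np

    def crossover(father, mother, rate=0.4):
        child = father.copy()
        swap = np.random.rand(child.size) < rate            # Eq. (7): each position taken from one parent
        child[swap] = mother[swap]
        return child

    def mutate(child, rate=0.05, scale=0.1):
        hit = np.random.rand(child.size) < rate             # Eq. (8): add noise at random positions
        child[hit] += np.random.randn(hit.sum()) * scale
        return np.clip(child, 0.0, 1.0)                     # parameters assumed normalized to [0, 1]

    def evolve(population, clip_score, n_pairs=10, n_generations=50):
        # population: ten 331-dim vectors (269 continuous + 62 discrete parameters), seeded with the
        # last 5 translator snapshots plus 5 random vectors; the paper stops once the score converges.
        pop_size = len(population)
        for _ in range(n_generations):
            pairs = [random.sample(population, 2) for _ in range(n_pairs)]
            children = [mutate(crossover(f, m)) for f, m in pairs]
            # the new children together with the better current members form the next generation
            population = sorted(population + children, key=clip_score, reverse=True)[:pop_size]
        return max(population, key=clip_score)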
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.841, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Prompt engineering. To enhance the text prompts, we follow the CLIP [28] and adapt prompt ensembling to the given text prompts. We preset 12 template sentences, such as “{} head rendered in a game engine”, and then fill the" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "21017" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.087, + 0.089, + 0.462, + 0.294 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.303, + 0.472, + 0.358 + ], + "angle": 0, + "content": "Figure 5. In-game fictional characters created by the proposed T2P given different text prompts. The results in the first row are created by the translator. The results in the second row are created by the evolution search." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.386, + 0.47, + 0.477 + ], + "angle": 0, + "content": "“{}” with the input text prompt. We calculate the CLIP text embeddings of the filled sentences and take their mean value as the input text embeddings for the translator and evolution search. For evolution search, we further add “side view of” to the template sentences when calculating the CLIP score of the rendered images of the side view." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.49, + 0.395, + 0.508 + ], + "angle": 0, + "content": "4. Experimental Results and Analysis" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.515, + 0.358, + 0.53 + ], + "angle": 0, + "content": "4.1. Game Character Auto-Creation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.539, + 0.47, + 0.765 + ], + "angle": 0, + "content": "Fig. 4 shows the game characters created by T2P given the text prompt \"monkey\". The first five images show the in-game characters created by the translator at different fin-tuning iterations. The in-game character gradually grows from a normal human face to look like a monkey. The evolution search further searches discrete facial elements and also slightly improves continuous parameters. The last image of Fig. 4 shows the evolution search adds a beard to the character to make it more vivid. In this process, the proposed T2P is enabled to search both continuous and discrete facial parameters to optimize the in-game character to be consistent with the given text prompt and vivid. Fig. 5 shows more results of fictional character creation. Results in the first row are controlled by continuous parameters, and results in the second row are added discrete facial elements." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.766, + 0.47, + 0.901 + ], + "angle": 0, + "content": "T2P can create characters with animal heads, as shown in Fig. 4, fictional characters, as shown in Fig. 5, and celebrities, as shown in Fig. 6, and characters conditioned on compactied text prompts, as shown in Fig. 7. These results show the powerful zero-shot game character auto-creation ability of the proposed T2P. By inputting only a text prompt, T2P can generate a vivid character, which is more flexible and time-saving for players or game developers compared to manual customization." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.091, + 0.791, + 0.108 + ], + "angle": 0, + "content": "4.2. 
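The prompt ensembling described above might look roughly like this, using the open-source clip package. Only the first template string is quoted in the paper; the second is an invented stand-in for the remaining eleven, the backbone variant is not stated here, and prepending "side view of" is one plausible reading of how the side-view prompt is built.

    import torch
    import clip

    model, _ = clip.load("ViT-B/32")                        # CLIP backbone (variant assumed)

    TEMPLATES = [
        "{} head rendered in a game engine",                # template quoted in Sec. 3.4
        "a {} head rendered in a game engine",              # illustrative stand-in for the other templates
    ]

    def ensembled_text_embedding(prompt, side_view=False):
        texts = [t.format(prompt) for t in TEMPLATES]
        if side_view:                                       # extra wording used when scoring side views
            texts = ["side view of " + t for t in texts]
        with torch.no_grad():
            emb = model.encode_text(clip.tokenize(texts))
            emb = emb / emb.norm(dim=-1, keepdim=True)
        return emb.mean(dim=0)                              # mean embedding used by translator and search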
Comparison with Other Methods" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.114, + 0.892, + 0.251 + ], + "angle": 0, + "content": "We compare the proposed method with AvatarCLIP [10] and DreamFusion [27]. The comparison includes objective evaluations and subjective evaluations. Since DreamFusion is not open source yet, we use the community implementation version of it, named Stable-Dreamfusion1. This version uses the open-source stable diffusion model [34] to drive the 3D object generation. We only compare the heads generated by these methods. This may introduce unfairness, thus we will never claim superiority besides the head part." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.251, + 0.892, + 0.403 + ], + "angle": 0, + "content": "We feed 24 different text prompts into these two methods and our proposed T2P to generate characters respectively. Three examples are shown in Fig. 8. For objective evaluations, we compare the Inception Score [35], CLIP Ranking-1, and their speed (run on NVIDIA A30), as shown in Table 1. For each method, CLIP Ranking-1 calculates the ratio of its created characters ranked by CLIP as top-1 among the characters created by all three methods. The evaluation scores show the proposed T2P outperforms the other two methods and runs at a much faster speed." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.403, + 0.893, + 0.569 + ], + "angle": 0, + "content": "For subjective evaluations, we invite 20 volunteers to evaluate the generation results in terms of realistic degree and consistency with the given text. They are asked to focus on the heads and faces of the characters and score them from 1 to 5, where 1 is the worst and 5 is the best. The evaluation results are shown in Table 1. Evaluation results show our method consistently outperforms the other two methods. We also notice that AvatarCLIP performs good at celebrities generation, Dreamfusion is good at fictional characters generation, while our method performs better at both types, just as shown in Fig. 8." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.579, + 0.665, + 0.594 + ], + "angle": 0, + "content": "4.3. Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.603, + 0.892, + 0.678 + ], + "angle": 0, + "content": "We conduct ablation studies to analyze the importance of the proposed translator and evolution search. We run our framework with three settings, including 1) only evolution search 2) only translator and 3) both translator and evolution search. The details of these settings are as follows." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.679, + 0.892, + 0.739 + ], + "angle": 0, + "content": "1) Evolution Search. The translator is removed from the framework and the evolution search is used to directly search both continuous and discrete facial parameters given text prompts." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.74, + 0.892, + 0.8 + ], + "angle": 0, + "content": "2) Translator. The evolution search is abandoned, and the translator is fine-tuned to translate the given text prompts into continuous facial parameters and gives up controlling discrete parameters." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.801, + 0.892, + 0.861 + ], + "angle": 0, + "content": "3) Full Implementation. Given text prompts, the translator is fine-tuned to predict continuous facial parameters. Then, the evolution search further searches discrete parameters and also improves the continuous ones." 
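For reference, the CLIP Ranking-1 metric described above reduces to a per-prompt argmax over methods; the dictionary layout scores[method][prompt] is an assumed convention, not something specified in the paper.

    def clip_ranking_1(scores):
        # scores: {method_name: [CLIP score of that method's character for each of the 24 prompts]}
        methods = list(scores)
        n_prompts = len(next(iter(scores.values())))
        wins = {m: 0 for m in methods}
        for p in range(n_prompts):
            best = max(methods, key=lambda m: scores[m][p]) # the method whose character CLIP ranks first
            wins[best] += 1
        return {m: wins[m] / n_prompts for m in methods}    # e.g. 16 of 24 prompts gives about 66.66%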
+ }, + { + "type": "text", + "bbox": [ + 0.518, + 0.862, + 0.892, + 0.877 + ], + "angle": 0, + "content": "Fig. 9 shows the CLIP scores increasing curves with the" + }, + { + "type": "list", + "bbox": [ + 0.498, + 0.679, + 0.892, + 0.877 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.518, + 0.887, + 0.782, + 0.901 + ], + "angle": 0, + "content": "\\(^{1}\\)https://github.com/ashawkey/stable-dreamfusion" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "21018" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.084, + 0.089, + 0.887, + 0.539 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.104, + 0.547, + 0.865, + 0.562 + ], + "angle": 0, + "content": "Figure 6. In-game celebrities created by the proposed T2P. This figure shows the front view and the side view for each character." + }, + { + "type": "table", + "bbox": [ + 0.09, + 0.575, + 0.88, + 0.668 + ], + "angle": 0, + "content": "
       | Objective Evaluations                          | Subjective Evaluations
Method | Inception Score ↑ | CLIP Ranking-1 ↑ | Time ↓  | Reality ↑   | Consistency with Text ↑
DreamFusion [27] | 1.60 ± 0.12 | 16.67% | 254.50 min | 1.85 ± 1.02 | 2.23 ± 1.39
AvatarCLIP [10]  | 1.37 ± 0.31 | 16.67% | 177.79 min | 1.97 ± 0.53 | 2.14 ± 0.66
T2P (ours)       | 1.65 ± 0.21 | 66.66% | 359.47 s   | 3.87 ± 0.47 | 3.34 ± 0.53
" + }, + { + "type": "table_caption", + "bbox": [ + 0.101, + 0.675, + 0.867, + 0.689 + ], + "angle": 0, + "content": "Table 1. Comparison results of DreamFusion, AvatarCLIP, and the proposed T2P in terms of objective and subjective evaluations." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.716, + 0.471, + 0.883 + ], + "angle": 0, + "content": "T2P running in 300 seconds. The means and standard deviations are calculated based on 100 times repeat running driven by one text prompt. As shown in the figure, the full implementation of our method always outperforms the other two. The translator is optimized rapidly to find optimal continuous parameters but can not further improve the CLIP scores because of lacking discrete facial elements. Compared with the translator, the evolution search is quite slow but can reach a higher CLIP score. The full implementation of T2P takes advantage of both translator and evolution search and achieves fast and better optimization." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.886, + 0.471, + 0.901 + ], + "angle": 0, + "content": "We further test different settings of proposed T2P on 100" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.716, + 0.892, + 0.898 + ], + "angle": 0, + "content": "different text prompts to evaluate their performance. Table 2 shows the results. The first row is the result of directly using the pre-trained translator to predict continuous facial parameters, and the second row is the result of fine-tuning translator to predict parameters. The fine-tuned one can achieve a higher CLIP score, which indicates the necessity of fine-tuning. The CLIP scores of only using the evolution search and the full version of T2P are shown in the third and fourth rows, respectively. The full version of T2P achieves the highest CLIP score because it can search both continuous and discrete facial parameters to create better in-game characters." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "21019" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.091, + 0.089, + 0.462, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.218, + 0.47, + 0.247 + ], + "angle": 0, + "content": "Figure 7. In-game characters created by the proposed T2P given complicated prompts." + }, + { + "type": "image", + "bbox": [ + 0.091, + 0.259, + 0.462, + 0.549 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.556, + 0.47, + 0.6 + ], + "angle": 0, + "content": "Figure 8. Comparison of AvatarCLIP, DreamFusion, and the proposed T2P. Each column shows the 3D characters created by these methods given the same text prompt." + }, + { + "type": "image", + "bbox": [ + 0.085, + 0.617, + 0.462, + 0.782 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.792, + 0.47, + 0.822 + ], + "angle": 0, + "content": "Figure 9. Curves of CLIP scores increasing within 300s under three different module settings." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.847, + 0.345, + 0.865 + ], + "angle": 0, + "content": "4.4. Facial Parameter Interpolation" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Since the generated characters are controlled by parameters with explicit physical meanings, users can further ad" + }, + { + "type": "table", + "bbox": [ + 0.534, + 0.089, + 0.86, + 0.181 + ], + "angle": 0, + "content": "
Translator | Evolution Search | CLIP Score
fixed      | ×                | 27.29 ± 3.10
fine-tuned | ×                | 34.85 ± 3.15
×          | ✓                | 35.31 ± 2.26
fine-tuned | ✓                | 35.72 ± 2.70
" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.188, + 0.892, + 0.217 + ], + "angle": 0, + "content": "Table 2. Results of ablation studies. Four versions of the proposed method are compared." + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.23, + 0.885, + 0.383 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.391, + 0.892, + 0.418 + ], + "angle": 0, + "content": "Figure 10. Examples of the facial parameter interpolation of game characters." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.446, + 0.893, + 0.522 + ], + "angle": 0, + "content": "just the outlook of the characters as they want. One can also interpolate different facial parameters to create a new character, as shown in Fig. 10. The first row shows the interpolation between the monkey and Thanos, in which the new facial parameters are calculated as follows," + }, + { + "type": "equation", + "bbox": [ + 0.564, + 0.533, + 0.892, + 0.55 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {x} _ {\\text {n e w}} = \\beta \\boldsymbol {x} _ {\\text {m o n k e y}} + (1 - \\beta) \\boldsymbol {x} _ {\\text {T h a n o s}}, \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.56, + 0.893, + 0.665 + ], + "angle": 0, + "content": "where \\(\\beta\\) is the interpolation coefficient decreasing from 1 to 0. The results in the second row of Fig. 10 show the interpolation between the monkey and Shrek. Besides, more than two characters can also be interpolated. We believe the benefits of the facial parameters controlling bone-driven game characters can give players a higher degree of freedom in character customization." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.68, + 0.62, + 0.695 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.705, + 0.893, + 0.901 + ], + "angle": 0, + "content": "We propose a novel method called \"text-to-parameter translation\" to create bone-driven in-game characters given text prompts. Our method achieves high-quality zero-shot creation of in-game characters and can search both continuous and discrete facial parameters in a unified framework. The proposed text-driven framework is flexible and time-saving for users, and the created bone-driven characters with physically meaningful facial parameters are convenient for users to further edit as they want. Experimental results show our method achieves high-quality and vivid zero-shot text-driven game character auto-creation and outperforms other SOTA text-to-3D generation methods in terms of objective evaluations, speed, and subjective evaluations." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "21020" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.17 + ], + "angle": 0, + "content": "[1] Volker Blanz and Thomas Vetter. A morphable model for the synthesis of 3d faces. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, pages 187-194, 1999. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.172, + 0.472, + 0.241 + ], + "angle": 0, + "content": "[2] James Booth, Epameinondas Antonakos, Stylianos Ploumpis, George Trigeorgis, Yannis Panagakis, and Stefanos Zafeiriou. 3d face morphable models\" in-the-wild\". 
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 48-57, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.242, + 0.471, + 0.297 + ], + "angle": 0, + "content": "[3] Igor Borovikov, Karine Levonyan, Jon Rein, Pawel Wrotek, and Nitish Victor. Applied monocular reconstruction of parametric faces with domain engineering. arXiv preprint arXiv:2208.02935, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.298, + 0.47, + 0.34 + ], + "angle": 0, + "content": "[4] Léon Bottou. Large-scale machine learning with stochastic gradient descent. In Proceedings of COMPSTAT'2010, pages 177-186. Springer, 2010. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.341, + 0.47, + 0.382 + ], + "angle": 0, + "content": "[5] Zehranaz Canfes, M Furkan Atasoy, Alara Dirik, and Pinar Yanardag. Text and image guided 3d avatar generation and manipulation. arXiv preprint arXiv:2202.06079, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.383, + 0.47, + 0.438 + ], + "angle": 0, + "content": "[6] Chen Cao, Yanlin Weng, Shun Zhou, Yiying Tong, and Kun Zhou. Facewarehouse: A 3d facial expression database for visual computing. IEEE Transactions on Visualization and Computer Graphics, 20(3):413-425, 2013. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.439, + 0.47, + 0.495 + ], + "angle": 0, + "content": "[7] Pengfei Dou, Shishir K Shah, and Ioannis A Kakadiaris. End-to-end 3d face reconstruction with deep neural networks. In proceedings of the IEEE conference on computer vision and pattern recognition, pages 5908-5917, 2017. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.496, + 0.47, + 0.55 + ], + "angle": 0, + "content": "[8] Rinon Gal, Or Patashnik, Haggai Maron, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators. ACM Transactions on Graphics (TOG), 41(4):1-13, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.551, + 0.47, + 0.633 + ], + "angle": 0, + "content": "[9] Thomas Gering, Andreas Morel-Forster, Clemens Blumer, Bernhard Egger, Marcel Luthi, Sandro Schonborn, and Thomas Vetter. Morphable face models—an open framework. In 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018), pages 75–82. IEEE, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.635, + 0.47, + 0.69 + ], + "angle": 0, + "content": "[10] Fangzhou Hong, Mingyuan Zhang, Liang Pan, Zhongang Cai, Lei Yang, and Ziwei Liu. Avatarclip: Zero-shot text-driven generation and animation of 3d avatars. arXiv preprint arXiv:2205.08535, 2022. 2, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.691, + 0.47, + 0.746 + ], + "angle": 0, + "content": "[11] Gao Huang, Yixuan Li, Geoff Pleiss, Zhuang Liu, John E Hopcroft, and Kilian Q Weinberger. Snapshot ensembles: Train 1, get m for free. arXiv preprint arXiv:1704.00109, 2017. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.748, + 0.47, + 0.831 + ], + "angle": 0, + "content": "[12] Patrik Huber, Guosheng Hu, Rafael Tena, Pouria Mortazavian, P Koppen, William J Christmas, Matthias Ratsch, and Josef Kittler. A multiresolution 3d morphable face model and fitting framework. In Proceedings of the 11th international joint conference on computer vision, imaging and computer graphics theory and applications, 2016. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.47, + 0.902 + ], + "angle": 0, + "content": "[13] Aaron S Jackson, Adrian Bulat, Vasileios Argyriou, and Georgios Tzimiropoulos. Large pose 3d face reconstruction from a single image via direct volumetric cnn regression. In Proceedings of the IEEE international conference on computer vision, pages 1031-1039, 2017. 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.161 + ], + "angle": 0, + "content": "[14] Ajay Jain, Ben Mildenhall, Jonathan T Barron, Pieter Abbeel, and Ben Poole. Zero-shot text-guided object generation with dream fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 867-876, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.163, + 0.892, + 0.219 + ], + "angle": 0, + "content": "[15] Yuming Jiang, Ziqi Huang, Xingang Pan, Chen Change Loy, and Ziwei Liu. Talk-to-edit: Fine-grained facial editing via dialog. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 13799-13808, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.221, + 0.892, + 0.276 + ], + "angle": 0, + "content": "[16] Yuming Jiang, Shuai Yang, Haonan Qju, Wayne Wu, Chen Change Loy, and Ziwei Liu. Text2human: Text-driven controllable human image generation. ACM Transactions on Graphics (TOG), 41(4):1-11, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.277, + 0.892, + 0.332 + ], + "angle": 0, + "content": "[17] Nasir Khalid, Tianhao Xie, Eugene Belilovsky, and Tiberiu Popa. Clip-mesh: Generating textured meshes from text using pretrained image-text models. ACM Transactions on Graphics (TOG), Proc. SIGGRAPH Asia, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.334, + 0.892, + 0.374 + ], + "angle": 0, + "content": "[18] Gwanghyun Kim and Jong Chul Ye. Diffusionclip: Text-guided image manipulation using diffusion models. 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.375, + 0.892, + 0.419 + ], + "angle": 0, + "content": "[19] Tianye Li, Timo Bolkart, Michael J Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4d scans. ACM Trans. Graph., 36(6):194-1, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.42, + 0.892, + 0.476 + ], + "angle": 0, + "content": "[20] Jiangke Lin, Yi Yuan, and Zhengxia Zou. Meingame: Create a game character face from a single portrait. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 311-319, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.477, + 0.892, + 0.532 + ], + "angle": 0, + "content": "[21] Yuqiao Liu, Yanan Sun, Bing Xue, Mengjie Zhang, Gary G Yen, and Kay Chen Tan. A survey on evolutionary neural architecture search. IEEE transactions on neural networks and learning systems, 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.533, + 0.892, + 0.574 + ], + "angle": 0, + "content": "[22] Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts. arXiv preprint arXiv:1608.03983, 2016. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.576, + 0.892, + 0.645 + ], + "angle": 0, + "content": "[23] Oscar Michel, Roi Bar-On, Richard Liu, Sagie Benaim, and Rana Hanocka. Text2mesh: Text-driven neural stylization for meshes. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13492-13502, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.647, + 0.892, + 0.715 + ], + "angle": 0, + "content": "[24] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.717, + 0.892, + 0.786 + ], + "angle": 0, + "content": "[25] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2085–2094, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.788, + 0.892, + 0.857 + ], + "angle": 0, + "content": "[26] Weilong Peng, Zhiyong Feng, Chao Xu, and Yong Su. Parametric t-spline face morphable model for detailed fitting in shape subspace. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6139-6147, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.858, + 0.892, + 0.902 + ], + "angle": 0, + "content": "[27] Ben Poole, Ajay Jain, Jonathan T Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. arXiv preprint arXiv:2209.14988, 2022. 3, 6, 7" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "21021" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.176 + ], + "angle": 0, + "content": "[28] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pages 8748-8763. PMLR, 2021. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.178, + 0.472, + 0.233 + ], + "angle": 0, + "content": "[29] Alec Radford, Luke Metz, and Soumith Chintala. Unsupervised representation learning with deep convolutional generative adversarial networks. arXiv preprint arXiv:1511.06434, 2015. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.234, + 0.47, + 0.289 + ], + "angle": 0, + "content": "[30] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.291, + 0.47, + 0.359 + ], + "angle": 0, + "content": "[31] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In International Conference on Machine Learning, pages 8821-8831. PMLR, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.362, + 0.47, + 0.417 + ], + "angle": 0, + "content": "[32] Scott Reed, Zeynep Akata, Xinchen Yan, Lajanugen Logeswaran, Bernt Schiele, and Honglak Lee. Generative adversarial text to image synthesis. In International conference on machine learning, pages 1060-1069. PMLR, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.419, + 0.469, + 0.475 + ], + "angle": 0, + "content": "[33] Elad Richardson, Matan Sela, Roy Or-El, and Ron Kimmel. 
Learning detailed face reconstruction from a single image. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1259–1268, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.476, + 0.47, + 0.546 + ], + "angle": 0, + "content": "[34] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10684-10695, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.548, + 0.469, + 0.602 + ], + "angle": 0, + "content": "[35] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. Advances in neural information processing systems, 29, 2016. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.604, + 0.469, + 0.657 + ], + "angle": 0, + "content": "[36] Tim Salimans, Jonathan Ho, Xi Chen, Szymon Sidor, and Ilya Sutskever. Evolution strategies as a scalable alternative to reinforcement learning. arXiv preprint arXiv:1703.03864, 2017. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.66, + 0.469, + 0.743 + ], + "angle": 0, + "content": "[37] Aditya Sanghi, Hang Chu, Joseph G Lambourne, Ye Wang, Chin-Yi Cheng, Marco Fumero, and Kamal Rahimi Malekshan. Clip-forge: Towards zero-shot text-to-shape generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18603-18613, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.745, + 0.469, + 0.815 + ], + "angle": 0, + "content": "[38] Tianyang Shi, Yi Yuan, Changjie Fan, Zhengxia Zou, Zhenwei Shi, and Yong Liu. Face-to-parameter translation for game character auto-creation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 161–170, 2019. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.817, + 0.469, + 0.87 + ], + "angle": 0, + "content": "[39] Tianyang Shi, Zhengxia Zou, Zhenwei Shi, and Yi Yuan. Neural rendering for game character auto-creation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.469, + 0.902 + ], + "angle": 0, + "content": "[40] Tianyang Shi, Zhengxia Zou, Xinhui Song, Zheng Song, Changjian Gu, Changjie Fan, and Yi Yuan. Neutral face" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "game character auto-creation via pokerface-gan. In Proceedings of the 28th ACM International Conference on Multimedia, pages 3201–3209, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.135, + 0.892, + 0.203 + ], + "angle": 0, + "content": "[41] Tianyang Shi, Zhengxia Zuo, Yi Yuan, and Changjie Fan. Fast and robust face-to-parameter translation for game character auto-creation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 1733–1740, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.205, + 0.892, + 0.288 + ], + "angle": 0, + "content": "[42] Ayush Tewari, Michael Zollhofer, Hyeongwoo Kim, Pablo Garrido, Florian Bernard, Patrick Perez, and Christian Theobalt. Mofa: Model-based deep convolutional face autoencoder for unsupervised monocular reconstruction. 
In Proceedings of the IEEE International Conference on Computer Vision Workshops, pages 1274-1283, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.289, + 0.892, + 0.357 + ], + "angle": 0, + "content": "[43] Anh Tuan Tran, Tal Hassner, Iacopo Masi, Eran Paz, Yuval Nirkin, and Gerard Medioni. Extreme 3d face reconstruction: Seeing through occlusions. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3935-3944, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.358, + 0.892, + 0.427 + ], + "angle": 0, + "content": "[44] Anh Tuan Tran, Tal Hassner, Iacopo Masi, and Gerard Medioni. Regressing robust and discriminative 3d morphable models with a very deep neural network. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5163-5172, 2017. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.428, + 0.892, + 0.482 + ], + "angle": 0, + "content": "[45] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.484, + 0.892, + 0.552 + ], + "angle": 0, + "content": "[46] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Clip-nerf: Text-and-image driven manipulation of neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3835-3844, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.554, + 0.892, + 0.608 + ], + "angle": 0, + "content": "[47] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. arXiv preprint arXiv:2106.10689, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.609, + 0.892, + 0.678 + ], + "angle": 0, + "content": "[48] Tianyi Wei, Dongdong Chen, Wenbo Zhou, Jing Liao, Zhentao Tan, Lu Yuan, Weiming Zhang, and Nenghai Yu. Hairclip: Design your hair by text and reference image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18072-18081, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.679, + 0.892, + 0.733 + ], + "angle": 0, + "content": "[49] Lior Wolf, Yaniv Taigman, and Adam Polyak. Unsupervised creation of parameterized avatars. In Proceedings of the IEEE International Conference on Computer Vision, pages 1530-1538, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.735, + 0.892, + 0.802 + ], + "angle": 0, + "content": "[50] Weihao Xia, Yujiu Yang, Jing-Hao Xue, and Baoyuan Wu. Tedigan: Text-guided diverse face image generation and manipulation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2256-2265, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.804, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[51] Zipeng Xu, Tianwei Lin, Hao Tang, Fu Li, Dongliang He, Nicu Sebe, Radu Timofte, Luc Van Gool, and Errui Ding. Predict, prevent, and evaluate: Disentangled text-driven image manipulation empowered by pre-trained vision-language model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18229-18238, 2022. 
3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "21022" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.161 + ], + "angle": 0, + "content": "[52] Yingchen Yu, Fangneng Zhan, Rongliang Wu, Jiahui Zhang, Shijian Lu, Miaomiao Cui, Xuansong Xie, Xian-Sheng Hua, and Chunyan Miao. Towards counterfactual image manipulation via clip. In Proceedings of the 30th ACM International Conference on Multimedia, pages 3637-3645, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.163, + 0.469, + 0.217 + ], + "angle": 0, + "content": "[53] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. International Journal of Computer Vision, 130(9):2337-2348, 2022. 4" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.217 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.518, + 0.956 + ], + "angle": 0, + "content": "21023" + } + ] +] \ No newline at end of file diff --git a/2023/Zero-Shot Text-to-Parameter Translation for Game Character Auto-Creation/120ba89b-8ae1-4464-85a9-dac434567f6d_origin.pdf b/2023/Zero-Shot Text-to-Parameter Translation for Game Character Auto-Creation/120ba89b-8ae1-4464-85a9-dac434567f6d_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c12b928ed6183c21d11310c9b4c457b565600ced --- /dev/null +++ b/2023/Zero-Shot Text-to-Parameter Translation for Game Character Auto-Creation/120ba89b-8ae1-4464-85a9-dac434567f6d_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:602cb73323b459130dffc0fb1aa71ad2f832461c7b9c4de5a8abd32ff33f4f34 +size 7595575 diff --git a/2023/Zero-Shot Text-to-Parameter Translation for Game Character Auto-Creation/full.md b/2023/Zero-Shot Text-to-Parameter Translation for Game Character Auto-Creation/full.md new file mode 100644 index 0000000000000000000000000000000000000000..79b9fc7d42d7846eb24f6d0743cd3683cca947e6 --- /dev/null +++ b/2023/Zero-Shot Text-to-Parameter Translation for Game Character Auto-Creation/full.md @@ -0,0 +1,283 @@ +# Zero-Shot Text-to-Parameter Translation for Game Character Auto-Creation + +Rui Zhao $^{1}$ , Wei Li $^{2}$ , Zhipeng Hu $^{1}$ , Lincheng Li $^{1*}$ , Zhengxia Zou $^{3*}$ , Zhenwei Shi $^{3}$ , Changjie Fan $^{1}$ , $^{1}$ Netease Fuxi AI Lab, $^{2}$ Nankai University, $^{3}$ Beihang University + +{zhaorui10, zphu, lilincheng, fanchangjie}@corp.netease.com, + +liwei@dbis.nankai.edu.cn, {zhengxiazou, zhenweishi}@buaa.edu.cn + +![](images/db582dc6bc97988279dcf18cad7287276d66ea2a9cad30bf6860b11a081bd6d4.jpg) +Figure 1. Game characters created by the proposed text-to-parameter translation (T2P) given different text prompts. The front view and three side views are shown for each character. + +# Abstract + +Recent popular Role-Playing Games (RPGs) saw the great success of character auto-creation systems. The bone-driven face model controlled by continuous parameters (like the position of bones) and discrete parameters (like the hairstyles) makes it possible for users to personalize and customize in-game characters. Previous in-game character auto-creation systems are mostly image-driven, where facial parameters are optimized so that the rendered character looks similar to the reference face photo. 
This paper proposes a novel text-to-parameter translation method (T2P) to achieve zero-shot text-driven game character auto-creation. With our method, users can create a vivid in-game character with arbitrary text description without using any reference photo or editing hundreds of parameters manually. In our method, taking the power of large-scale pre-trained multi-modal CLIP and neural rendering, T2P searches both continuous facial parameters and discrete facial parame + +ters in a unified framework. Due to the discontinuous parameter representation, previous methods have difficulty in effectively learning discrete facial parameters. T2P, to our best knowledge, is the first method that can handle the optimization of both discrete and continuous parameters. Experimental results show that T2P can generate high-quality and vivid game characters with given text prompts. T2P outperforms other SOTA text-to-3D generation methods on both objective evaluations and subjective evaluations. + +# 1. Introduction + +Role-Playing Games (RPGs) are praised by gamers for providing immersive experiences. Some of the recent popular RPGs, like Grand Theft Auto Online1 and Naraka2, have opened up character customization systems to players. In such systems, in-game characters are bone-driven and controlled by continuous parameters, like the position, + +rotation, scale of each bone, and discrete parameters, like the hairstyle, beard styles, make-ups, and other facial elements. By manually adjusting these parameters, players can control the appearance of the characters in the game according to their personal preferences, rather than using predefined character templates. However, it is cumbersome and time-consuming for users to manually adjust hundreds of parameters - usually taking up to hours to create a character that matches their expectations. + +To automatically create in-game characters, the method named Face-to-parameter translation (F2P) was recently proposed to automatically create game characters based on a single input face image [38]. F2P and its variants [39, 41] have been successfully used in recent RPGs like Narake and Justice, and virtual meeting platform Yaotai. Recent 3D face reconstruction methods [2, 7, 26, 33, 42-44] can also be adapted to create game characters. However, all the above-mentioned methods require reference face photos for auto-creation. Users may take time to search, download and upload suitable photos for their expected game characters. Compared with images, text prompts are more flexible and time-saving for game character auto-creation. A very recent work AvatarCLIP [10] achieved text-driven avatar auto-creation and animation. It optimizes implicit neural networks to generate characters. However, the created characters are controlled by implicit parameters, which lack explicit physical meanings, thus manually adjusting them needs extra designs. This will be inconvenient for players or game developers to further fine-tune the created game characters as they want. + +To address the above problems, we propose text-to-parameter translation (T2P) to tackle the in-game character auto-creation task based on arbitrary text prompts. T2P takes the power of large-scale pre-trained CLIP to achieve zero-shot text-driven character creation and utilizes neural rendering to make the rendering of in-game characters differentiable to accelerate the parameters optimization. 
Previous works like F2Ps give up controlling discrete facial parameters due to the problem of discontinuous parameter gradients. To our best knowledge, the proposed T2P is the first method that can handle both continuous and discrete facial parameters optimization in a unified framework to create vivid in-game characters. F2P is also the first text-driven automatic character creation suitable for game environments. + +Our method consists of a pre-training stage and a text-to-parameter translation stage. In the pre-training stage, we first train an imitator to imitate the rendering behavior of the game engine to make the parameter searching pipeline end-to-end differentiable. We also pre-train a translator to translate the CLIP image embeddings of random game characters to their facial parameters. Then at the text-to-parameter translation stage, on one hand, we fine-tune the translator + +on un-seen CLIP text embeddings to predict continuous parameters given text prompt rather than images, on the other hand, discrete parameters are evolutionally searched. Finally, the game engine takes in the facial parameters and creates the in-game characters which correspond to the text prompt described, as shown in Fig 1. Objective evaluations and subjective evaluations both indicate our method outperforms other SOTA zero-shot text-to-3D methods. + +# Our contributions are summarized as follows: + +1) We propose a novel text-to-parameter translation method for zero-shot in-game character auto-creation. To the best of our knowledge, we are the first to study text-driven character creation ready for game environments. +2) The proposed T2P can optimize both continuous and discrete parameters in a unified framework, unlike earlier methods giving up controlling difficult-to-learn discrete parameters. +3) The proposed text-driven auto-creation paradigm is flexible and friendly for users, and the predicted physically meaningful facial parameters enable players or game developers to further finetune the game character as they want. + +# 2. Related Work + +# 2.1. Parametric Character Auto-Creation + +Character auto-creation has been an emerging research topic because of its significance in role-playing games, augmented reality, and metaverses. Some methods on this topic are recently proposed. Tied Output Synthesis (TOS) learns to predict a set of binary facial parameters to control the graphical engine to generate a character that looks like the human in input photo [49]. Face-to-Parameter translation (F2P) is proposed to optimize a set of continuous facial parameters to minimize the distance between the generated game character's face and the input photo [38]. In F2P's following works [39, 41], the framework is improved to achieve fast and robust character creation. The PockerFace-Gan is proposed to decouple the expression features and identity features in order to generate expression-less game characters [40]. Borovikov et al. applies domain engineering and predict the facial parameters in a global-local way, considering the face as a hierarchical ensemble of general facial structure and local facial regions [3]. These methods all need reference photos to create characters, while we aim at creating characters based on text input. + +# 2.2.3D Face Reconstruction + +3D face reconstruction also aims to generate a 3D face given single or multi-view 2D facial images. 3D morphable model (3DMM) [1] and its variants [2,6,9,12,19] are representative methods in the literature. 
They first parameterize a 3D face mesh data and then optimize it to match the facial identity, expression, and texture of given reference im + +![](images/1ee822a8851306695398626c8ce3763d3ac4c4340965dcaa2c95796d87cbee04.jpg) +Figure 2. An overview of the proposed T2P. $E_{I}$ and $E_{T}$ denote the CLIP image encoder and text encoder, respectively. An imitator is trained to mimic the game engine and achieve differentiable rendering. A translator is pre-trained to translate the CLIP image embeddings to continuous facial parameters. When creating game characters given text prompts, T2P searches continuous facial parameters by fine-tuning the translator and searches discrete facial parameters by the evolution search. Finally, the facial parameters are fed into the game engine to render the in-game characters. + +ages. Taking advantage of deep Convolutional Neural Networks (CNNs), high-level image representations are used to improve the predicting of the morphable model coefficients [7, 13, 44]. The recently proposed MeInGame firstly reconstructs the face as a 3DMM model and then transfers the face to game mesh keeping their topology [20]. It also predicts texture map and lighting coefficients from input images to improve the outlook of the game mesh. + +# 2.3. Zero-Shot Text-Driven Generation + +Zero-shot content generation is recently made possible by the powerful multimodel representation and generalization capabilities of CLIP [32]. Combining the CLIP with variational autoencoder or diffusion model, DALL-E [31], DALL-E 2 [31] and Imagen [30] achieved high-quality zero-shot text-to-image synthesis, and sparked widespread discussion. Text-driven image translation and manipulation, and human image generation are also explored [8, 15, 16, 18, 25, 48, 50-52]. Taking advantage of CLIP, zero-shot text-driven 3D object generation and manipulation methods made rapid advances [5, 14, 17, 23, 37, 46]. The most recently proposed Dreamfusion uses Imagen to supervise the Neural Radiance Fields network (NeRF) [24] to generate 3D object [27]. The most related work to ours named AvatarCLIP was recently proposed to achieve zero-shot text-driven 3D avatar generation and animation [10]. Given a text prompt, AvatarCLIP first generates a coarse shape by code-book-based retrieval, guided by CLIP. Then the coarse + +shape is used to initialize a NeuS network [47] to generate the implicit representation. Finally, the implicit 3D avatar is optimized to sculpt fine geometry and generate texture. This method treats the 3D human generation as a NeuS optimization process. However, the implicit representation makes it difficult to implement in games and unfriendly to user interaction. As a comparison, our created bone-driven game characters are controlled by explicit parameters with physical meanings. This enables players and game developers to further edit the created characters according to their needs. + +# 3. Method + +Fig. 2 shows an overview of the proposed T2P. We first train an imitator to simulate the game engine and pretrain a translator to translate the CLIP image embeddings to continuous facial parameters. Then, to achieve text-to-parameter translation, given the text prompts, we fine-tune the translator to predict continuous parameters and combine the evolution search to optimize discrete parameters. + +# 3.1. Imitator + +We train a neural imitator to mimic the behavior of the game engine in order to differentiate the rendering of in-game characters. 
It takes in continuous facial parameters $\pmb{x}$ and renders the front view of the game character $\pmb{y}$ . Different from the F2P [38] taking a similar generator network architecture of DC-GAN [29], we add a positional encoder at the input-end of the renderer to improve the facial param- + +![](images/9ff6fbf9c3690ff6d7095235fccf21ef72ee04810507a995b9e68ba29091debe.jpg) +Figure 3. The architecture of our translator. The translator contains a set of transformer encoder layers, several learnable tokens, a fine-tuning head, and a prediction head. The translator is firstly pre-trained on CLIP image embeddings and then fine-tuned on CLIP text embeddings to predict continuous facial parameters. When fine-tuning the translator, only the parameters of the fine-tuning head are updated. + +eters parsing on complex textures and geometry. We treat the imitator training as a regression problem to minimize the pixel-wise distance between the images rendered by the game engine and the imitator. To avoid the blurry rendered pixels, we use L1 loss as the loss function to train the imitator: + +$$ +\begin{array}{l} \mathcal {L} _ {G} (\boldsymbol {x}) = E _ {\boldsymbol {x} \sim u (\boldsymbol {x})} \left\{\left| \left| \boldsymbol {y} - \hat {\boldsymbol {y}} \right| \right| _ {1} \right\} \tag {1} \\ = E _ {\boldsymbol {x} \sim u (\boldsymbol {x})} \left\{\left| \left| G (\boldsymbol {x}) - \operatorname {E n g i n e} (\boldsymbol {x}) \right| \right| _ {1} \right\}, \\ \end{array} +$$ + +where $G(\pmb{x})$ and $\text{Engine}(\pmb{x})$ represent the image rendered by the imitator and game engine, respectively. + +To prepare the training data, we randomly sample 170K continuous facial parameters $\pmb{x}$ from a multidimensional uniform distribution $u(\pmb{x})$ . We feed these parameters into the game engine to render out the facial images. Then these facial parameters and image pairs are split into 80% and 20% for training and validation. + +# 3.2. Continuous Parameters Searching + +We aim to train a translator to predict continuous facial parameters based on CLIP text embeddings. To reduce the learning difficulty, we first pre-train the translator on CLIP image embeddings and then fine-tune it on text CLIP embeddings. The main reason is that text-parameter pairs are expensive to collect, while image-parameter pairs can be infinitely generated with the game engine. + +We take the randomly sampled facial parameters and rendered image pairs mentioned in section 3.1 as training data. The rendered images are fed into the CLIP image encoder to collect image embeddings. Then we build a translator $F$ based on a transformer encoder, and train it to map the image embeddings $e_{I}$ into facial parameters $x$ , as shown in Fig. 3. The object function is defined as the L1 reconstruction loss between the true facial parameters and the predicted ones: + +$$ +\mathcal {L} _ {F} \left(\boldsymbol {e} _ {I}, \boldsymbol {x}\right) = E _ {e _ {I} \sim u \left(\boldsymbol {e} _ {I}\right)} \left\{\left| \left| F \left(\boldsymbol {e} _ {I}\right) - \hat {\boldsymbol {x}} \right| \right| _ {1} \right\}. \tag {2} +$$ + +When T2P creates game characters given text prompts, there is no image embeddings available. Though the CLIP is trained to pull the text and image pairs close to each other in the embedding space, there are still gaps between the two modalities. We, therefore, fine-tune the translator to fit the input text embeddings. 
Inspired by the recent prompt tuning study [53], we fix the parameters of the transformer and fine-tune a tiny tuner head. The translator is trained to map the text embeddings $e_{T}$ to facial parameters $x$ . Then the facial parameters are fed into the imitator to render the image of the game character. The fine-tuning object function is to minimize the cosine distance between the given text embeddings $e_{T}$ and the image embeddings of the rendered image: + +$$ +\begin{array}{l} \mathcal {L} _ {C L I P} \left(\boldsymbol {e} _ {T}, \boldsymbol {x}\right) = 1 - \cos \left(\boldsymbol {e} _ {T}, E _ {I} (G (\boldsymbol {x}))\right) \tag {3} \\ = 1 - \cos \left(\boldsymbol {e} _ {T}, E _ {I} \left(G \left(F \left(\boldsymbol {e} _ {T}\right)\right)\right), \right. \\ \end{array} +$$ + +where $E_{I}$ is the CLIP image encoder. The parameters of the fine-tuned head $w$ are iteratively updated as follows, + +$$ +w \leftarrow w - \eta_ {t} \frac {\partial \mathcal {L} _ {C L I P}}{\partial w}, \tag {4} +$$ + +where $\eta_{t}$ is the learning rate at $t$ th iteration. We follow the snapshot ensembles [11] and set the learning rate using the cosine annealing schedule with warm restarts (SGDR) [22] to encourage the translator to converge to and escape from local minima: + +$$ +\eta_ {t} = \eta_ {\min } + \frac {1}{2} \left(\eta_ {\max } - \eta_ {\min }\right) \left(1 + \cos \left(\frac {N _ {t}}{N} \pi\right)\right), \tag {5} +$$ + +where $\eta_{min}$ , $\eta_{max}$ , and $\eta_t$ denote the minimum, maximum, and current learning rate, respectively. $N$ denotes the number of iterations between two warm restarts, and $N_t$ denotes the number of iterations since the last restart. Each time the $N_t$ equals $N$ , the current iteration is called a snapshot point, and we save the predicted facial parameters at this point. These facial parameters are then used to initialize the first population of the evolution search. + +# 3.3. Discrete Parameters Searching + +In the bone-driven face model, besides continuous facial parameters controlling its bones, discrete facial elements (like the hairstyle, beard styles, and make-up) are also important. However, these elements are difficult for the imitator to learn, because they are discrete and highly changeable. Unlike previous methods that ignore discrete parameters during optimization, we propose to evolutionally search them by directly interacting with the game engine. Evolutionary algorithms have been widely used in reinforcement learning and neural architecture search [21, 36], where the objective function can be optimized without using any gradient information. + +![](images/c8eec65ad6ffdac643645c7ac82ebc49997794290b8d614c918f835ac3c6a09b.jpg) +Figure 4. Game characters created by the proposed T2P given the text prompt "monkey". The first five game characters are created by the translator at different fine-tuning iterations. The last one is created by the evolution search, adding a discrete facial element, a beard. + +Here we perform a text-driven evolution search to find the optimum discrete facial parameters. The initial generation contains random initialized discrete parameters as well as the continuous facial parameters predicted by the translator. To impose supervision on 3D views, we render out two images for each game character, one for front view $y_{front}$ and one for side view $y_{side}$ . 
The facial parameters are scored by the CLIP model as follows, + +$$ +\begin{array}{l} S _ {C L I P} = \alpha \cos \left(E _ {T} (T), E _ {I} \left(\boldsymbol {y} _ {\text {f r o n t}}\right)\right) \tag {6} \\ + (1 - \alpha) \cos \left(E _ {T} \left(T ^ {\prime}\right), E _ {I} (\boldsymbol {y} _ {\text {s i d e}})\right), \\ \end{array} +$$ + +where $\alpha$ is the weight coefficient, $T$ is the given text prompt, $T'$ is the automatically adjusted text prompt for the side view, $E_{T}$ is the CLIP text encoder and $E_{I}$ is the CLIP image encoder. Then $k$ random pairs of facial parameters are selected as parents to produce the next generation through crossover and mutation. For the crossover step, child $\pmb{x}^c$ is generated by randomly choosing a value from parents $\pmb{x}^f$ and $\pmb{x}^m$ at each position $i$ , + +$$ +P \left(x _ {i} ^ {c} = x _ {i} ^ {f}\right) + P \left(x _ {i} ^ {c} = x _ {i} ^ {m}\right) = 1. \tag {7} +$$ + +For the mutation step, each child parameter $\pmb{x}^c$ is added randomly noise at multiple randomly selected position $i$ , + +$$ +x _ {i} ^ {c \prime} = x _ {i} ^ {c} + n o i s e. \tag {8} +$$ + +The newly generated children's parameters together with the better ones of the parents' parameters are selected as the next generation and get involved in the looping selection, crossover, and mutation. The evolution process terminates until the CLIP score is converged. + +# 3.4. Implementation Details + +Network architecture. Our imitator consists of a positional encoder with four fully-connected layers and a generator with six transposed convolution layers. The generator is similar to DCGAN's generator [29], except that its Tanh activation of the output layer is removed to encourage a better convergence. The translator consists of eight Transformer encoder layers [45], each of them having eight + +multi-attention heads, and sixteen input tokens. The first token is the CLIP embeddings and the other tokens are learnable. We concatenate a prediction head with one single fully-connected layer after the Transformer. The fine-tuning head of the translator is a three layers perceptron with a bottleneck architecture. + +Training details. The imitator and translator are both trained using SGD optimizer [4]. We set the momentum to 0.9 and set the weight decay to 5e-4. For imitator pretraining, the learning rate is set to 1e-3 and is reduced to $0.98\mathrm{x}$ per 30 epochs, and the training is stopped after 500 epochs. For translator pre-training, the learning rate is set to 1e-4 and is reduced to $0.1\mathrm{x}$ at the 600th epoch and the training is stopped at the 1000th epoch. We randomly sample 170K facial parameters and corresponding rendered images of in-game characters pairs to train the imitator and translator. For translator fine-tuning, the minimum and maximum learning rates are set to $\eta_{min} = 0$ and $\eta_{max} = 1$ , respectively, and the number of iterations between two warm starts $N$ is set to 10 for the SGDR learning rate scheduler. Fine-tuning is stopped when the CLIP scores are no longer improved by more than 100 iterations. + +Evolution search. The facial parameters predicted by the translator at the last 5 snapshot points are selected as initial values. Each set of facial parameters contains 269 continuous parameters and 62 discrete parameters, and the initialized values of these discrete parameters are set to zeros, which means these facial elements do not appear at the beginning. 
These 5 sets of facial parameters together with 5 more random ones are the first population for the evolution search. We found that updating continuous parameters together with discrete parameters in the evolution search achieves better results. The number of selected pairs of parents is set to 10. The weight coefficient $\alpha$ is set to 0.8. The crossover rate is set to 0.4 and the mutation rate is set to 0.05. + +Prompt engineering. To enhance the text prompts, we follow the CLIP [28] and adapt prompt ensembling to the given text prompts. We preset 12 template sentences, such as “{} head rendered in a game engine”, and then fill the + +![](images/564530c9162f8746800af5bfff816310c8e5d314bee28305f2a7a0a5c89fedfc.jpg) +Figure 5. In-game fictional characters created by the proposed T2P given different text prompts. The results in the first row are created by the translator. The results in the second row are created by the evolution search. + +“{}” with the input text prompt. We calculate the CLIP text embeddings of the filled sentences and take their mean value as the input text embeddings for the translator and evolution search. For evolution search, we further add “side view of” to the template sentences when calculating the CLIP score of the rendered images of the side view. + +# 4. Experimental Results and Analysis + +# 4.1. Game Character Auto-Creation + +Fig. 4 shows the game characters created by T2P given the text prompt "monkey". The first five images show the in-game characters created by the translator at different fin-tuning iterations. The in-game character gradually grows from a normal human face to look like a monkey. The evolution search further searches discrete facial elements and also slightly improves continuous parameters. The last image of Fig. 4 shows the evolution search adds a beard to the character to make it more vivid. In this process, the proposed T2P is enabled to search both continuous and discrete facial parameters to optimize the in-game character to be consistent with the given text prompt and vivid. Fig. 5 shows more results of fictional character creation. Results in the first row are controlled by continuous parameters, and results in the second row are added discrete facial elements. + +T2P can create characters with animal heads, as shown in Fig. 4, fictional characters, as shown in Fig. 5, and celebrities, as shown in Fig. 6, and characters conditioned on compactied text prompts, as shown in Fig. 7. These results show the powerful zero-shot game character auto-creation ability of the proposed T2P. By inputting only a text prompt, T2P can generate a vivid character, which is more flexible and time-saving for players or game developers compared to manual customization. + +# 4.2. Comparison with Other Methods + +We compare the proposed method with AvatarCLIP [10] and DreamFusion [27]. The comparison includes objective evaluations and subjective evaluations. Since DreamFusion is not open source yet, we use the community implementation version of it, named Stable-Dreamfusion1. This version uses the open-source stable diffusion model [34] to drive the 3D object generation. We only compare the heads generated by these methods. This may introduce unfairness, thus we will never claim superiority besides the head part. + +We feed 24 different text prompts into these two methods and our proposed T2P to generate characters respectively. Three examples are shown in Fig. 8. 
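To make the discrete-parameter search of Sec. 3.3 concrete, the sketch below implements one generation of the selection-crossover-mutation loop. It is an illustrative re-implementation, not the exact pipeline of this paper: the `clip_score` callable stands in for the two-view CLIP scoring of Eq. (6), and the elitism rule and mutation noise scale are assumptions; the pair count, crossover rate, and mutation rate default to the values given in Sec. 3.4.

```python
import numpy as np

def evolve_one_generation(population, clip_score, num_pairs=10,
                          crossover_rate=0.4, mutation_rate=0.05,
                          noise_std=0.1, rng=None):
    """One generation of a text-driven evolution search (illustrative sketch).

    population : (P, D) array of candidate facial parameter vectors
                 (continuous and discrete genes concatenated).
    clip_score : callable mapping one parameter vector to a scalar score,
                 standing in for S_CLIP of Eq. (6) on front/side renders.
    """
    rng = np.random.default_rng() if rng is None else rng

    children = []
    for _ in range(num_pairs):
        # Select a random pair of parents from the current population.
        i, j = rng.choice(len(population), size=2, replace=False)
        father, mother = population[i], population[j]
        if rng.random() < crossover_rate:
            # Uniform crossover, Eq. (7): each gene is copied from one parent.
            mask = rng.random(father.shape) < 0.5
            child = np.where(mask, father, mother)
        else:
            child = father.copy()
        # Mutation, Eq. (8): add noise at a few randomly chosen positions.
        mutate = rng.random(child.shape) < mutation_rate
        child = child + mutate * rng.normal(0.0, noise_std, size=child.shape)
        children.append(child)

    # Keep the better half of the current population together with the new
    # children as the next generation (a simple elitism assumption).
    scores = np.array([clip_score(x) for x in population])
    elites = population[np.argsort(scores)[-(len(population) // 2):]]
    return np.concatenate([elites, np.stack(children)], axis=0)
```

In this sketch the loop would be repeated, re-scoring each generation with Eq. (6), until the CLIP score stops improving; the initial population would mix the translator's snapshot predictions with random candidates as described above.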
For objective evaluations, we compare the Inception Score [35], CLIP Ranking-1, and their speed (run on NVIDIA A30), as shown in Table 1. For each method, CLIP Ranking-1 calculates the ratio of its created characters ranked by CLIP as top-1 among the characters created by all three methods. The evaluation scores show the proposed T2P outperforms the other two methods and runs at a much faster speed. + +For subjective evaluations, we invite 20 volunteers to evaluate the generation results in terms of realistic degree and consistency with the given text. They are asked to focus on the heads and faces of the characters and score them from 1 to 5, where 1 is the worst and 5 is the best. The evaluation results are shown in Table 1. Evaluation results show our method consistently outperforms the other two methods. We also notice that AvatarCLIP performs good at celebrities generation, Dreamfusion is good at fictional characters generation, while our method performs better at both types, just as shown in Fig. 8. + +# 4.3. Ablation Studies + +We conduct ablation studies to analyze the importance of the proposed translator and evolution search. We run our framework with three settings, including 1) only evolution search 2) only translator and 3) both translator and evolution search. The details of these settings are as follows. + +1) Evolution Search. The translator is removed from the framework and the evolution search is used to directly search both continuous and discrete facial parameters given text prompts. +2) Translator. The evolution search is abandoned, and the translator is fine-tuned to translate the given text prompts into continuous facial parameters and gives up controlling discrete parameters. +3) Full Implementation. Given text prompts, the translator is fine-tuned to predict continuous facial parameters. Then, the evolution search further searches discrete parameters and also improves the continuous ones. +Fig. 9 shows the CLIP scores increasing curves with the + +![](images/a8c0e18028430106ba80174d06514eb1b674748c6e98b8cc07b78c135af40bf5.jpg) +Figure 6. In-game celebrities created by the proposed T2P. This figure shows the front view and the side view for each character. + +
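In Table 1 below, CLIP Ranking-1 is the fraction of test prompts on which a method's character is ranked first by CLIP among the outputs of all three methods. A minimal sketch of this ranking computation is given here; the score matrix is a hypothetical input, not the evaluation code used in the paper.

```python
import numpy as np

def clip_ranking1(scores):
    """scores: (num_prompts, num_methods) array of CLIP similarities between
    each text prompt and the character each method generated for it.
    Returns, per method, the fraction of prompts on which it ranks first
    (ties go to the method with the lower column index)."""
    scores = np.asarray(scores)
    winners = scores.argmax(axis=1)  # index of the best-scoring method per prompt
    return np.bincount(winners, minlength=scores.shape[1]) / scores.shape[0]
```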
<table>
  <tr><th rowspan="2">Method</th><th colspan="3">Objective Evaluations</th><th colspan="2">Subjective Evaluations</th></tr>
  <tr><th>Inception Score ↑</th><th>CLIP Ranking-1 ↑</th><th>Time ↓</th><th>Reality ↑</th><th>Consistency with Text ↑</th></tr>
  <tr><td>DreamFusion [27]</td><td>1.60 ± 0.12</td><td>16.67%</td><td>254.50 min</td><td>1.85 ± 1.02</td><td>2.23 ± 1.39</td></tr>
  <tr><td>AvatarCLIP [10]</td><td>1.37 ± 0.31</td><td>16.67%</td><td>177.79 min</td><td>1.97 ± 0.53</td><td>2.14 ± 0.66</td></tr>
  <tr><td>T2P (ours)</td><td>1.65 ± 0.21</td><td>66.66%</td><td>359.47 s</td><td>3.87 ± 0.47</td><td>3.34 ± 0.53</td></tr>
</table>
+ +Table 1. Comparison results of DreamFusion, AvatarCLIP, and the proposed T2P in terms of objective and subjective evaluations. + +T2P running in 300 seconds. The means and standard deviations are calculated based on 100 times repeat running driven by one text prompt. As shown in the figure, the full implementation of our method always outperforms the other two. The translator is optimized rapidly to find optimal continuous parameters but can not further improve the CLIP scores because of lacking discrete facial elements. Compared with the translator, the evolution search is quite slow but can reach a higher CLIP score. The full implementation of T2P takes advantage of both translator and evolution search and achieves fast and better optimization. + +We further test different settings of proposed T2P on 100 + +different text prompts to evaluate their performance. Table 2 shows the results. The first row is the result of directly using the pre-trained translator to predict continuous facial parameters, and the second row is the result of fine-tuning translator to predict parameters. The fine-tuned one can achieve a higher CLIP score, which indicates the necessity of fine-tuning. The CLIP scores of only using the evolution search and the full version of T2P are shown in the third and fourth rows, respectively. The full version of T2P achieves the highest CLIP score because it can search both continuous and discrete facial parameters to create better in-game characters. + +![](images/dec700e641a5a823ea484affc9a91737b1535d790683cc60be23c53e57b431f8.jpg) +Figure 7. In-game characters created by the proposed T2P given complicated prompts. + +![](images/28c5b6378145d59a4612625316da84833e3b1c7d6f7aeeff04d4c8ecb709b524.jpg) +Figure 8. Comparison of AvatarCLIP, DreamFusion, and the proposed T2P. Each column shows the 3D characters created by these methods given the same text prompt. + +![](images/881609561241c57c2c1c30fd174d2c335053c84df12dff839c84ce682b8e5e59.jpg) +Figure 9. Curves of CLIP scores increasing within 300s under three different module settings. + +# 4.4. Facial Parameter Interpolation + +Since the generated characters are controlled by parameters with explicit physical meanings, users can further ad + +
<table>
  <tr><th>Translator</th><th>Evolution Search</th><th>CLIP Score</th></tr>
  <tr><td>fixed</td><td>×</td><td>27.29 ± 3.10</td></tr>
  <tr><td>fine-tuned</td><td>×</td><td>34.85 ± 3.15</td></tr>
  <tr><td>×</td><td>✓</td><td>35.31 ± 2.26</td></tr>
  <tr><td>fine-tuned</td><td>✓</td><td>35.72 ± 2.70</td></tr>
</table>
+ +Table 2. Results of ablation studies. Four versions of the proposed method are compared. + +![](images/974c37c5e18a83288778f5010de30eea88e2d41f91d49c30caaa5dd7948ee776.jpg) +Figure 10. Examples of the facial parameter interpolation of game characters. + +just the outlook of the characters as they want. One can also interpolate different facial parameters to create a new character, as shown in Fig. 10. The first row shows the interpolation between the monkey and Thanos, in which the new facial parameters are calculated as follows, + +$$ +\boldsymbol {x} _ {\text {n e w}} = \beta \boldsymbol {x} _ {\text {m o n k e y}} + (1 - \beta) \boldsymbol {x} _ {\text {T h a n o s}}, \tag {9} +$$ + +where $\beta$ is the interpolation coefficient decreasing from 1 to 0. The results in the second row of Fig. 10 show the interpolation between the monkey and Shrek. Besides, more than two characters can also be interpolated. We believe the benefits of the facial parameters controlling bone-driven game characters can give players a higher degree of freedom in character customization. + +# 5. Conclusion + +We propose a novel method called "text-to-parameter translation" to create bone-driven in-game characters given text prompts. Our method achieves high-quality zero-shot creation of in-game characters and can search both continuous and discrete facial parameters in a unified framework. The proposed text-driven framework is flexible and time-saving for users, and the created bone-driven characters with physically meaningful facial parameters are convenient for users to further edit as they want. Experimental results show our method achieves high-quality and vivid zero-shot text-driven game character auto-creation and outperforms other SOTA text-to-3D generation methods in terms of objective evaluations, speed, and subjective evaluations. + +# References + +[1] Volker Blanz and Thomas Vetter. A morphable model for the synthesis of 3d faces. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, pages 187-194, 1999. 2 +[2] James Booth, Epameinondas Antonakos, Stylianos Ploumpis, George Trigeorgis, Yannis Panagakis, and Stefanos Zafeiriou. 3d face morphable models" in-the-wild". In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 48-57, 2017. 2 +[3] Igor Borovikov, Karine Levonyan, Jon Rein, Pawel Wrotek, and Nitish Victor. Applied monocular reconstruction of parametric faces with domain engineering. arXiv preprint arXiv:2208.02935, 2022. 2 +[4] Léon Bottou. Large-scale machine learning with stochastic gradient descent. In Proceedings of COMPSTAT'2010, pages 177-186. Springer, 2010. 5 +[5] Zehranaz Canfes, M Furkan Atasoy, Alara Dirik, and Pinar Yanardag. Text and image guided 3d avatar generation and manipulation. arXiv preprint arXiv:2202.06079, 2022. 3 +[6] Chen Cao, Yanlin Weng, Shun Zhou, Yiying Tong, and Kun Zhou. Facewarehouse: A 3d facial expression database for visual computing. IEEE Transactions on Visualization and Computer Graphics, 20(3):413-425, 2013. 2 +[7] Pengfei Dou, Shishir K Shah, and Ioannis A Kakadiaris. End-to-end 3d face reconstruction with deep neural networks. In proceedings of the IEEE conference on computer vision and pattern recognition, pages 5908-5917, 2017. 2, 3 +[8] Rinon Gal, Or Patashnik, Haggai Maron, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators. ACM Transactions on Graphics (TOG), 41(4):1-13, 2022. 
3 +[9] Thomas Gering, Andreas Morel-Forster, Clemens Blumer, Bernhard Egger, Marcel Luthi, Sandro Schonborn, and Thomas Vetter. Morphable face models—an open framework. In 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018), pages 75–82. IEEE, 2018. 2 +[10] Fangzhou Hong, Mingyuan Zhang, Liang Pan, Zhongang Cai, Lei Yang, and Ziwei Liu. Avatarclip: Zero-shot text-driven generation and animation of 3d avatars. arXiv preprint arXiv:2205.08535, 2022. 2, 3, 6, 7 +[11] Gao Huang, Yixuan Li, Geoff Pleiss, Zhuang Liu, John E Hopcroft, and Kilian Q Weinberger. Snapshot ensembles: Train 1, get m for free. arXiv preprint arXiv:1704.00109, 2017. 4 +[12] Patrik Huber, Guosheng Hu, Rafael Tena, Pouria Mortazavian, P Koppen, William J Christmas, Matthias Ratsch, and Josef Kittler. A multiresolution 3d morphable face model and fitting framework. In Proceedings of the 11th international joint conference on computer vision, imaging and computer graphics theory and applications, 2016. 2 +[13] Aaron S Jackson, Adrian Bulat, Vasileios Argyriou, and Georgios Tzimiropoulos. Large pose 3d face reconstruction from a single image via direct volumetric cnn regression. In Proceedings of the IEEE international conference on computer vision, pages 1031-1039, 2017. 3 + +[14] Ajay Jain, Ben Mildenhall, Jonathan T Barron, Pieter Abbeel, and Ben Poole. Zero-shot text-guided object generation with dream fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 867-876, 2022. 3 +[15] Yuming Jiang, Ziqi Huang, Xingang Pan, Chen Change Loy, and Ziwei Liu. Talk-to-edit: Fine-grained facial editing via dialog. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 13799-13808, 2021. 3 +[16] Yuming Jiang, Shuai Yang, Haonan Qju, Wayne Wu, Chen Change Loy, and Ziwei Liu. Text2human: Text-driven controllable human image generation. ACM Transactions on Graphics (TOG), 41(4):1-11, 2022. 3 +[17] Nasir Khalid, Tianhao Xie, Eugene Belilovsky, and Tiberiu Popa. Clip-mesh: Generating textured meshes from text using pretrained image-text models. ACM Transactions on Graphics (TOG), Proc. SIGGRAPH Asia, 2022. 3 +[18] Gwanghyun Kim and Jong Chul Ye. Diffusionclip: Text-guided image manipulation using diffusion models. 2021. 3 +[19] Tianye Li, Timo Bolkart, Michael J Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4d scans. ACM Trans. Graph., 36(6):194-1, 2017. 2 +[20] Jiangke Lin, Yi Yuan, and Zhengxia Zou. Meingame: Create a game character face from a single portrait. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 311-319, 2021. 3 +[21] Yuqiao Liu, Yanan Sun, Bing Xue, Mengjie Zhang, Gary G Yen, and Kay Chen Tan. A survey on evolutionary neural architecture search. IEEE transactions on neural networks and learning systems, 2021. 4 +[22] Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts. arXiv preprint arXiv:1608.03983, 2016. 4 +[23] Oscar Michel, Roi Bar-On, Richard Liu, Sagie Benaim, and Rana Hanocka. Text2mesh: Text-driven neural stylization for meshes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13492-13502, 2022. 3 +[24] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 
3 +[25] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2085–2094, 2021. 3 +[26] Weilong Peng, Zhiyong Feng, Chao Xu, and Yong Su. Parametric t-spline face morphable model for detailed fitting in shape subspace. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6139-6147, 2017. 2 +[27] Ben Poole, Ajay Jain, Jonathan T Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. arXiv preprint arXiv:2209.14988, 2022. 3, 6, 7 + +[28] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pages 8748-8763. PMLR, 2021. 5 +[29] Alec Radford, Luke Metz, and Soumith Chintala. Unsupervised representation learning with deep convolutional generative adversarial networks. arXiv preprint arXiv:1511.06434, 2015. 3, 5 +[30] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022. 3 +[31] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In International Conference on Machine Learning, pages 8821-8831. PMLR, 2021. 3 +[32] Scott Reed, Zeynep Akata, Xinchen Yan, Lajanugen Logeswaran, Bernt Schiele, and Honglak Lee. Generative adversarial text to image synthesis. In International conference on machine learning, pages 1060-1069. PMLR, 2016. 3 +[33] Elad Richardson, Matan Sela, Roy Or-El, and Ron Kimmel. Learning detailed face reconstruction from a single image. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1259–1268, 2017. 2 +[34] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10684-10695, 2022. 6 +[35] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. Advances in neural information processing systems, 29, 2016. 6 +[36] Tim Salimans, Jonathan Ho, Xi Chen, Szymon Sidor, and Ilya Sutskever. Evolution strategies as a scalable alternative to reinforcement learning. arXiv preprint arXiv:1703.03864, 2017. 4 +[37] Aditya Sanghi, Hang Chu, Joseph G Lambourne, Ye Wang, Chin-Yi Cheng, Marco Fumero, and Kamal Rahimi Malekshan. Clip-forge: Towards zero-shot text-to-shape generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18603-18613, 2022. 3 +[38] Tianyang Shi, Yi Yuan, Changjie Fan, Zhengxia Zou, Zhenwei Shi, and Yong Liu. Face-to-parameter translation for game character auto-creation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 161–170, 2019. 2, 3 +[39] Tianyang Shi, Zhengxia Zou, Zhenwei Shi, and Yi Yuan. Neural rendering for game character auto-creation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2020. 2 +[40] Tianyang Shi, Zhengxia Zou, Xinhui Song, Zheng Song, Changjian Gu, Changjie Fan, and Yi Yuan. 
Neutral face + +game character auto-creation via pokerface-gan. In Proceedings of the 28th ACM International Conference on Multimedia, pages 3201–3209, 2020. 2 +[41] Tianyang Shi, Zhengxia Zuo, Yi Yuan, and Changjie Fan. Fast and robust face-to-parameter translation for game character auto-creation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 1733–1740, 2020. 2 +[42] Ayush Tewari, Michael Zollhofer, Hyeongwoo Kim, Pablo Garrido, Florian Bernard, Patrick Perez, and Christian Theobalt. Mofa: Model-based deep convolutional face autoencoder for unsupervised monocular reconstruction. In Proceedings of the IEEE International Conference on Computer Vision Workshops, pages 1274-1283, 2017. 2 +[43] Anh Tuan Tran, Tal Hassner, Iacopo Masi, Eran Paz, Yuval Nirkin, and Gerard Medioni. Extreme 3d face reconstruction: Seeing through occlusions. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3935-3944, 2018. 2 +[44] Anh Tuan Tran, Tal Hassner, Iacopo Masi, and Gerard Medioni. Regressing robust and discriminative 3d morphable models with a very deep neural network. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5163-5172, 2017. 2, 3 +[45] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. 5 +[46] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Clip-nerf: Text-and-image driven manipulation of neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3835-3844, 2022. 3 +[47] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. arXiv preprint arXiv:2106.10689, 2021. 3 +[48] Tianyi Wei, Dongdong Chen, Wenbo Zhou, Jing Liao, Zhentao Tan, Lu Yuan, Weiming Zhang, and Nenghai Yu. Hairclip: Design your hair by text and reference image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18072-18081, 2022. 3 +[49] Lior Wolf, Yaniv Taigman, and Adam Polyak. Unsupervised creation of parameterized avatars. In Proceedings of the IEEE International Conference on Computer Vision, pages 1530-1538, 2017. 2 +[50] Weihao Xia, Yujiu Yang, Jing-Hao Xue, and Baoyuan Wu. Tedigan: Text-guided diverse face image generation and manipulation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2256-2265, 2021. 3 +[51] Zipeng Xu, Tianwei Lin, Hao Tang, Fu Li, Dongliang He, Nicu Sebe, Radu Timofte, Luc Van Gool, and Errui Ding. Predict, prevent, and evaluate: Disentangled text-driven image manipulation empowered by pre-trained vision-language model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18229-18238, 2022. 3 + +[52] Yingchen Yu, Fangneng Zhan, Rongliang Wu, Jiahui Zhang, Shijian Lu, Miaomiao Cui, Xuansong Xie, Xian-Sheng Hua, and Chunyan Miao. Towards counterfactual image manipulation via clip. In Proceedings of the 30th ACM International Conference on Multimedia, pages 3637-3645, 2022. 3 +[53] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. International Journal of Computer Vision, 130(9):2337-2348, 2022. 
4 \ No newline at end of file diff --git a/2023/Zero-Shot Text-to-Parameter Translation for Game Character Auto-Creation/images.zip b/2023/Zero-Shot Text-to-Parameter Translation for Game Character Auto-Creation/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..b28715b3f2a1e25760da324d464b7c53a716649e --- /dev/null +++ b/2023/Zero-Shot Text-to-Parameter Translation for Game Character Auto-Creation/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a4863f93bc72fdf3bb2df677cbc3e275dcafff6229df8355b580413c6587ce4 +size 615353 diff --git a/2023/Zero-Shot Text-to-Parameter Translation for Game Character Auto-Creation/layout.json b/2023/Zero-Shot Text-to-Parameter Translation for Game Character Auto-Creation/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..921d5d8aa3ca3774bf50b5714f4defeea65e357b --- /dev/null +++ b/2023/Zero-Shot Text-to-Parameter Translation for Game Character Auto-Creation/layout.json @@ -0,0 +1,7582 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 58, + 103, + 536, + 119 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 103, + 536, + 119 + ], + "spans": [ + { + "bbox": [ + 58, + 103, + 536, + 119 + ], + "type": "text", + "content": "Zero-Shot Text-to-Parameter Translation for Game Character Auto-Creation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 59, + 142, + 532, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 142, + 532, + 171 + ], + "spans": [ + { + "bbox": [ + 59, + 142, + 532, + 171 + ], + "type": "text", + "content": "Rui Zhao" + }, + { + "bbox": [ + 59, + 142, + 532, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 59, + 142, + 532, + 171 + ], + "type": "text", + "content": ", Wei Li" + }, + { + "bbox": [ + 59, + 142, + 532, + 171 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 59, + 142, + 532, + 171 + ], + "type": "text", + "content": ", Zhipeng Hu" + }, + { + "bbox": [ + 59, + 142, + 532, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 59, + 142, + 532, + 171 + ], + "type": "text", + "content": ", Lincheng Li" + }, + { + "bbox": [ + 59, + 142, + 532, + 171 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 59, + 142, + 532, + 171 + ], + "type": "text", + "content": ", Zhengxia Zou" + }, + { + "bbox": [ + 59, + 142, + 532, + 171 + ], + "type": "inline_equation", + "content": "^{3*}" + }, + { + "bbox": [ + 59, + 142, + 532, + 171 + ], + "type": "text", + "content": ", Zhenwei Shi" + }, + { + "bbox": [ + 59, + 142, + 532, + 171 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 59, + 142, + 532, + 171 + ], + "type": "text", + "content": ", Changjie Fan" + }, + { + "bbox": [ + 59, + 142, + 532, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 59, + 142, + 532, + 171 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 59, + 142, + 532, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 59, + 142, + 532, + 171 + ], + "type": "text", + "content": "Netease Fuxi AI Lab, " + }, + { + "bbox": [ + 59, + 142, + 532, + 171 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 59, + 142, + 532, + 171 + ], + "type": "text", + "content": "Nankai University, " + }, + { + "bbox": [ + 59, + 142, + 532, + 171 + ], + "type": "inline_equation", + "content": "^{3}" + 
}, + { + "bbox": [ + 59, + 142, + 532, + 171 + ], + "type": "text", + "content": "Beihang University" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 135, + 174, + 457, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 174, + 457, + 186 + ], + "spans": [ + { + "bbox": [ + 135, + 174, + 457, + 186 + ], + "type": "text", + "content": "{zhaorui10, zphu, lilincheng, fanchangjie}@corp.netease.com," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 125, + 187, + 465, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 187, + 465, + 199 + ], + "spans": [ + { + "bbox": [ + 125, + 187, + 465, + 199 + ], + "type": "text", + "content": "liwei@dbis.nankai.edu.cn, {zhengxiazou, zhenweishi}@buaa.edu.cn" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 48, + 227, + 548, + 406 + ], + "blocks": [ + { + "bbox": [ + 48, + 227, + 548, + 406 + ], + "lines": [ + { + "bbox": [ + 48, + 227, + 548, + 406 + ], + "spans": [ + { + "bbox": [ + 48, + 227, + 548, + 406 + ], + "type": "image", + "image_path": "db582dc6bc97988279dcf18cad7287276d66ea2a9cad30bf6860b11a081bd6d4.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 412, + 547, + 434 + ], + "lines": [ + { + "bbox": [ + 46, + 412, + 547, + 434 + ], + "spans": [ + { + "bbox": [ + 46, + 412, + 547, + 434 + ], + "type": "text", + "content": "Figure 1. Game characters created by the proposed text-to-parameter translation (T2P) given different text prompts. The front view and three side views are shown for each character." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 143, + 460, + 192, + 473 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 460, + 192, + 473 + ], + "spans": [ + { + "bbox": [ + 143, + 460, + 192, + 473 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 487, + 289, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 487, + 289, + 691 + ], + "spans": [ + { + "bbox": [ + 46, + 487, + 289, + 691 + ], + "type": "text", + "content": "Recent popular Role-Playing Games (RPGs) saw the great success of character auto-creation systems. The bone-driven face model controlled by continuous parameters (like the position of bones) and discrete parameters (like the hairstyles) makes it possible for users to personalize and customize in-game characters. Previous in-game character auto-creation systems are mostly image-driven, where facial parameters are optimized so that the rendered character looks similar to the reference face photo. This paper proposes a novel text-to-parameter translation method (T2P) to achieve zero-shot text-driven game character auto-creation. With our method, users can create a vivid in-game character with arbitrary text description without using any reference photo or editing hundreds of parameters manually. In our method, taking the power of large-scale pre-trained multi-modal CLIP and neural rendering, T2P searches both continuous facial parameters and discrete facial parame" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 461, + 547, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 461, + 547, + 570 + ], + "spans": [ + { + "bbox": [ + 306, + 461, + 547, + 570 + ], + "type": "text", + "content": "ters in a unified framework. 
Due to the discontinuous parameter representation, previous methods have difficulty in effectively learning discrete facial parameters. T2P, to our best knowledge, is the first method that can handle the optimization of both discrete and continuous parameters. Experimental results show that T2P can generate high-quality and vivid game characters with given text prompts. T2P outperforms other SOTA text-to-3D generation methods on both objective evaluations and subjective evaluations." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 593, + 387, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 593, + 387, + 605 + ], + "spans": [ + { + "bbox": [ + 306, + 593, + 387, + 605 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 613, + 547, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 613, + 547, + 685 + ], + "spans": [ + { + "bbox": [ + 304, + 613, + 547, + 685 + ], + "type": "text", + "content": "Role-Playing Games (RPGs) are praised by gamers for providing immersive experiences. Some of the recent popular RPGs, like Grand Theft Auto Online1 and Naraka2, have opened up character customization systems to players. In such systems, in-game characters are bone-driven and controlled by continuous parameters, like the position," + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 1, + 494, + 15 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 1, + 494, + 15 + ], + "spans": [ + { + "bbox": [ + 145, + 1, + 494, + 15 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 199, + 13, + 442, + 24 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 199, + 13, + 442, + 24 + ], + "spans": [ + { + "bbox": [ + 199, + 13, + 442, + 24 + ], + "type": "text", + "content": "Except for this watermark, it is identical to the accepted version;" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 179, + 25, + 461, + 36 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 25, + 461, + 36 + ], + "spans": [ + { + "bbox": [ + 179, + 25, + 461, + 36 + ], + "type": "text", + "content": "the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 58, + 702, + 141, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 141, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 141, + 712 + ], + "type": "text", + "content": "*Corresponding Authors." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 317, + 693, + 465, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 693, + 465, + 703 + ], + "spans": [ + { + "bbox": [ + 317, + 693, + 465, + 703 + ], + "type": "text", + "content": "1https://www.rockstargames.com/GTAOnline" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 318, + 703, + 425, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 703, + 425, + 713 + ], + "spans": [ + { + "bbox": [ + 318, + 703, + 425, + 713 + ], + "type": "text", + "content": "2http://www.narakathegame.com" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "21013" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 180 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 180 + ], + "type": "text", + "content": "rotation, scale of each bone, and discrete parameters, like the hairstyle, beard styles, make-ups, and other facial elements. By manually adjusting these parameters, players can control the appearance of the characters in the game according to their personal preferences, rather than using predefined character templates. However, it is cumbersome and time-consuming for users to manually adjust hundreds of parameters - usually taking up to hours to create a character that matches their expectations." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 182, + 288, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 182, + 288, + 434 + ], + "spans": [ + { + "bbox": [ + 46, + 182, + 288, + 434 + ], + "type": "text", + "content": "To automatically create in-game characters, the method named Face-to-parameter translation (F2P) was recently proposed to automatically create game characters based on a single input face image [38]. F2P and its variants [39, 41] have been successfully used in recent RPGs like Narake and Justice, and virtual meeting platform Yaotai. Recent 3D face reconstruction methods [2, 7, 26, 33, 42-44] can also be adapted to create game characters. However, all the above-mentioned methods require reference face photos for auto-creation. Users may take time to search, download and upload suitable photos for their expected game characters. Compared with images, text prompts are more flexible and time-saving for game character auto-creation. A very recent work AvatarCLIP [10] achieved text-driven avatar auto-creation and animation. It optimizes implicit neural networks to generate characters. However, the created characters are controlled by implicit parameters, which lack explicit physical meanings, thus manually adjusting them needs extra designs. This will be inconvenient for players or game developers to further fine-tune the created game characters as they want." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 436, + 287, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 436, + 287, + 615 + ], + "spans": [ + { + "bbox": [ + 46, + 436, + 287, + 615 + ], + "type": "text", + "content": "To address the above problems, we propose text-to-parameter translation (T2P) to tackle the in-game character auto-creation task based on arbitrary text prompts. T2P takes the power of large-scale pre-trained CLIP to achieve zero-shot text-driven character creation and utilizes neural rendering to make the rendering of in-game characters differentiable to accelerate the parameters optimization. Previous works like F2Ps give up controlling discrete facial parameters due to the problem of discontinuous parameter gradients. To our best knowledge, the proposed T2P is the first method that can handle both continuous and discrete facial parameters optimization in a unified framework to create vivid in-game characters. F2P is also the first text-driven automatic character creation suitable for game environments." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 618, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 618, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 618, + 288, + 713 + ], + "type": "text", + "content": "Our method consists of a pre-training stage and a text-to-parameter translation stage. In the pre-training stage, we first train an imitator to imitate the rendering behavior of the game engine to make the parameter searching pipeline end-to-end differentiable. We also pre-train a translator to translate the CLIP image embeddings of random game characters to their facial parameters. Then at the text-to-parameter translation stage, on one hand, we fine-tune the translator" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "type": "text", + "content": "on un-seen CLIP text embeddings to predict continuous parameters given text prompt rather than images, on the other hand, discrete parameters are evolutionally searched. Finally, the game engine takes in the facial parameters and creates the in-game characters which correspond to the text prompt described, as shown in Fig 1. Objective evaluations and subjective evaluations both indicate our method outperforms other SOTA zero-shot text-to-3D methods." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 317, + 168, + 503, + 179 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 168, + 503, + 179 + ], + "spans": [ + { + "bbox": [ + 317, + 168, + 503, + 179 + ], + "type": "text", + "content": "Our contributions are summarized as follows:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 180, + 545, + 324 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 304, + 180, + 545, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 180, + 545, + 228 + ], + "spans": [ + { + "bbox": [ + 304, + 180, + 545, + 228 + ], + "type": "text", + "content": "1) We propose a novel text-to-parameter translation method for zero-shot in-game character auto-creation. To the best of our knowledge, we are the first to study text-driven character creation ready for game environments." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 229, + 545, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 229, + 545, + 275 + ], + "spans": [ + { + "bbox": [ + 304, + 229, + 545, + 275 + ], + "type": "text", + "content": "2) The proposed T2P can optimize both continuous and discrete parameters in a unified framework, unlike earlier methods giving up controlling difficult-to-learn discrete parameters." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 277, + 545, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 277, + 545, + 324 + ], + "spans": [ + { + "bbox": [ + 304, + 277, + 545, + 324 + ], + "type": "text", + "content": "3) The proposed text-driven auto-creation paradigm is flexible and friendly for users, and the predicted physically meaningful facial parameters enable players or game developers to further finetune the game character as they want." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 336, + 392, + 349 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 336, + 392, + 349 + ], + "spans": [ + { + "bbox": [ + 306, + 336, + 392, + 349 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 357, + 502, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 357, + 502, + 369 + ], + "spans": [ + { + "bbox": [ + 306, + 357, + 502, + 369 + ], + "type": "text", + "content": "2.1. Parametric Character Auto-Creation" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 375, + 545, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 375, + 545, + 615 + ], + "spans": [ + { + "bbox": [ + 304, + 375, + 545, + 615 + ], + "type": "text", + "content": "Character auto-creation has been an emerging research topic because of its significance in role-playing games, augmented reality, and metaverses. Some methods on this topic are recently proposed. Tied Output Synthesis (TOS) learns to predict a set of binary facial parameters to control the graphical engine to generate a character that looks like the human in input photo [49]. Face-to-Parameter translation (F2P) is proposed to optimize a set of continuous facial parameters to minimize the distance between the generated game character's face and the input photo [38]. In F2P's following works [39, 41], the framework is improved to achieve fast and robust character creation. The PockerFace-Gan is proposed to decouple the expression features and identity features in order to generate expression-less game characters [40]. Borovikov et al. applies domain engineering and predict the facial parameters in a global-local way, considering the face as a hierarchical ensemble of general facial structure and local facial regions [3]. These methods all need reference photos to create characters, while we aim at creating characters based on text input." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 623, + 440, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 623, + 440, + 635 + ], + "spans": [ + { + "bbox": [ + 306, + 623, + 440, + 635 + ], + "type": "text", + "content": "2.2.3D Face Reconstruction" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": "3D face reconstruction also aims to generate a 3D face given single or multi-view 2D facial images. 3D morphable model (3DMM) [1] and its variants [2,6,9,12,19] are representative methods in the literature. They first parameterize a 3D face mesh data and then optimize it to match the facial identity, expression, and texture of given reference im" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "21014" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 73, + 547, + 286 + ], + "blocks": [ + { + "bbox": [ + 51, + 73, + 547, + 286 + ], + "lines": [ + { + "bbox": [ + 51, + 73, + 547, + 286 + ], + "spans": [ + { + "bbox": [ + 51, + 73, + 547, + 286 + ], + "type": "image", + "image_path": "1ee822a8851306695398626c8ce3763d3ac4c4340965dcaa2c95796d87cbee04.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 292, + 547, + 350 + ], + "lines": [ + { + "bbox": [ + 46, + 292, + 547, + 350 + ], + "spans": [ + { + "bbox": [ + 46, + 292, + 547, + 350 + ], + "type": "text", + "content": "Figure 2. An overview of the proposed T2P. " + }, + { + "bbox": [ + 46, + 292, + 547, + 350 + ], + "type": "inline_equation", + "content": "E_{I}" + }, + { + "bbox": [ + 46, + 292, + 547, + 350 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 292, + 547, + 350 + ], + "type": "inline_equation", + "content": "E_{T}" + }, + { + "bbox": [ + 46, + 292, + 547, + 350 + ], + "type": "text", + "content": " denote the CLIP image encoder and text encoder, respectively. An imitator is trained to mimic the game engine and achieve differentiable rendering. A translator is pre-trained to translate the CLIP image embeddings to continuous facial parameters. When creating game characters given text prompts, T2P searches continuous facial parameters by fine-tuning the translator and searches discrete facial parameters by the evolution search. Finally, the facial parameters are fed into the game engine to render the in-game characters." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 369, + 289, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 369, + 289, + 465 + ], + "spans": [ + { + "bbox": [ + 46, + 369, + 289, + 465 + ], + "type": "text", + "content": "ages. Taking advantage of deep Convolutional Neural Networks (CNNs), high-level image representations are used to improve the predicting of the morphable model coefficients [7, 13, 44]. 
The recently proposed MeInGame firstly reconstructs the face as a 3DMM model and then transfers the face to game mesh keeping their topology [20]. It also predicts texture map and lighting coefficients from input images to improve the outlook of the game mesh." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 478, + 233, + 490 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 478, + 233, + 490 + ], + "spans": [ + { + "bbox": [ + 47, + 478, + 233, + 490 + ], + "type": "text", + "content": "2.3. Zero-Shot Text-Driven Generation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 498, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 498, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 498, + 289, + 715 + ], + "type": "text", + "content": "Zero-shot content generation is recently made possible by the powerful multimodel representation and generalization capabilities of CLIP [32]. Combining the CLIP with variational autoencoder or diffusion model, DALL-E [31], DALL-E 2 [31] and Imagen [30] achieved high-quality zero-shot text-to-image synthesis, and sparked widespread discussion. Text-driven image translation and manipulation, and human image generation are also explored [8, 15, 16, 18, 25, 48, 50-52]. Taking advantage of CLIP, zero-shot text-driven 3D object generation and manipulation methods made rapid advances [5, 14, 17, 23, 37, 46]. The most recently proposed Dreamfusion uses Imagen to supervise the Neural Radiance Fields network (NeRF) [24] to generate 3D object [27]. The most related work to ours named AvatarCLIP was recently proposed to achieve zero-shot text-driven 3D avatar generation and animation [10]. Given a text prompt, AvatarCLIP first generates a coarse shape by code-book-based retrieval, guided by CLIP. Then the coarse" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 369, + 547, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 369, + 547, + 490 + ], + "spans": [ + { + "bbox": [ + 304, + 369, + 547, + 490 + ], + "type": "text", + "content": "shape is used to initialize a NeuS network [47] to generate the implicit representation. Finally, the implicit 3D avatar is optimized to sculpt fine geometry and generate texture. This method treats the 3D human generation as a NeuS optimization process. However, the implicit representation makes it difficult to implement in games and unfriendly to user interaction. As a comparison, our created bone-driven game characters are controlled by explicit parameters with physical meanings. This enables players and game developers to further edit the created characters according to their needs." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 499, + 362, + 512 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 499, + 362, + 512 + ], + "spans": [ + { + "bbox": [ + 306, + 499, + 362, + 512 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 519, + 547, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 519, + 547, + 605 + ], + "spans": [ + { + "bbox": [ + 304, + 519, + 547, + 605 + ], + "type": "text", + "content": "Fig. 2 shows an overview of the proposed T2P. We first train an imitator to simulate the game engine and pretrain a translator to translate the CLIP image embeddings to continuous facial parameters. 
Then, to achieve text-to-parameter translation, given the text prompts, we fine-tune the translator to predict continuous parameters and combine the evolution search to optimize discrete parameters." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 611, + 369, + 623 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 611, + 369, + 623 + ], + "spans": [ + { + "bbox": [ + 306, + 611, + 369, + 623 + ], + "type": "text", + "content": "3.1. Imitator" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 629, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 629, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 629, + 547, + 715 + ], + "type": "text", + "content": "We train a neural imitator to mimic the behavior of the game engine in order to differentiate the rendering of in-game characters. It takes in continuous facial parameters " + }, + { + "bbox": [ + 304, + 629, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 304, + 629, + 547, + 715 + ], + "type": "text", + "content": " and renders the front view of the game character " + }, + { + "bbox": [ + 304, + 629, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\pmb{y}" + }, + { + "bbox": [ + 304, + 629, + 547, + 715 + ], + "type": "text", + "content": ". Different from the F2P [38] taking a similar generator network architecture of DC-GAN [29], we add a positional encoder at the input-end of the renderer to improve the facial param-" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "21015" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 72, + 282, + 150 + ], + "blocks": [ + { + "bbox": [ + 51, + 72, + 282, + 150 + ], + "lines": [ + { + "bbox": [ + 51, + 72, + 282, + 150 + ], + "spans": [ + { + "bbox": [ + 51, + 72, + 282, + 150 + ], + "type": "image", + "image_path": "9ff6fbf9c3690ff6d7095235fccf21ef72ee04810507a995b9e68ba29091debe.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 159, + 288, + 237 + ], + "lines": [ + { + "bbox": [ + 46, + 159, + 288, + 237 + ], + "spans": [ + { + "bbox": [ + 46, + 159, + 288, + 237 + ], + "type": "text", + "content": "Figure 3. The architecture of our translator. The translator contains a set of transformer encoder layers, several learnable tokens, a fine-tuning head, and a prediction head. The translator is firstly pre-trained on CLIP image embeddings and then fine-tuned on CLIP text embeddings to predict continuous facial parameters. When fine-tuning the translator, only the parameters of the fine-tuning head are updated." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 258, + 287, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 258, + 287, + 330 + ], + "spans": [ + { + "bbox": [ + 46, + 258, + 287, + 330 + ], + "type": "text", + "content": "eters parsing on complex textures and geometry. We treat the imitator training as a regression problem to minimize the pixel-wise distance between the images rendered by the game engine and the imitator. 
To avoid the blurry rendered pixels, we use L1 loss as the loss function to train the imitator:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 72, + 337, + 287, + 366 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 337, + 287, + 366 + ], + "spans": [ + { + "bbox": [ + 72, + 337, + 287, + 366 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {G} (\\boldsymbol {x}) = E _ {\\boldsymbol {x} \\sim u (\\boldsymbol {x})} \\left\\{\\left| \\left| \\boldsymbol {y} - \\hat {\\boldsymbol {y}} \\right| \\right| _ {1} \\right\\} \\tag {1} \\\\ = E _ {\\boldsymbol {x} \\sim u (\\boldsymbol {x})} \\left\\{\\left| \\left| G (\\boldsymbol {x}) - \\operatorname {E n g i n e} (\\boldsymbol {x}) \\right| \\right| _ {1} \\right\\}, \\\\ \\end{array}", + "image_path": "19e2555e3f5ccd72f27b8ca8e79f903eba9e4840f9b677a8947144c3f1f8b36c.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 374, + 287, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 374, + 287, + 399 + ], + "spans": [ + { + "bbox": [ + 47, + 374, + 287, + 399 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 374, + 287, + 399 + ], + "type": "inline_equation", + "content": "G(\\pmb{x})" + }, + { + "bbox": [ + 47, + 374, + 287, + 399 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 374, + 287, + 399 + ], + "type": "inline_equation", + "content": "\\text{Engine}(\\pmb{x})" + }, + { + "bbox": [ + 47, + 374, + 287, + 399 + ], + "type": "text", + "content": " represent the image rendered by the imitator and game engine, respectively." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 399, + 287, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 399, + 287, + 471 + ], + "spans": [ + { + "bbox": [ + 47, + 399, + 287, + 471 + ], + "type": "text", + "content": "To prepare the training data, we randomly sample 170K continuous facial parameters " + }, + { + "bbox": [ + 47, + 399, + 287, + 471 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 47, + 399, + 287, + 471 + ], + "type": "text", + "content": " from a multidimensional uniform distribution " + }, + { + "bbox": [ + 47, + 399, + 287, + 471 + ], + "type": "inline_equation", + "content": "u(\\pmb{x})" + }, + { + "bbox": [ + 47, + 399, + 287, + 471 + ], + "type": "text", + "content": ". We feed these parameters into the game engine to render out the facial images. Then these facial parameters and image pairs are split into 80% and 20% for training and validation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 479, + 230, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 479, + 230, + 491 + ], + "spans": [ + { + "bbox": [ + 47, + 479, + 230, + 491 + ], + "type": "text", + "content": "3.2. Continuous Parameters Searching" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 497, + 287, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 497, + 287, + 581 + ], + "spans": [ + { + "bbox": [ + 46, + 497, + 287, + 581 + ], + "type": "text", + "content": "We aim to train a translator to predict continuous facial parameters based on CLIP text embeddings. To reduce the learning difficulty, we first pre-train the translator on CLIP image embeddings and then fine-tune it on text CLIP embeddings. 
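To make the imitator objective in Eq. (1) concrete, the following is a minimal PyTorch-style sketch of the training loop over the randomly sampled parameter/image pairs described in Sec. 3.1. The `Imitator` module, the offline dataset of engine renders, and the batch size are assumptions standing in for the components described in the text, not the authors' released code; the optimizer settings mirror the training details reported later in Sec. 3.4.

```python
# Illustrative sketch only, not the authors' implementation. Assumes PyTorch
# and a dataset of (facial_params, engine_image) pairs, i.e. the 170K randomly
# sampled parameter vectors rendered by the game engine (Sec. 3.1).
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader

def train_imitator(imitator, dataset, epochs=500, lr=1e-3):
    """Fit G(x) to Engine(x) with the pixel-wise L1 loss of Eq. (1)."""
    optimizer = torch.optim.SGD(imitator.parameters(), lr=lr,
                                momentum=0.9, weight_decay=5e-4)
    loader = DataLoader(dataset, batch_size=64, shuffle=True)  # batch size assumed
    for _ in range(epochs):
        for params, engine_image in loader:     # x ~ u(x), y_hat = Engine(x)
            pred_image = imitator(params)       # y = G(x), differentiable render
            loss = F.l1_loss(pred_image, engine_image)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
    return imitator
```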
The main reason is that text-parameter pairs are expensive to collect, while image-parameter pairs can be infinitely generated with the game engine." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 581, + 287, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 581, + 287, + 688 + ], + "spans": [ + { + "bbox": [ + 46, + 581, + 287, + 688 + ], + "type": "text", + "content": "We take the randomly sampled facial parameters and rendered image pairs mentioned in section 3.1 as training data. The rendered images are fed into the CLIP image encoder to collect image embeddings. Then we build a translator " + }, + { + "bbox": [ + 46, + 581, + 287, + 688 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 46, + 581, + 287, + 688 + ], + "type": "text", + "content": " based on a transformer encoder, and train it to map the image embeddings " + }, + { + "bbox": [ + 46, + 581, + 287, + 688 + ], + "type": "inline_equation", + "content": "e_{I}" + }, + { + "bbox": [ + 46, + 581, + 287, + 688 + ], + "type": "text", + "content": " into facial parameters " + }, + { + "bbox": [ + 46, + 581, + 287, + 688 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 581, + 287, + 688 + ], + "type": "text", + "content": ", as shown in Fig. 3. The object function is defined as the L1 reconstruction loss between the true facial parameters and the predicted ones:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 80, + 699, + 287, + 713 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 699, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 80, + 699, + 287, + 713 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {F} \\left(\\boldsymbol {e} _ {I}, \\boldsymbol {x}\\right) = E _ {e _ {I} \\sim u \\left(\\boldsymbol {e} _ {I}\\right)} \\left\\{\\left| \\left| F \\left(\\boldsymbol {e} _ {I}\\right) - \\hat {\\boldsymbol {x}} \\right| \\right| _ {1} \\right\\}. \\tag {2}", + "image_path": "7f536d5cc62a15f0f2549726ba9e1af7ca866731c8f2f46319201ac99e7595fa.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "text", + "content": "When T2P creates game characters given text prompts, there is no image embeddings available. Though the CLIP is trained to pull the text and image pairs close to each other in the embedding space, there are still gaps between the two modalities. We, therefore, fine-tune the translator to fit the input text embeddings. Inspired by the recent prompt tuning study [53], we fix the parameters of the transformer and fine-tune a tiny tuner head. The translator is trained to map the text embeddings " + }, + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "inline_equation", + "content": "e_{T}" + }, + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "text", + "content": " to facial parameters " + }, + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "text", + "content": ". Then the facial parameters are fed into the imitator to render the image of the game character. 
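Before the fine-tuning objective is stated, a minimal sketch of the translator and of the pre-training loss in Eq. (2) may help. The token layout follows the description in the implementation details (sixteen tokens, the first being the CLIP embedding, the rest learnable, a single fully-connected prediction head); the hidden size, the choice of reading the first output token, and all names are assumptions made only for illustration.

```python
# Illustrative sketch only, not the authors' implementation. Embedding size
# 512 (ViT-B/32-style CLIP) and reading the first output token are assumptions.
import torch
import torch.nn as nn

class Translator(nn.Module):
    def __init__(self, embed_dim=512, n_tokens=16, n_params=269):
        super().__init__()
        layer = nn.TransformerEncoderLayer(d_model=embed_dim, nhead=8,
                                           batch_first=True)
        self.encoder = nn.TransformerEncoder(layer, num_layers=8)
        # first token = CLIP embedding, the remaining tokens are learnable
        self.learned_tokens = nn.Parameter(torch.zeros(n_tokens - 1, embed_dim))
        self.head = nn.Linear(embed_dim, n_params)          # prediction head

    def forward(self, clip_embedding):                       # (B, embed_dim)
        b = clip_embedding.size(0)
        extra = self.learned_tokens.unsqueeze(0).expand(b, -1, -1)
        tokens = torch.cat([clip_embedding.unsqueeze(1), extra], dim=1)
        out = self.encoder(tokens)
        return self.head(out[:, 0])                          # continuous parameters x

def pretrain_step(translator, clip_image_emb, true_params, optimizer):
    """One step of the L1 reconstruction objective of Eq. (2)."""
    pred = translator(clip_image_emb)
    loss = (pred - true_params).abs().mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()
```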
The fine-tuning object function is to minimize the cosine distance between the given text embeddings " + }, + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "inline_equation", + "content": "e_{T}" + }, + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "text", + "content": " and the image embeddings of the rendered image:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 329, + 249, + 545, + 277 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 329, + 249, + 545, + 277 + ], + "spans": [ + { + "bbox": [ + 329, + 249, + 545, + 277 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {C L I P} \\left(\\boldsymbol {e} _ {T}, \\boldsymbol {x}\\right) = 1 - \\cos \\left(\\boldsymbol {e} _ {T}, E _ {I} (G (\\boldsymbol {x}))\\right) \\tag {3} \\\\ = 1 - \\cos \\left(\\boldsymbol {e} _ {T}, E _ {I} \\left(G \\left(F \\left(\\boldsymbol {e} _ {T}\\right)\\right)\\right), \\right. \\\\ \\end{array}", + "image_path": "83b07be12e1d40c473e51542da305cf3c91b193242a418a353818a28ad9b1ddd.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 286, + 545, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 286, + 545, + 310 + ], + "spans": [ + { + "bbox": [ + 305, + 286, + 545, + 310 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 286, + 545, + 310 + ], + "type": "inline_equation", + "content": "E_{I}" + }, + { + "bbox": [ + 305, + 286, + 545, + 310 + ], + "type": "text", + "content": " is the CLIP image encoder. The parameters of the fine-tuned head " + }, + { + "bbox": [ + 305, + 286, + 545, + 310 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 305, + 286, + 545, + 310 + ], + "type": "text", + "content": " are iteratively updated as follows," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 378, + 318, + 545, + 342 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 378, + 318, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 378, + 318, + 545, + 342 + ], + "type": "interline_equation", + "content": "w \\leftarrow w - \\eta_ {t} \\frac {\\partial \\mathcal {L} _ {C L I P}}{\\partial w}, \\tag {4}", + "image_path": "c50469c78d8ebdfd1b6b71c6443872ec00096759dd5dd9ead51a92164bf83cdc.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 350, + 545, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 350, + 545, + 409 + ], + "spans": [ + { + "bbox": [ + 304, + 350, + 545, + 409 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 350, + 545, + 409 + ], + "type": "inline_equation", + "content": "\\eta_{t}" + }, + { + "bbox": [ + 304, + 350, + 545, + 409 + ], + "type": "text", + "content": " is the learning rate at " + }, + { + "bbox": [ + 304, + 350, + 545, + 409 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 350, + 545, + 409 + ], + "type": "text", + "content": "th iteration. 
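As a sketch of how Eqs. (3)-(4) can be realized, the loop below freezes the Transformer and updates only a small tuning head on the text embedding, back-propagating through the differentiable imitator and the CLIP image encoder. The placement of the tuning head on the input embedding, the fixed step count, and the constant learning rate are simplifying assumptions; the paper instead schedules the learning rate with warm restarts (Eq. (5), introduced next) and stops on a CLIP-score plateau.

```python
# Illustrative sketch only, not the authors' implementation. Assumes the
# Translator/Imitator sketches above and a CLIP model exposing encode_image;
# the imitator and CLIP are used only as frozen, differentiable functions.
import torch
import torch.nn.functional as F

def finetune_on_text(text_emb, translator, tuner_head, imitator, clip_model,
                     steps=300, lr=0.1):
    """Minimize 1 - cos(e_T, E_I(G(F(e_T)))), Eq. (3), by updating w, Eq. (4)."""
    for p in translator.parameters():          # transformer weights stay frozen
        p.requires_grad_(False)
    optimizer = torch.optim.SGD(tuner_head.parameters(), lr=lr, momentum=0.9)
    for _ in range(steps):                     # fixed step count for brevity
        params = translator(tuner_head(text_emb))     # continuous parameters x
        image = imitator(params)                       # differentiable render G(x)
        image_emb = clip_model.encode_image(image)     # E_I(G(x))
        loss = 1.0 - F.cosine_similarity(text_emb, image_emb, dim=-1).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return translator(tuner_head(text_emb)).detach()
```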
We follow the snapshot ensembles [11] and set the learning rate using the cosine annealing schedule with warm restarts (SGDR) [22] to encourage the translator to converge to and escape from local minima:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 321, + 416, + 545, + 440 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 416, + 545, + 440 + ], + "spans": [ + { + "bbox": [ + 321, + 416, + 545, + 440 + ], + "type": "interline_equation", + "content": "\\eta_ {t} = \\eta_ {\\min } + \\frac {1}{2} \\left(\\eta_ {\\max } - \\eta_ {\\min }\\right) \\left(1 + \\cos \\left(\\frac {N _ {t}}{N} \\pi\\right)\\right), \\tag {5}", + "image_path": "27e2ee9ace61759ee9aeb909ef2e2586d82791b5d4c9ec319e1fcd80898810c5.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 448, + 545, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 448, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 304, + 448, + 545, + 544 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 448, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\eta_{min}" + }, + { + "bbox": [ + 304, + 448, + 545, + 544 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 448, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\eta_{max}" + }, + { + "bbox": [ + 304, + 448, + 545, + 544 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 448, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\eta_t" + }, + { + "bbox": [ + 304, + 448, + 545, + 544 + ], + "type": "text", + "content": " denote the minimum, maximum, and current learning rate, respectively. " + }, + { + "bbox": [ + 304, + 448, + 545, + 544 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 448, + 545, + 544 + ], + "type": "text", + "content": " denotes the number of iterations between two warm restarts, and " + }, + { + "bbox": [ + 304, + 448, + 545, + 544 + ], + "type": "inline_equation", + "content": "N_t" + }, + { + "bbox": [ + 304, + 448, + 545, + 544 + ], + "type": "text", + "content": " denotes the number of iterations since the last restart. Each time the " + }, + { + "bbox": [ + 304, + 448, + 545, + 544 + ], + "type": "inline_equation", + "content": "N_t" + }, + { + "bbox": [ + 304, + 448, + 545, + 544 + ], + "type": "text", + "content": " equals " + }, + { + "bbox": [ + 304, + 448, + 545, + 544 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 448, + 545, + 544 + ], + "type": "text", + "content": ", the current iteration is called a snapshot point, and we save the predicted facial parameters at this point. These facial parameters are then used to initialize the first population of the evolution search." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 551, + 473, + 563 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 551, + 473, + 563 + ], + "spans": [ + { + "bbox": [ + 306, + 551, + 473, + 563 + ], + "type": "text", + "content": "3.3. 
Discrete Parameters Searching" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 570, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 545, + 712 + ], + "type": "text", + "content": "In the bone-driven face model, besides continuous facial parameters controlling its bones, discrete facial elements (like the hairstyle, beard styles, and make-up) are also important. However, these elements are difficult for the imitator to learn, because they are discrete and highly changeable. Unlike previous methods that ignore discrete parameters during optimization, we propose to evolutionally search them by directly interacting with the game engine. Evolutionary algorithms have been widely used in reinforcement learning and neural architecture search [21, 36], where the objective function can be optimized without using any gradient information." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "21016" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 62, + 70, + 531, + 175 + ], + "blocks": [ + { + "bbox": [ + 62, + 70, + 531, + 175 + ], + "lines": [ + { + "bbox": [ + 62, + 70, + 531, + 175 + ], + "spans": [ + { + "bbox": [ + 62, + 70, + 531, + 175 + ], + "type": "image", + "image_path": "c8eec65ad6ffdac643645c7ac82ebc49997794290b8d614c918f835ac3c6a09b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 182, + 547, + 205 + ], + "lines": [ + { + "bbox": [ + 46, + 182, + 547, + 205 + ], + "spans": [ + { + "bbox": [ + 46, + 182, + 547, + 205 + ], + "type": "text", + "content": "Figure 4. Game characters created by the proposed T2P given the text prompt \"monkey\". The first five game characters are created by the translator at different fine-tuning iterations. The last one is created by the evolution search, adding a discrete facial element, a beard." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 225, + 289, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 225, + 289, + 321 + ], + "spans": [ + { + "bbox": [ + 46, + 225, + 289, + 321 + ], + "type": "text", + "content": "Here we perform a text-driven evolution search to find the optimum discrete facial parameters. The initial generation contains random initialized discrete parameters as well as the continuous facial parameters predicted by the translator. To impose supervision on 3D views, we render out two images for each game character, one for front view " + }, + { + "bbox": [ + 46, + 225, + 289, + 321 + ], + "type": "inline_equation", + "content": "y_{front}" + }, + { + "bbox": [ + 46, + 225, + 289, + 321 + ], + "type": "text", + "content": " and one for side view " + }, + { + "bbox": [ + 46, + 225, + 289, + 321 + ], + "type": "inline_equation", + "content": "y_{side}" + }, + { + "bbox": [ + 46, + 225, + 289, + 321 + ], + "type": "text", + "content": ". 
The facial parameters are scored by the CLIP model as follows," + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 78, + 329, + 287, + 357 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 329, + 287, + 357 + ], + "spans": [ + { + "bbox": [ + 78, + 329, + 287, + 357 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} S _ {C L I P} = \\alpha \\cos \\left(E _ {T} (T), E _ {I} \\left(\\boldsymbol {y} _ {\\text {f r o n t}}\\right)\\right) \\tag {6} \\\\ + (1 - \\alpha) \\cos \\left(E _ {T} \\left(T ^ {\\prime}\\right), E _ {I} (\\boldsymbol {y} _ {\\text {s i d e}})\\right), \\\\ \\end{array}", + "image_path": "98967784b36405fd38f4861b7af9cc72f2a7b8c81ca856fbb6290abb7d582a81.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "spans": [ + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "text", + "content": " is the weight coefficient, " + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "text", + "content": " is the given text prompt, " + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "inline_equation", + "content": "T'" + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "text", + "content": " is the automatically adjusted text prompt for the side view, " + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "inline_equation", + "content": "E_{T}" + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "text", + "content": " is the CLIP text encoder and " + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "inline_equation", + "content": "E_{I}" + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "text", + "content": " is the CLIP image encoder. Then " + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "text", + "content": " random pairs of facial parameters are selected as parents to produce the next generation through crossover and mutation. 
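For clarity, a sketch of the scoring rule in Eq. (6): each candidate parameter set is rendered by the game engine from the front and from the side and compared against the front-view and side-view text embeddings. `engine_render` is a hypothetical wrapper around the engine that returns a CLIP-preprocessed image tensor; the weight alpha = 0.8 follows the setting reported in the implementation details.

```python
# Illustrative sketch only, not the authors' implementation. `engine_render`
# and the pre-computed text embeddings e_front / e_side are assumptions.
import torch
import torch.nn.functional as F

@torch.no_grad()
def clip_score(params, e_front, e_side, clip_model, engine_render, alpha=0.8):
    """Weighted dual-view cosine similarity of Eq. (6)."""
    front_emb = clip_model.encode_image(engine_render(params, view="front"))
    side_emb = clip_model.encode_image(engine_render(params, view="side"))
    s_front = F.cosine_similarity(e_front, front_emb, dim=-1)
    s_side = F.cosine_similarity(e_side, side_emb, dim=-1)
    return (alpha * s_front + (1.0 - alpha) * s_side).item()
```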
For the crossover step, child " + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "inline_equation", + "content": "\\pmb{x}^c" + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "text", + "content": " is generated by randomly choosing a value from parents " + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "inline_equation", + "content": "\\pmb{x}^f" + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "inline_equation", + "content": "\\pmb{x}^m" + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "text", + "content": " at each position " + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 365, + 287, + 460 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 97, + 467, + 287, + 483 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 467, + 287, + 483 + ], + "spans": [ + { + "bbox": [ + 97, + 467, + 287, + 483 + ], + "type": "interline_equation", + "content": "P \\left(x _ {i} ^ {c} = x _ {i} ^ {f}\\right) + P \\left(x _ {i} ^ {c} = x _ {i} ^ {m}\\right) = 1. \\tag {7}", + "image_path": "22620cce5e4e2e1e27154610f7c21440344d9674e1244a28b9a9c701064ebcf1.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 490, + 287, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 490, + 287, + 515 + ], + "spans": [ + { + "bbox": [ + 46, + 490, + 287, + 515 + ], + "type": "text", + "content": "For the mutation step, each child parameter " + }, + { + "bbox": [ + 46, + 490, + 287, + 515 + ], + "type": "inline_equation", + "content": "\\pmb{x}^c" + }, + { + "bbox": [ + 46, + 490, + 287, + 515 + ], + "type": "text", + "content": " is added randomly noise at multiple randomly selected position " + }, + { + "bbox": [ + 46, + 490, + 287, + 515 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 490, + 287, + 515 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 127, + 522, + 287, + 536 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 522, + 287, + 536 + ], + "spans": [ + { + "bbox": [ + 127, + 522, + 287, + 536 + ], + "type": "interline_equation", + "content": "x _ {i} ^ {c \\prime} = x _ {i} ^ {c} + n o i s e. \\tag {8}", + "image_path": "de15a8dd090d8dba6122cda1ff6871849ea3c42e141abc79b62215622f513cfe.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 544, + 287, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 544, + 287, + 605 + ], + "spans": [ + { + "bbox": [ + 46, + 544, + 287, + 605 + ], + "type": "text", + "content": "The newly generated children's parameters together with the better ones of the parents' parameters are selected as the next generation and get involved in the looping selection, crossover, and mutation. The evolution process terminates until the CLIP score is converged." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 611, + 180, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 611, + 180, + 624 + ], + "spans": [ + { + "bbox": [ + 47, + 611, + 180, + 624 + ], + "type": "text", + "content": "3.4. 
Implementation Details" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 629, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 288, + 714 + ], + "type": "text", + "content": "Network architecture. Our imitator consists of a positional encoder with four fully-connected layers and a generator with six transposed convolution layers. The generator is similar to DCGAN's generator [29], except that its Tanh activation of the output layer is removed to encourage a better convergence. The translator consists of eight Transformer encoder layers [45], each of them having eight" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 225, + 545, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 225, + 545, + 297 + ], + "spans": [ + { + "bbox": [ + 304, + 225, + 545, + 297 + ], + "type": "text", + "content": "multi-attention heads, and sixteen input tokens. The first token is the CLIP embeddings and the other tokens are learnable. We concatenate a prediction head with one single fully-connected layer after the Transformer. The fine-tuning head of the translator is a three layers perceptron with a bottleneck architecture." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 300, + 546, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 300, + 546, + 492 + ], + "spans": [ + { + "bbox": [ + 304, + 300, + 546, + 492 + ], + "type": "text", + "content": "Training details. The imitator and translator are both trained using SGD optimizer [4]. We set the momentum to 0.9 and set the weight decay to 5e-4. For imitator pretraining, the learning rate is set to 1e-3 and is reduced to " + }, + { + "bbox": [ + 304, + 300, + 546, + 492 + ], + "type": "inline_equation", + "content": "0.98\\mathrm{x}" + }, + { + "bbox": [ + 304, + 300, + 546, + 492 + ], + "type": "text", + "content": " per 30 epochs, and the training is stopped after 500 epochs. For translator pre-training, the learning rate is set to 1e-4 and is reduced to " + }, + { + "bbox": [ + 304, + 300, + 546, + 492 + ], + "type": "inline_equation", + "content": "0.1\\mathrm{x}" + }, + { + "bbox": [ + 304, + 300, + 546, + 492 + ], + "type": "text", + "content": " at the 600th epoch and the training is stopped at the 1000th epoch. We randomly sample 170K facial parameters and corresponding rendered images of in-game characters pairs to train the imitator and translator. For translator fine-tuning, the minimum and maximum learning rates are set to " + }, + { + "bbox": [ + 304, + 300, + 546, + 492 + ], + "type": "inline_equation", + "content": "\\eta_{min} = 0" + }, + { + "bbox": [ + 304, + 300, + 546, + 492 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 300, + 546, + 492 + ], + "type": "inline_equation", + "content": "\\eta_{max} = 1" + }, + { + "bbox": [ + 304, + 300, + 546, + 492 + ], + "type": "text", + "content": ", respectively, and the number of iterations between two warm starts " + }, + { + "bbox": [ + 304, + 300, + 546, + 492 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 300, + 546, + 492 + ], + "type": "text", + "content": " is set to 10 for the SGDR learning rate scheduler. Fine-tuning is stopped when the CLIP scores are no longer improved by more than 100 iterations." 
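Putting the pieces of Sec. 3.3 next to these settings, the sketch below shows one plausible form of the evolution search over the concatenated continuous and discrete parameters, reusing the `clip_score` sketch from above; the population size, number of parent pairs, crossover rate and mutation rate follow the evolution-search settings quoted in the next paragraph. Treating the crossover and mutation rates as per-position probabilities, the Gaussian noise model, and the handling of discrete entries are assumptions rather than the authors' exact procedure.

```python
# Illustrative sketch only, not the authors' implementation. `score_fn` is the
# clip_score sketch above; `initial_population` would hold the 5 snapshot
# predictions plus 5 random candidates. Discrete entries of each vector would
# in practice be snapped back to valid style indices after mutation (assumption).
import numpy as np

def evolution_search(initial_population, score_fn, generations=50,
                     n_pairs=10, crossover_rate=0.4, mutation_rate=0.05):
    population = [np.asarray(p, dtype=np.float32) for p in initial_population]
    pop_size = len(population)
    for _ in range(generations):
        children = []
        for _ in range(n_pairs):
            i, j = np.random.choice(pop_size, size=2, replace=False)
            f, m = population[i], population[j]
            # crossover, Eq. (7): each position is copied from either parent
            mask = np.random.rand(f.size) < crossover_rate
            child = np.where(mask, m, f)
            # mutation, Eq. (8): add noise at a few randomly chosen positions
            mut = np.random.rand(child.size) < mutation_rate
            child = (child + mut * np.random.randn(child.size)).astype(np.float32)
            children.append(child)
        # next generation: the better parents plus the newly created children
        combined = population + children
        scores = [score_fn(p) for p in combined]
        order = np.argsort(scores)[::-1][:pop_size]
        population = [combined[k] for k in order]
    return population[0]   # best-scoring candidate of the final generation
```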
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 495, + 547, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 495, + 547, + 662 + ], + "spans": [ + { + "bbox": [ + 304, + 495, + 547, + 662 + ], + "type": "text", + "content": "Evolution search. The facial parameters predicted by the translator at the last 5 snapshot points are selected as initial values. Each set of facial parameters contains 269 continuous parameters and 62 discrete parameters, and the initialized values of these discrete parameters are set to zeros, which means these facial elements do not appear at the beginning. These 5 sets of facial parameters together with 5 more random ones are the first population for the evolution search. We found that updating continuous parameters together with discrete parameters in the evolution search achieves better results. The number of selected pairs of parents is set to 10. The weight coefficient " + }, + { + "bbox": [ + 304, + 495, + 547, + 662 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 304, + 495, + 547, + 662 + ], + "type": "text", + "content": " is set to 0.8. The crossover rate is set to 0.4 and the mutation rate is set to 0.05." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 666, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 666, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 666, + 547, + 715 + ], + "type": "text", + "content": "Prompt engineering. To enhance the text prompts, we follow the CLIP [28] and adapt prompt ensembling to the given text prompts. We preset 12 template sentences, such as “{} head rendered in a game engine”, and then fill the" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "21017" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 70, + 282, + 232 + ], + "blocks": [ + { + "bbox": [ + 53, + 70, + 282, + 232 + ], + "lines": [ + { + "bbox": [ + 53, + 70, + 282, + 232 + ], + "spans": [ + { + "bbox": [ + 53, + 70, + 282, + 232 + ], + "type": "image", + "image_path": "564530c9162f8746800af5bfff816310c8e5d314bee28305f2a7a0a5c89fedfc.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 239, + 288, + 283 + ], + "lines": [ + { + "bbox": [ + 46, + 239, + 288, + 283 + ], + "spans": [ + { + "bbox": [ + 46, + 239, + 288, + 283 + ], + "type": "text", + "content": "Figure 5. In-game fictional characters created by the proposed T2P given different text prompts. The results in the first row are created by the translator. The results in the second row are created by the evolution search." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 305, + 287, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 305, + 287, + 377 + ], + "spans": [ + { + "bbox": [ + 46, + 305, + 287, + 377 + ], + "type": "text", + "content": "“{}” with the input text prompt. We calculate the CLIP text embeddings of the filled sentences and take their mean value as the input text embeddings for the translator and evolution search. 
For evolution search, we further add “side view of” to the template sentences when calculating the CLIP score of the rendered images of the side view." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 388, + 241, + 402 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 388, + 241, + 402 + ], + "spans": [ + { + "bbox": [ + 47, + 388, + 241, + 402 + ], + "type": "text", + "content": "4. Experimental Results and Analysis" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 407, + 219, + 419 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 407, + 219, + 419 + ], + "spans": [ + { + "bbox": [ + 47, + 407, + 219, + 419 + ], + "type": "text", + "content": "4.1. Game Character Auto-Creation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 426, + 287, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 426, + 287, + 605 + ], + "spans": [ + { + "bbox": [ + 46, + 426, + 287, + 605 + ], + "type": "text", + "content": "Fig. 4 shows the game characters created by T2P given the text prompt \"monkey\". The first five images show the in-game characters created by the translator at different fin-tuning iterations. The in-game character gradually grows from a normal human face to look like a monkey. The evolution search further searches discrete facial elements and also slightly improves continuous parameters. The last image of Fig. 4 shows the evolution search adds a beard to the character to make it more vivid. In this process, the proposed T2P is enabled to search both continuous and discrete facial parameters to optimize the in-game character to be consistent with the given text prompt and vivid. Fig. 5 shows more results of fictional character creation. Results in the first row are controlled by continuous parameters, and results in the second row are added discrete facial elements." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 606, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 606, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 606, + 287, + 713 + ], + "type": "text", + "content": "T2P can create characters with animal heads, as shown in Fig. 4, fictional characters, as shown in Fig. 5, and celebrities, as shown in Fig. 6, and characters conditioned on compactied text prompts, as shown in Fig. 7. These results show the powerful zero-shot game character auto-creation ability of the proposed T2P. By inputting only a text prompt, T2P can generate a vivid character, which is more flexible and time-saving for players or game developers compared to manual customization." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 72, + 484, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 484, + 85 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 484, + 85 + ], + "type": "text", + "content": "4.2. Comparison with Other Methods" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 90, + 545, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 90, + 545, + 198 + ], + "spans": [ + { + "bbox": [ + 304, + 90, + 545, + 198 + ], + "type": "text", + "content": "We compare the proposed method with AvatarCLIP [10] and DreamFusion [27]. The comparison includes objective evaluations and subjective evaluations. Since DreamFusion is not open source yet, we use the community implementation version of it, named Stable-Dreamfusion1. 
This version uses the open-source stable diffusion model [34] to drive the 3D object generation. We only compare the heads generated by these methods. This may introduce unfairness, thus we will never claim superiority besides the head part." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 198, + 545, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 198, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 304, + 198, + 545, + 319 + ], + "type": "text", + "content": "We feed 24 different text prompts into these two methods and our proposed T2P to generate characters respectively. Three examples are shown in Fig. 8. For objective evaluations, we compare the Inception Score [35], CLIP Ranking-1, and their speed (run on NVIDIA A30), as shown in Table 1. For each method, CLIP Ranking-1 calculates the ratio of its created characters ranked by CLIP as top-1 among the characters created by all three methods. The evaluation scores show the proposed T2P outperforms the other two methods and runs at a much faster speed." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 319, + 546, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 319, + 546, + 450 + ], + "spans": [ + { + "bbox": [ + 304, + 319, + 546, + 450 + ], + "type": "text", + "content": "For subjective evaluations, we invite 20 volunteers to evaluate the generation results in terms of realistic degree and consistency with the given text. They are asked to focus on the heads and faces of the characters and score them from 1 to 5, where 1 is the worst and 5 is the best. The evaluation results are shown in Table 1. Evaluation results show our method consistently outperforms the other two methods. We also notice that AvatarCLIP performs good at celebrities generation, Dreamfusion is good at fictional characters generation, while our method performs better at both types, just as shown in Fig. 8." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 458, + 406, + 470 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 458, + 406, + 470 + ], + "spans": [ + { + "bbox": [ + 306, + 458, + 406, + 470 + ], + "type": "text", + "content": "4.3. Ablation Studies" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 477, + 545, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 477, + 545, + 536 + ], + "spans": [ + { + "bbox": [ + 304, + 477, + 545, + 536 + ], + "type": "text", + "content": "We conduct ablation studies to analyze the importance of the proposed translator and evolution search. We run our framework with three settings, including 1) only evolution search 2) only translator and 3) both translator and evolution search. The details of these settings are as follows." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 537, + 545, + 694 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 304, + 537, + 545, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 537, + 545, + 585 + ], + "spans": [ + { + "bbox": [ + 304, + 537, + 545, + 585 + ], + "type": "text", + "content": "1) Evolution Search. The translator is removed from the framework and the evolution search is used to directly search both continuous and discrete facial parameters given text prompts." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 586, + 545, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 586, + 545, + 633 + ], + "spans": [ + { + "bbox": [ + 304, + 586, + 545, + 633 + ], + "type": "text", + "content": "2) Translator. The evolution search is abandoned, and the translator is fine-tuned to translate the given text prompts into continuous facial parameters and gives up controlling discrete parameters." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 634, + 545, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 634, + 545, + 681 + ], + "spans": [ + { + "bbox": [ + 304, + 634, + 545, + 681 + ], + "type": "text", + "content": "3) Full Implementation. Given text prompts, the translator is fine-tuned to predict continuous facial parameters. Then, the evolution search further searches discrete parameters and also improves the continuous ones." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 317, + 682, + 545, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 682, + 545, + 694 + ], + "spans": [ + { + "bbox": [ + 317, + 682, + 545, + 694 + ], + "type": "text", + "content": "Fig. 9 shows the CLIP scores increasing curves with the" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 317, + 702, + 478, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 702, + 478, + 713 + ], + "spans": [ + { + "bbox": [ + 317, + 702, + 478, + 713 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 317, + 702, + 478, + 713 + ], + "type": "text", + "content": "https://github.com/ashawkey/stable-dreamfusion" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "21018" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 70, + 542, + 426 + ], + "blocks": [ + { + "bbox": [ + 51, + 70, + 542, + 426 + ], + "lines": [ + { + "bbox": [ + 51, + 70, + 542, + 426 + ], + "spans": [ + { + "bbox": [ + 51, + 70, + 542, + 426 + ], + "type": "image", + "image_path": "a8c0e18028430106ba80174d06514eb1b674748c6e98b8cc07b78c135af40bf5.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 63, + 433, + 529, + 445 + ], + "lines": [ + { + "bbox": [ + 63, + 433, + 529, + 445 + ], + "spans": [ + { + "bbox": [ + 63, + 433, + 529, + 445 + ], + "type": "text", + "content": "Figure 6. In-game celebrities created by the proposed T2P. This figure shows the front view and the side view for each character." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 55, + 455, + 538, + 529 + ], + "blocks": [ + { + "bbox": [ + 55, + 455, + 538, + 529 + ], + "lines": [ + { + "bbox": [ + 55, + 455, + 538, + 529 + ], + "spans": [ + { + "bbox": [ + 55, + 455, + 538, + 529 + ], + "type": "table", + "html": "
MethodObjective EvaluationsSubjective Evaluations
Inception Score ↑CLIP Ranking-1 ↑Time ↓Reality ↑Consistency with Text ↑
DreamFusion [27]1.60 ± 0.1216.67%254.50min1.85 ± 1.022.23 ± 1.39
AvatarCLIP [10]1.37 ± 0.3116.67%177.79min1.97 ± 0.532.14 ± 0.66
T2P (ours)1.65 ± 0.2166.66%359.47s3.87 ± 0.473.34 ± 0.53
", + "image_path": "4ec08a6b83ab5c4f28db921e6451c37ba276e3e079a94127994f157808117282.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 61, + 534, + 530, + 545 + ], + "lines": [ + { + "bbox": [ + 61, + 534, + 530, + 545 + ], + "spans": [ + { + "bbox": [ + 61, + 534, + 530, + 545 + ], + "type": "text", + "content": "Table 1. Comparison results of DreamFusion, AvatarCLIP, and the proposed T2P in terms of objective and subjective evaluations." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 567, + 288, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 567, + 288, + 699 + ], + "spans": [ + { + "bbox": [ + 46, + 567, + 288, + 699 + ], + "type": "text", + "content": "T2P running in 300 seconds. The means and standard deviations are calculated based on 100 times repeat running driven by one text prompt. As shown in the figure, the full implementation of our method always outperforms the other two. The translator is optimized rapidly to find optimal continuous parameters but can not further improve the CLIP scores because of lacking discrete facial elements. Compared with the translator, the evolution search is quite slow but can reach a higher CLIP score. The full implementation of T2P takes advantage of both translator and evolution search and achieves fast and better optimization." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 59, + 701, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 701, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 59, + 701, + 288, + 713 + ], + "type": "text", + "content": "We further test different settings of proposed T2P on 100" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 567, + 545, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 567, + 545, + 711 + ], + "spans": [ + { + "bbox": [ + 304, + 567, + 545, + 711 + ], + "type": "text", + "content": "different text prompts to evaluate their performance. Table 2 shows the results. The first row is the result of directly using the pre-trained translator to predict continuous facial parameters, and the second row is the result of fine-tuning translator to predict parameters. The fine-tuned one can achieve a higher CLIP score, which indicates the necessity of fine-tuning. The CLIP scores of only using the evolution search and the full version of T2P are shown in the third and fourth rows, respectively. The full version of T2P achieves the highest CLIP score because it can search both continuous and discrete facial parameters to create better in-game characters." 
+ } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "21019" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 55, + 70, + 282, + 167 + ], + "blocks": [ + { + "bbox": [ + 55, + 70, + 282, + 167 + ], + "lines": [ + { + "bbox": [ + 55, + 70, + 282, + 167 + ], + "spans": [ + { + "bbox": [ + 55, + 70, + 282, + 167 + ], + "type": "image", + "image_path": "dec700e641a5a823ea484affc9a91737b1535d790683cc60be23c53e57b431f8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 172, + 287, + 195 + ], + "lines": [ + { + "bbox": [ + 47, + 172, + 287, + 195 + ], + "spans": [ + { + "bbox": [ + 47, + 172, + 287, + 195 + ], + "type": "text", + "content": "Figure 7. In-game characters created by the proposed T2P given complicated prompts." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 55, + 205, + 282, + 434 + ], + "blocks": [ + { + "bbox": [ + 55, + 205, + 282, + 434 + ], + "lines": [ + { + "bbox": [ + 55, + 205, + 282, + 434 + ], + "spans": [ + { + "bbox": [ + 55, + 205, + 282, + 434 + ], + "type": "image", + "image_path": "28c5b6378145d59a4612625316da84833e3b1c7d6f7aeeff04d4c8ecb709b524.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 440, + 287, + 475 + ], + "lines": [ + { + "bbox": [ + 47, + 440, + 287, + 475 + ], + "spans": [ + { + "bbox": [ + 47, + 440, + 287, + 475 + ], + "type": "text", + "content": "Figure 8. Comparison of AvatarCLIP, DreamFusion, and the proposed T2P. Each column shows the 3D characters created by these methods given the same text prompt." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 52, + 488, + 282, + 619 + ], + "blocks": [ + { + "bbox": [ + 52, + 488, + 282, + 619 + ], + "lines": [ + { + "bbox": [ + 52, + 488, + 282, + 619 + ], + "spans": [ + { + "bbox": [ + 52, + 488, + 282, + 619 + ], + "type": "image", + "image_path": "881609561241c57c2c1c30fd174d2c335053c84df12dff839c84ce682b8e5e59.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 627, + 287, + 651 + ], + "lines": [ + { + "bbox": [ + 47, + 627, + 287, + 651 + ], + "spans": [ + { + "bbox": [ + 47, + 627, + 287, + 651 + ], + "type": "text", + "content": "Figure 9. Curves of CLIP scores increasing within 300s under three different module settings." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 670, + 211, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 670, + 211, + 685 + ], + "spans": [ + { + "bbox": [ + 47, + 670, + 211, + 685 + ], + "type": "text", + "content": "4.4. 
Facial Parameter Interpolation" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "type": "text", + "content": "Since the generated characters are controlled by parameters with explicit physical meanings, users can further ad" + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 326, + 70, + 526, + 143 + ], + "blocks": [ + { + "bbox": [ + 326, + 70, + 526, + 143 + ], + "lines": [ + { + "bbox": [ + 326, + 70, + 526, + 143 + ], + "spans": [ + { + "bbox": [ + 326, + 70, + 526, + 143 + ], + "type": "table", + "html": "
TranslatorEvolution SearchCLIP Score
fixed×27.29 ± 3.10
fine-tuned×34.85 ± 3.15
×35.31 ± 2.26
fine-tuned35.72 ± 2.70
", + "image_path": "7d44d56dfbba56c6d895f6cfad1234fd7b435a79f9030236a53e81d2e884a589.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 148, + 545, + 171 + ], + "lines": [ + { + "bbox": [ + 306, + 148, + 545, + 171 + ], + "spans": [ + { + "bbox": [ + 306, + 148, + 545, + 171 + ], + "type": "text", + "content": "Table 2. Results of ablation studies. Four versions of the proposed method are compared." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 312, + 182, + 541, + 303 + ], + "blocks": [ + { + "bbox": [ + 312, + 182, + 541, + 303 + ], + "lines": [ + { + "bbox": [ + 312, + 182, + 541, + 303 + ], + "spans": [ + { + "bbox": [ + 312, + 182, + 541, + 303 + ], + "type": "image", + "image_path": "974c37c5e18a83288778f5010de30eea88e2d41f91d49c30caaa5dd7948ee776.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 309, + 545, + 331 + ], + "lines": [ + { + "bbox": [ + 306, + 309, + 545, + 331 + ], + "spans": [ + { + "bbox": [ + 306, + 309, + 545, + 331 + ], + "type": "text", + "content": "Figure 10. Examples of the facial parameter interpolation of game characters." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 353, + 546, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 353, + 546, + 413 + ], + "spans": [ + { + "bbox": [ + 305, + 353, + 546, + 413 + ], + "type": "text", + "content": "just the outlook of the characters as they want. One can also interpolate different facial parameters to create a new character, as shown in Fig. 10. The first row shows the interpolation between the monkey and Thanos, in which the new facial parameters are calculated as follows," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 345, + 422, + 545, + 435 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 345, + 422, + 545, + 435 + ], + "spans": [ + { + "bbox": [ + 345, + 422, + 545, + 435 + ], + "type": "interline_equation", + "content": "\\boldsymbol {x} _ {\\text {n e w}} = \\beta \\boldsymbol {x} _ {\\text {m o n k e y}} + (1 - \\beta) \\boldsymbol {x} _ {\\text {T h a n o s}}, \\tag {9}", + "image_path": "a9e51d41c4ebf6fdbe2b29dcc093965e4a157d0b7e5c731aa66e925d279fcabd.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 443, + 546, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 443, + 546, + 526 + ], + "spans": [ + { + "bbox": [ + 305, + 443, + 546, + 526 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 443, + 546, + 526 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 305, + 443, + 546, + 526 + ], + "type": "text", + "content": " is the interpolation coefficient decreasing from 1 to 0. The results in the second row of Fig. 10 show the interpolation between the monkey and Shrek. Besides, more than two characters can also be interpolated. We believe the benefits of the facial parameters controlling bone-driven game characters can give players a higher degree of freedom in character customization." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 538, + 379, + 550 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 538, + 379, + 550 + ], + "spans": [ + { + "bbox": [ + 306, + 538, + 379, + 550 + ], + "type": "text", + "content": "5. 
Conclusion" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 558, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 558, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 558, + 546, + 713 + ], + "type": "text", + "content": "We propose a novel method called \"text-to-parameter translation\" to create bone-driven in-game characters given text prompts. Our method achieves high-quality zero-shot creation of in-game characters and can search both continuous and discrete facial parameters in a unified framework. The proposed text-driven framework is flexible and time-saving for users, and the created bone-driven characters with physically meaningful facial parameters are convenient for users to further edit as they want. Experimental results show our method achieves high-quality and vivid zero-shot text-driven game character auto-creation and outperforms other SOTA text-to-3D generation methods in terms of objective evaluations, speed, and subjective evaluations." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "21020" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "type": "text", + "content": "[1] Volker Blanz and Thomas Vetter. A morphable model for the synthesis of 3d faces. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, pages 187-194, 1999. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 136, + 288, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 136, + 288, + 190 + ], + "spans": [ + { + "bbox": [ + 53, + 136, + 288, + 190 + ], + "type": "text", + "content": "[2] James Booth, Epameinondas Antonakos, Stylianos Ploumpis, George Trigeorgis, Yannis Panagakis, and Stefanos Zafeiriou. 3d face morphable models\" in-the-wild\". In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 48-57, 2017. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 191, + 288, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 191, + 288, + 235 + ], + "spans": [ + { + "bbox": [ + 53, + 191, + 288, + 235 + ], + "type": "text", + "content": "[3] Igor Borovikov, Karine Levonyan, Jon Rein, Pawel Wrotek, and Nitish Victor. Applied monocular reconstruction of parametric faces with domain engineering. arXiv preprint arXiv:2208.02935, 2022. 
2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 236, + 287, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 236, + 287, + 269 + ], + "spans": [ + { + "bbox": [ + 53, + 236, + 287, + 269 + ], + "type": "text", + "content": "[4] Léon Bottou. Large-scale machine learning with stochastic gradient descent. In Proceedings of COMPSTAT'2010, pages 177-186. Springer, 2010. 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 270, + 287, + 302 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 270, + 287, + 302 + ], + "spans": [ + { + "bbox": [ + 53, + 270, + 287, + 302 + ], + "type": "text", + "content": "[5] Zehranaz Canfes, M Furkan Atasoy, Alara Dirik, and Pinar Yanardag. Text and image guided 3d avatar generation and manipulation. arXiv preprint arXiv:2202.06079, 2022. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 303, + 287, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 303, + 287, + 346 + ], + "spans": [ + { + "bbox": [ + 53, + 303, + 287, + 346 + ], + "type": "text", + "content": "[6] Chen Cao, Yanlin Weng, Shun Zhou, Yiying Tong, and Kun Zhou. Facewarehouse: A 3d facial expression database for visual computing. IEEE Transactions on Visualization and Computer Graphics, 20(3):413-425, 2013. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 347, + 287, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 347, + 287, + 392 + ], + "spans": [ + { + "bbox": [ + 53, + 347, + 287, + 392 + ], + "type": "text", + "content": "[7] Pengfei Dou, Shishir K Shah, and Ioannis A Kakadiaris. End-to-end 3d face reconstruction with deep neural networks. In proceedings of the IEEE conference on computer vision and pattern recognition, pages 5908-5917, 2017. 2, 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 392, + 287, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 392, + 287, + 435 + ], + "spans": [ + { + "bbox": [ + 53, + 392, + 287, + 435 + ], + "type": "text", + "content": "[8] Rinon Gal, Or Patashnik, Haggai Maron, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators. ACM Transactions on Graphics (TOG), 41(4):1-13, 2022. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 436, + 287, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 436, + 287, + 501 + ], + "spans": [ + { + "bbox": [ + 53, + 436, + 287, + 501 + ], + "type": "text", + "content": "[9] Thomas Gering, Andreas Morel-Forster, Clemens Blumer, Bernhard Egger, Marcel Luthi, Sandro Schonborn, and Thomas Vetter. Morphable face models—an open framework. In 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018), pages 75–82. IEEE, 2018. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 502, + 287, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 502, + 287, + 546 + ], + "spans": [ + { + "bbox": [ + 48, + 502, + 287, + 546 + ], + "type": "text", + "content": "[10] Fangzhou Hong, Mingyuan Zhang, Liang Pan, Zhongang Cai, Lei Yang, and Ziwei Liu. Avatarclip: Zero-shot text-driven generation and animation of 3d avatars. arXiv preprint arXiv:2205.08535, 2022. 
2, 3, 6, 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 547, + 287, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 547, + 287, + 590 + ], + "spans": [ + { + "bbox": [ + 48, + 547, + 287, + 590 + ], + "type": "text", + "content": "[11] Gao Huang, Yixuan Li, Geoff Pleiss, Zhuang Liu, John E Hopcroft, and Kilian Q Weinberger. Snapshot ensembles: Train 1, get m for free. arXiv preprint arXiv:1704.00109, 2017. 4" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 592, + 287, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 592, + 287, + 658 + ], + "spans": [ + { + "bbox": [ + 48, + 592, + 287, + 658 + ], + "type": "text", + "content": "[12] Patrik Huber, Guosheng Hu, Rafael Tena, Pouria Mortazavian, P Koppen, William J Christmas, Matthias Ratsch, and Josef Kittler. A multiresolution 3d morphable face model and fitting framework. In Proceedings of the 11th international joint conference on computer vision, imaging and computer graphics theory and applications, 2016. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 658, + 287, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 287, + 714 + ], + "type": "text", + "content": "[13] Aaron S Jackson, Adrian Bulat, Vasileios Argyriou, and Georgios Tzimiropoulos. Large pose 3d face reconstruction from a single image via direct volumetric cnn regression. In Proceedings of the IEEE international conference on computer vision, pages 1031-1039, 2017. 3" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 714 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "type": "text", + "content": "[14] Ajay Jain, Ben Mildenhall, Jonathan T Barron, Pieter Abbeel, and Ben Poole. Zero-shot text-guided object generation with dream fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 867-876, 2022. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 129, + 545, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 545, + 173 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 545, + 173 + ], + "type": "text", + "content": "[15] Yuming Jiang, Ziqi Huang, Xingang Pan, Chen Change Loy, and Ziwei Liu. Talk-to-edit: Fine-grained facial editing via dialog. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 13799-13808, 2021. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 175, + 545, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 175, + 545, + 218 + ], + "spans": [ + { + "bbox": [ + 307, + 175, + 545, + 218 + ], + "type": "text", + "content": "[16] Yuming Jiang, Shuai Yang, Haonan Qju, Wayne Wu, Chen Change Loy, and Ziwei Liu. Text2human: Text-driven controllable human image generation. ACM Transactions on Graphics (TOG), 41(4):1-11, 2022. 
3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 219, + 545, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 219, + 545, + 262 + ], + "spans": [ + { + "bbox": [ + 307, + 219, + 545, + 262 + ], + "type": "text", + "content": "[17] Nasir Khalid, Tianhao Xie, Eugene Belilovsky, and Tiberiu Popa. Clip-mesh: Generating textured meshes from text using pretrained image-text models. ACM Transactions on Graphics (TOG), Proc. SIGGRAPH Asia, 2022. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 264, + 545, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 264, + 545, + 296 + ], + "spans": [ + { + "bbox": [ + 307, + 264, + 545, + 296 + ], + "type": "text", + "content": "[18] Gwanghyun Kim and Jong Chul Ye. Diffusionclip: Text-guided image manipulation using diffusion models. 2021. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 297, + 545, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 297, + 545, + 331 + ], + "spans": [ + { + "bbox": [ + 307, + 297, + 545, + 331 + ], + "type": "text", + "content": "[19] Tianye Li, Timo Bolkart, Michael J Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4d scans. ACM Trans. Graph., 36(6):194-1, 2017. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 332, + 545, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 332, + 545, + 376 + ], + "spans": [ + { + "bbox": [ + 307, + 332, + 545, + 376 + ], + "type": "text", + "content": "[20] Jiangke Lin, Yi Yuan, and Zhengxia Zou. Meingame: Create a game character face from a single portrait. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 311-319, 2021. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 377, + 545, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 377, + 545, + 421 + ], + "spans": [ + { + "bbox": [ + 307, + 377, + 545, + 421 + ], + "type": "text", + "content": "[21] Yuqiao Liu, Yanan Sun, Bing Xue, Mengjie Zhang, Gary G Yen, and Kay Chen Tan. A survey on evolutionary neural architecture search. IEEE transactions on neural networks and learning systems, 2021. 4" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 422, + 545, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 422, + 545, + 454 + ], + "spans": [ + { + "bbox": [ + 307, + 422, + 545, + 454 + ], + "type": "text", + "content": "[22] Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts. arXiv preprint arXiv:1608.03983, 2016. 4" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 456, + 545, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 456, + 545, + 510 + ], + "spans": [ + { + "bbox": [ + 307, + 456, + 545, + 510 + ], + "type": "text", + "content": "[23] Oscar Michel, Roi Bar-On, Richard Liu, Sagie Benaim, and Rana Hanocka. Text2mesh: Text-driven neural stylization for meshes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13492-13502, 2022. 
3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 512, + 545, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 512, + 545, + 566 + ], + "spans": [ + { + "bbox": [ + 307, + 512, + 545, + 566 + ], + "type": "text", + "content": "[24] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 567, + 545, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 567, + 545, + 622 + ], + "spans": [ + { + "bbox": [ + 307, + 567, + 545, + 622 + ], + "type": "text", + "content": "[25] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2085–2094, 2021. 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 624, + 545, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 624, + 545, + 678 + ], + "spans": [ + { + "bbox": [ + 307, + 624, + 545, + 678 + ], + "type": "text", + "content": "[26] Weilong Peng, Zhiyong Feng, Chao Xu, and Yong Su. Parametric t-spline face morphable model for detailed fitting in shape subspace. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6139-6147, 2017. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 679, + 545, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 679, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 307, + 679, + 545, + 714 + ], + "type": "text", + "content": "[27] Ben Poole, Ajay Jain, Jonathan T Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. arXiv preprint arXiv:2209.14988, 2022. 3, 6, 7" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "21021" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 139 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 139 + ], + "type": "text", + "content": "[28] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pages 8748-8763. PMLR, 2021. 5" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 140, + 288, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 140, + 288, + 184 + ], + "spans": [ + { + "bbox": [ + 48, + 140, + 288, + 184 + ], + "type": "text", + "content": "[29] Alec Radford, Luke Metz, and Soumith Chintala. Unsupervised representation learning with deep convolutional generative adversarial networks. arXiv preprint arXiv:1511.06434, 2015. 
3, 5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 185, + 287, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 185, + 287, + 228 + ], + "spans": [ + { + "bbox": [ + 48, + 185, + 287, + 228 + ], + "type": "text", + "content": "[30] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 230, + 287, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 230, + 287, + 284 + ], + "spans": [ + { + "bbox": [ + 48, + 230, + 287, + 284 + ], + "type": "text", + "content": "[31] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In International Conference on Machine Learning, pages 8821-8831. PMLR, 2021. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 286, + 287, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 286, + 287, + 330 + ], + "spans": [ + { + "bbox": [ + 48, + 286, + 287, + 330 + ], + "type": "text", + "content": "[32] Scott Reed, Zeynep Akata, Xinchen Yan, Lajanugen Logeswaran, Bernt Schiele, and Honglak Lee. Generative adversarial text to image synthesis. In International conference on machine learning, pages 1060-1069. PMLR, 2016. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 331, + 287, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 331, + 287, + 376 + ], + "spans": [ + { + "bbox": [ + 48, + 331, + 287, + 376 + ], + "type": "text", + "content": "[33] Elad Richardson, Matan Sela, Roy Or-El, and Ron Kimmel. Learning detailed face reconstruction from a single image. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1259–1268, 2017. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 376, + 287, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 376, + 287, + 432 + ], + "spans": [ + { + "bbox": [ + 48, + 376, + 287, + 432 + ], + "type": "text", + "content": "[34] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10684-10695, 2022. 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 434, + 287, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 434, + 287, + 476 + ], + "spans": [ + { + "bbox": [ + 48, + 434, + 287, + 476 + ], + "type": "text", + "content": "[35] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. Advances in neural information processing systems, 29, 2016. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 478, + 287, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 478, + 287, + 520 + ], + "spans": [ + { + "bbox": [ + 48, + 478, + 287, + 520 + ], + "type": "text", + "content": "[36] Tim Salimans, Jonathan Ho, Xi Chen, Szymon Sidor, and Ilya Sutskever. Evolution strategies as a scalable alternative to reinforcement learning. arXiv preprint arXiv:1703.03864, 2017. 
4" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 522, + 287, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 522, + 287, + 588 + ], + "spans": [ + { + "bbox": [ + 48, + 522, + 287, + 588 + ], + "type": "text", + "content": "[37] Aditya Sanghi, Hang Chu, Joseph G Lambourne, Ye Wang, Chin-Yi Cheng, Marco Fumero, and Kamal Rahimi Malekshan. Clip-forge: Towards zero-shot text-to-shape generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18603-18613, 2022. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 590, + 287, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 287, + 645 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 287, + 645 + ], + "type": "text", + "content": "[38] Tianyang Shi, Yi Yuan, Changjie Fan, Zhengxia Zou, Zhenwei Shi, and Yong Liu. Face-to-parameter translation for game character auto-creation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 161–170, 2019. 2, 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 647, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 287, + 689 + ], + "type": "text", + "content": "[39] Tianyang Shi, Zhengxia Zou, Zhenwei Shi, and Yi Yuan. Neural rendering for game character auto-creation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2020. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "type": "text", + "content": "[40] Tianyang Shi, Zhengxia Zou, Xinhui Song, Zheng Song, Changjian Gu, Changjie Fan, and Yi Yuan. Neutral face" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "type": "text", + "content": "game character auto-creation via pokerface-gan. In Proceedings of the 28th ACM International Conference on Multimedia, pages 3201–3209, 2020. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 106, + 545, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 106, + 545, + 160 + ], + "spans": [ + { + "bbox": [ + 307, + 106, + 545, + 160 + ], + "type": "text", + "content": "[41] Tianyang Shi, Zhengxia Zuo, Yi Yuan, and Changjie Fan. Fast and robust face-to-parameter translation for game character auto-creation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 1733–1740, 2020. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 162, + 545, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 162, + 545, + 228 + ], + "spans": [ + { + "bbox": [ + 307, + 162, + 545, + 228 + ], + "type": "text", + "content": "[42] Ayush Tewari, Michael Zollhofer, Hyeongwoo Kim, Pablo Garrido, Florian Bernard, Patrick Perez, and Christian Theobalt. Mofa: Model-based deep convolutional face autoencoder for unsupervised monocular reconstruction. 
In Proceedings of the IEEE International Conference on Computer Vision Workshops, pages 1274-1283, 2017. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 228, + 545, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 228, + 545, + 282 + ], + "spans": [ + { + "bbox": [ + 307, + 228, + 545, + 282 + ], + "type": "text", + "content": "[43] Anh Tuan Tran, Tal Hassner, Iacopo Masi, Eran Paz, Yuval Nirkin, and Gerard Medioni. Extreme 3d face reconstruction: Seeing through occlusions. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3935-3944, 2018. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 283, + 545, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 283, + 545, + 338 + ], + "spans": [ + { + "bbox": [ + 307, + 283, + 545, + 338 + ], + "type": "text", + "content": "[44] Anh Tuan Tran, Tal Hassner, Iacopo Masi, and Gerard Medioni. Regressing robust and discriminative 3d morphable models with a very deep neural network. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5163-5172, 2017. 2, 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 338, + 545, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 338, + 545, + 381 + ], + "spans": [ + { + "bbox": [ + 307, + 338, + 545, + 381 + ], + "type": "text", + "content": "[45] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. 5" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 383, + 545, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 383, + 545, + 437 + ], + "spans": [ + { + "bbox": [ + 307, + 383, + 545, + 437 + ], + "type": "text", + "content": "[46] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Clip-nerf: Text-and-image driven manipulation of neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3835-3844, 2022. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 438, + 545, + 481 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 438, + 545, + 481 + ], + "spans": [ + { + "bbox": [ + 307, + 438, + 545, + 481 + ], + "type": "text", + "content": "[47] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. arXiv preprint arXiv:2106.10689, 2021. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 482, + 545, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 482, + 545, + 536 + ], + "spans": [ + { + "bbox": [ + 307, + 482, + 545, + 536 + ], + "type": "text", + "content": "[48] Tianyi Wei, Dongdong Chen, Wenbo Zhou, Jing Liao, Zhentao Tan, Lu Yuan, Weiming Zhang, and Nenghai Yu. Hairclip: Design your hair by text and reference image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18072-18081, 2022. 
3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 537, + 545, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 537, + 545, + 580 + ], + "spans": [ + { + "bbox": [ + 307, + 537, + 545, + 580 + ], + "type": "text", + "content": "[49] Lior Wolf, Yaniv Taigman, and Adam Polyak. Unsupervised creation of parameterized avatars. In Proceedings of the IEEE International Conference on Computer Vision, pages 1530-1538, 2017. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 582, + 545, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 582, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 307, + 582, + 545, + 635 + ], + "type": "text", + "content": "[50] Weihao Xia, Yujiu Yang, Jing-Hao Xue, and Baoyuan Wu. Tedigan: Text-guided diverse face image generation and manipulation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2256-2265, 2021. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 636, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 636, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 636, + 545, + 712 + ], + "type": "text", + "content": "[51] Zipeng Xu, Tianwei Lin, Hao Tang, Fu Li, Dongliang He, Nicu Sebe, Radu Timofte, Luc Van Gool, and Errui Ding. Predict, prevent, and evaluate: Disentangled text-driven image manipulation empowered by pre-trained vision-language model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18229-18238, 2022. 3" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "21022" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 171 + ], + "type": "list", + "angle": 0, + "index": 2, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "text", + "content": "[52] Yingchen Yu, Fangneng Zhan, Rongliang Wu, Jiahui Zhang, Shijian Lu, Miaomiao Cui, Xuansong Xie, Xian-Sheng Hua, and Chunyan Miao. Towards counterfactual image manipulation via clip. In Proceedings of the 30th ACM International Conference on Multimedia, pages 3637-3645, 2022. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 129, + 287, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 129, + 287, + 171 + ], + "spans": [ + { + "bbox": [ + 48, + 129, + 287, + 171 + ], + "type": "text", + "content": "[53] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. International Journal of Computer Vision, 130(9):2337-2348, 2022. 
4" + } + ] + } + ], + "index": 1 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "21023" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/expOSE_ Accurate Initialization-Free Projective Factorization Using Exponential Regularization/6f578c7a-ccab-49cb-bc75-04e328397fea_content_list.json b/2023/expOSE_ Accurate Initialization-Free Projective Factorization Using Exponential Regularization/6f578c7a-ccab-49cb-bc75-04e328397fea_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..b77067f913b7d5055409ed018b6392527b6d8664 --- /dev/null +++ b/2023/expOSE_ Accurate Initialization-Free Projective Factorization Using Exponential Regularization/6f578c7a-ccab-49cb-bc75-04e328397fea_content_list.json @@ -0,0 +1,1811 @@ +[ + { + "type": "text", + "text": "expOSE: Accurate Initialization-Free Projective Factorization using Exponential Regularization", + "text_level": 1, + "bbox": [ + 78, + 130, + 888, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jose Pedro Iglesias1, Amanda Nilsson2, Carl Olsson1,2", + "bbox": [ + 266, + 202, + 696, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1Chalmers University of Technology, Sweden", + "bbox": [ + 303, + 227, + 665, + 246 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ Lund University, Sweden", + "bbox": [ + 380, + 246, + 589, + 263 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 297, + 312, + 314 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Bundle adjustment is a key component in practically all available Structure from Motion systems. While it is crucial for achieving accurate reconstruction, convergence to the right solution hinges on good initialization. The recently introduced factorization-based $pOSE$ methods formulate a surrogate for the bundle adjustment error without reliance on good initialization. In this paper, we show that $pOSE$ has an undesirable penalization of large depths. To address this we propose $expOSE$ which has an exponential regularization that is negligible for positive depths. To achieve efficient inference we use a quadratic approximation that allows an iterative solution with VarPro. Furthermore, we extend the method with radial distortion robustness by decomposing the Object Space Error into radial and tangential components. Experimental results confirm that the proposed method is robust to initialization and improves reconstruction quality compared to state-of-the-art methods even without bundle adjustment refinement.", + "bbox": [ + 73, + 330, + 473, + 603 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 617, + 209, + 633 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Factorization is a long-established method in Structure from Motion (SfM). It originates from [38] by Tomasi and Kanade showing how, under the orthographic camera model, structure and motion can be computed simultaneously from an image sequence using singular value decomposition (SVD). 
The method was later reformulated for affine cameras, including weak perspective projection [32]. Strum and Triggs [36] further extended factorization to projective cameras by accounting for projective depths.", + "bbox": [ + 73, + 643, + 468, + 779 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "One appeal of these factorization algorithms is they can yield a closed-form solution by using the SVD. It is however only possible to use the SVD if every considered scene", + "bbox": [ + 75, + 780, + 468, + 825 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/e5965866ff80e2755348a5a545feec6711b8d143417fa7dc4355807b775ad01b.jpg", + "image_caption": [ + "Figure 1. (Left) Examples of two of the images in the Fountain sequence. (Right) Reconstruction obtained with expOSE (top) and pOSE (bottom) for 3 different values of $\\eta$ . Our method achieves the same convergence rate as pOSE while having a higher reconstruction quality and being less dependent on the choice of $\\eta$ ." + ], + "image_footnote": [], + "bbox": [ + 506, + 301, + 640, + 452 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/878e459cf6478bb9da95e7f536632ff8ac1e3886a3f05018ab6b5765ba1809d3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 651, + 300, + 887, + 453 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "point is visible throughout the whole image sequence. In cases of missing data, the SVD can be replaced with iterative methods. Simple splitting methods [4,8,22] are able to regularize singular values when computing a proximal operator, but can give rather erroneous solutions because of a low convergence rate close to the optimum. [5, 8] give an idea of convex formulation using the nuclear norm, but are usually too weak for SfM in the presence of noise [19, 30]. The papers [1, 9, 10, 31] suggest different ways to assure that direct bilinear optimization only has a global minimum. However, SfM problems with local minima do not fulfill their required conditions [3].", + "bbox": [ + 496, + 566, + 892, + 747 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "It was recently shown by Hong et al. [14-17] that direct bilinear estimation of structure and motion can be made robust to local minima in combination with the Variable Projection (VarPro) method. In [15] the objective is exchanged for the Pseudo Object Space Error (pOSE) which is a tradeoff between the object space error and a quadratic regularization term. This was later extended to a radial distortion invariant version RpOSE, presented in [18]. With their bilinear factorization structure and a large basin of convergence when using VarPro, these pOSE models tend to find", + "bbox": [ + 496, + 750, + 893, + 901 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1This work has been funded by the Swedish Research Council (grant no. 2018-05375), the Swedish Foundation for Strategic Research project, Semantic Mapping and Visual Navigation for Smart Robots (grant no. 
RIT15-0038), and the Wallenberg AI, Autonomous Systems and Software Program (WASP).", + "bbox": [ + 75, + 838, + 468, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "8959", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "a global minimum independently of the initialization. Additionally, both pOSE and RpOSE have in [18] been shown to be local approximations of the reprojection error, enabling iterative refinement to the maximum likelihood solution.", + "bbox": [ + 75, + 90, + 468, + 150 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we show that the regularization term in the pOSE formulation overly penalizes large positive depths and can thereby limit the range of feasible depths too much to achieve satisfactory solutions. We instead propose regularization with an exponential penalty that is negligible for positive depths. To achieve efficient inference we use a quadratic approximation of the exponential term suitable for optimization with VarPro. Moreover, we extend the method with radial distortion robustness by decomposing the OSE into radial and tangent components.", + "bbox": [ + 75, + 151, + 470, + 301 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In short, the main contributions of this paper are:", + "bbox": [ + 96, + 303, + 419, + 316 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We investigate the pOSE models' undesirable penalization of large depths and propose expOSE which has negligible regularization of positive depths;", + "- We formulate a quadratic approximation of the exponential regularization term in expOSE to make it suitable for optimization with VarPro and show that, with random initialization, the model achieves convergence rates similar to pOSE with significantly higher reconstruction quality;", + "- We extend expOSE with radial distortion robustness by decomposing the Object Space Error (OSE) into radial and tangent components and propose an SfM pipeline that is able to obtain a complete and accurate Euclidean reconstruction from uncalibrated cameras." + ], + "bbox": [ + 94, + 323, + 468, + 550 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Reconstruction Objectives", + "text_level": 1, + "bbox": [ + 76, + 561, + 321, + 579 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this section, we illustrate the problems with direct optimization of reprojection error and discuss how this is addressed using the pOSE model [15]. We then present our exponential regularization and show how this addresses the limitations of the pOSE model.", + "bbox": [ + 75, + 587, + 468, + 662 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Reprojection Error and Cheirality", + "text_level": 1, + "bbox": [ + 76, + 670, + 377, + 686 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Bundle adjustment [12, 39] is the standard routine when it comes to solving the Structure-from-Motion problem. 
Given measured point projections $m_{ij}$ the goal is to attempt to minimize", + "bbox": [ + 75, + 694, + 468, + 751 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {i j} \\left\\| \\boldsymbol {m} _ {i j} - \\frac {\\boldsymbol {x} _ {i j}}{z _ {i j}} \\right\\| ^ {2}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 202, + 750, + 468, + 790 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $\\left[ \\begin{array}{c} \\boldsymbol{x}_{ij} \\\\ z_{ij} \\end{array} \\right] = P_i U_j$ . Here $\\boldsymbol{x}_{ij}$ is a 2 vector, $z_{ij}$ is a number, referred to as the projective depth, $P_i$ is a $3 \\times 4$ camera matrix and $U_i$ is a $4 \\times 1$ vector containing homogeneous coordinates of the projected 3D point. Under the assumption of Gaussian image noise, this gives the maximal likelihood estimate of the camera matrices and 3D points [12].", + "bbox": [ + 75, + 794, + 468, + 900 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/9a698cec3c2119d3eb24916444caab2a3ba0f6b7a79f0c218b5c8f41864c8c0b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 540, + 88, + 733, + 199 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/a19912a48bd7222597cb5fab164211f60ce7d2e133bb513f3a479e5f51d9d917.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 779, + 108, + 843, + 195 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/b36e17128f4398513525dca37cb88b5fe33e4e82687f6e8c67a6563f95c4e1ba.jpg", + "image_caption": [ + "Figure 2. Left: Objective values of the reprojection error (blue), the pOSE error $(\\eta = 0.1, \\text{red})$ and our proposed formulation $(\\eta = 0.1, \\text{yellow})$ on the lines $(1 - t)(0.5, 0, -1) + t(0.5, 0, 1)$ (top) and $(1 - t)(-0.5, 0, -1) + t(0.5, 0, 1)$ (bottom) when $m = (0.5, 0)$ . Note that the reprojection error is undefined at $z = 0$ since this corresponds to the camera center. Right: Corresponding camera and sampling line." + ], + "image_footnote": [], + "bbox": [ + 540, + 199, + 733, + 306 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/0abe454e3086a43e7f3d82666ed2c9171206be9e398f081604647c1851011309.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 776, + 215, + 844, + 304 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "It is well known that optimizing (1) is difficult and requires good initialization to achieve convergence to the right solution. One of the difficulties is the division of $\\boldsymbol{x}_{ij}$ by $z_{ij}$ . This creates a barrier of objective values that goes to infinity and needs to be traversed when for example moving from $(\\boldsymbol{x}_{ij}, -z_{ij})$ to $(\\boldsymbol{x}_{ij}, z_{ij})$ . The blue curve of Figure 2 (top) shows a 2D example of this barrier. Here we used $m = (0.5, 0)$ and sampled the function $\\left( m - \\frac{x}{z} \\right)^2$ on the line segment $(\\boldsymbol{x}, z) = (1 - t)(0.5, 0, -1) + t(0.5, 0, 1)$ . The best value over this line is at $t = 1$ which gives $(\\boldsymbol{x}, z) = (0.5, 0, 1)$ . For comparison, we also plot the corresponding values of the pOSE model [15] (red) and the proposed formulation that we will describe below (yellow). In a calibrated setting the interpretation of $z_{ij}$ is the depth [12] of the observed 3D point. Hence, in practical cases, where observed points are in front of the camera, there is usually no reason to allow solutions with negative $z_{ij}$ . 
In the uncalibrated case $z_{ij}$ is referred to as a projective depth. It can be shown that when the data is noise free (with sufficiently many visible projections) there is always a solution where the projective depths are all positive [26] if the observed points are in front of the camera. Moreover, any other solution is projectively equivalent to this one, meaning that there is a projective 3D transformation that makes the projective depths positive [25, 26].", + "bbox": [ + 496, + 441, + 890, + 821 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. The pOSE Model", + "text_level": 1, + "bbox": [ + 500, + 830, + 669, + 847 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In view of the above, constraining the problem to positive depths is no practical restriction. Still finding a good starting solution where all depths are positive is not a trivial", + "bbox": [ + 498, + 854, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "8960", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "issue. In [15] the objective (1) is exchanged for the object space error (OSE)", + "bbox": [ + 75, + 90, + 468, + 121 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\ell_ {\\text {O S E}} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right) = \\left\\| z _ {i j} \\boldsymbol {m} _ {i j} - \\boldsymbol {x} _ {i j} \\right\\| ^ {2}. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 153, + 126, + 468, + 143 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Here, the scale-invariant residual of (1) has been replaced with a linear error allowing points to switch from negative to positive projective depths. It can be shown [18] that the OSE residual $z_{i} \\pmb{m}_{ij} - \\pmb{x}_{ij}$ is the first order Taylor expansion of the projective residual $\\pmb{m}_{ij} - \\frac{\\pmb{x}_{ij}}{z_{ij}}$ around $(\\pmb{x}_{ij}, z_{ij}) = (\\pmb{m}_{ij}, 1)$ , and it is therefore in some sense the closest linear approximation that we can find. On the downside, the OSE is clearly minimized by the trivial solution $\\pmb{x}_{ij} = 0$ , $z_{ij} = 0$ for all $i, j$ . Therefore [15] adds the quadratic regularization", + "bbox": [ + 75, + 150, + 470, + 289 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\ell_ {\\text {a f f}} \\left(\\boldsymbol {x} _ {i j}\\right) = \\left\\| \\boldsymbol {x} _ {i j} - \\boldsymbol {m} _ {i j} \\right\\| ^ {2}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 301, + 468, + 319 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "which penalizes the trivial zero solution. Note that (3) and (2) both vanish when $(\\pmb{x}_{ij},z_{ij}) = (\\pmb{m}_{ij},1)$ . The proposed pOSE objective", + "bbox": [ + 75, + 323, + 468, + 368 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {i j} \\left((1 - \\eta) \\ell_ {\\text {O S E}} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right) + \\eta \\ell_ {\\text {a f f}} \\left(\\boldsymbol {x} _ {i j}\\right)\\right), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 133, + 375, + 468, + 407 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $0 < \\eta < 1$ , therefore allows arbitrary starting solutions but penalizes projective depths that deviate significantly from 1. The red curve of Figure 2 (top) shows pOSE values (with $\\eta = 0.1$ ) over the line $(1 - t)(0.5, 0, -1) + t(0.5, 0, 1)$ . 
In contrast to the reprojection error, the pOSE formulation does not give any barrier at $z = 0$ . It is experimentally shown in [15] that when optimized using VarPro [16] this leads to a method that converges to the right solution in the vast majority of cases starting from random initialization (including starting points with negative depths). Note that if we column-stack the camera matrices $P_{i}$ into a matrix $P$ with 4 columns, and similarly row-stack the 3D points into a matrix $U$ with 4 rows, the resulting product $X = PU$ is a matrix of rank 4. We can therefore formulate the pOSE objective as a low-rank recovery problem", + "bbox": [ + 75, + 411, + 468, + 638 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {\\operatorname {r a n k} (X) = 4} \\| \\mathcal {A} (X) - b \\| ^ {2}, \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 189, + 643, + 468, + 667 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\mathcal{A}$ is a linear operator. It is well known from compressed sensing that such formulations can often be solved optimally [6, 9-11, 19, 28, 30, 33]. The optimization problem becomes particularly easy for large values of $\\eta$ . On the other hand, the regularization term also introduces an undesirable penalty for large (positive) depths which may constrain the range of feasible depths too much to achieve satisfactory solutions. The bottom images in Figure 2 show the same evaluation as the top ones but over the line $(1 - t)(-0.5, 0, -1) + t(0.5, 0, 1)$ . All of the points on this line give 0 reprojection error (except at the camera center $(0, 0, 0)$ for which the projection is undefined). The pOSE formulation (red curve) clearly penalizes solutions of small or negative projective depth but its undesirable growth for large positive values is also visible.", + "bbox": [ + 75, + 672, + 470, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. Exponential Regularization", + "text_level": 1, + "bbox": [ + 500, + 90, + 746, + 107 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this paper, we instead propose to regularize the depth using an exponential function (yellow curves in Figure 2). Specifically, we replace the affine term (3) with", + "bbox": [ + 498, + 113, + 890, + 159 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\ell_ {\\exp} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right) = e ^ {- \\left(\\frac {\\boldsymbol {m} _ {i j} \\boldsymbol {x} _ {i j} + z _ {i j}}{\\sqrt {\\| \\boldsymbol {m} _ {i j} \\| ^ {2} + 1}}\\right)}. \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 578, + 167, + 890, + 205 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The term $\\frac{m_{ij}x_{ij} + z_{ij}}{\\sqrt{\\|m_{ij}\\|^2 + 1}}$ is the length (with sign) of the projection of the vector $(\\pmb{x}_{ij},z_{ij})$ onto $(m_{ij},1)$ . Note that its sign is negative when the angle between $(\\pmb{x}_{ij},z_{ij})$ and $(m_{ij},1)$ is larger than $90^{\\circ}$ . The exponential function will penalize such values heavily. Still, the penalty is finite for all values making it is possible to use start the optimization from anywhere. 
On the other hand for positive growing values the exponential function tends to 0 and therefore does not restrict the feasible projective depths as the affine term (3) does.", + "bbox": [ + 498, + 214, + 890, + 372 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The proposed expOSE objective is then", + "bbox": [ + 517, + 373, + 782, + 388 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\ell_ {\\text {e x p O S E}} = \\sum_ {i j} (1 - \\eta) \\ell_ {\\text {O S E}} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right) + \\eta \\ell_ {\\text {e x p}} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right). \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 508, + 398, + 890, + 431 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "At first glance it may seem as if replacing (3) with (6) will yield an ill-posed problem since large depths are hardly penalized by (6). Adding a small penalty for these values to ensure a well-posed problem may therefore be warranted. Note, however, that unless there is an exact solution (with zero reprojection errors) the OSE term is not scale invariant but has a weak shrinking bias. In practice, we empirically observe that this bias is generally enough for our proposed algorithm to converge well from random starting solutions.", + "bbox": [ + 496, + 441, + 890, + 577 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We conclude this section by noting that our proposed method is much less sensitive to parameter selection than the original pOSE model [15]. Since the shrinking bias of the OSE term is relatively weak, an increased regularization cost, due to a change of parameters, can often be compensated for by changing the scale of the reconstruction. In contrast, the choice of $\\eta$ in the original pOSE model is crucial. Figure 1 shows how $\\eta$ affects the reconstruction (more details about this figure are provided in Section 3.2).", + "bbox": [ + 496, + 578, + 890, + 714 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Optimization with VarPro", + "text_level": 1, + "bbox": [ + 500, + 727, + 743, + 743 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "One of the main benefits of the pOSE formulation [15] is that it is quadratic in the elements of $X$ . Therefore, given values for camera matrices $P$ the optimal 3D points $U^{*}(P)$ can be computed in closed form using a pseudo inverse. The VarPro method [16,27,35,41] solves the reduced problem", + "bbox": [ + 498, + 752, + 890, + 828 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {P} \\| \\mathcal {A} (P U ^ {*} (P)) - b \\| ^ {2}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 604, + 835, + 890, + 861 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "using the Levenberg-Marquardt method [12, 39]. In contrast to standard Gauss-Newton type methods that optimize", + "bbox": [ + 498, + 869, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "8961", + "bbox": [ + 482, + 944, + 513, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/0656bc076b95bd79594e43d9c2efdd1ac83972d8ac70d8edd4856df5f36cc0cc.jpg", + "image_caption": [ + "Figure 3. The exponential function and its Taylor approximation." + ], + "image_footnote": [], + "bbox": [ + 153, + 89, + 392, + 224 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "locally over both $U$ and $P$ , the main benefit of the elimination of $U$ is that dampening only needs to be applied to $P$ . 
This has been shown empirically to greatly improve convergence [14, 16]. The intuition is that small changes in $P$ will sometimes result in large changes in $U$ , but this is prevented by a dampening term which causes the algorithm to stall.", + "bbox": [ + 75, + 276, + 467, + 366 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Since the exponential regularization term is not quadratic VarPro is not directly applicable to our formulation. We, therefore, employ an iterative approach that locally approximates (6) with a quadratic function. Consider the 2nd order Taylor expansion of $e^{-\\boldsymbol{a}^T \\boldsymbol{y}}$ at a point $\\bar{\\boldsymbol{y}}$ given by", + "bbox": [ + 76, + 368, + 468, + 445 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\ne ^ {- \\boldsymbol {a} ^ {T} \\boldsymbol {y}} \\approx e ^ {- \\boldsymbol {a} ^ {T} \\bar {\\boldsymbol {y}}} \\left(1 - \\boldsymbol {a} ^ {T} (\\boldsymbol {y} - \\bar {\\boldsymbol {y}}) + \\frac {1}{2} \\left(\\boldsymbol {a} ^ {T} (\\boldsymbol {y} - \\bar {\\boldsymbol {y}})\\right) ^ {2}\\right). \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 83, + 455, + 468, + 501 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Completing squares gives the expression", + "bbox": [ + 76, + 501, + 346, + 516 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\ne ^ {- \\boldsymbol {a} ^ {T} \\boldsymbol {y}} \\approx \\frac {e ^ {- \\boldsymbol {a} ^ {T} \\bar {\\boldsymbol {y}}}}{2} \\left(\\boldsymbol {a} ^ {T} (\\boldsymbol {y} - \\bar {\\boldsymbol {y}}) - 1\\right) ^ {2} + e ^ {- \\boldsymbol {a} ^ {T} \\bar {\\boldsymbol {y}}}. \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 101, + 526, + 468, + 560 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Note that when minimizing with respect to $\\pmb{y}$ the last term is constant and can be ignored. Since the exponential function is positive the result is a weighted linear least squares term in the unknown $\\pmb{y}$ . With $\\pmb{y} = \\begin{bmatrix} \\pmb{x}_{ij} \\\\ z_{ij} \\end{bmatrix}$ and $\\pmb{a} = \\frac{1}{\\sqrt{\\|\\pmb{m}_{ij}\\|^2 + 1}} \\begin{bmatrix} \\pmb{m}_{ij} \\\\ 1 \\end{bmatrix}$ we get our approximation", + "bbox": [ + 75, + 570, + 468, + 678 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\ell} _ {\\exp} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right) \\approx \\frac {\\ell_ {\\exp} \\left(\\bar {\\boldsymbol {x}} _ {i j} , \\bar {z} _ {i j}\\right)}{2} \\left(\\frac {\\boldsymbol {m} _ {i j} ^ {T} \\Delta \\boldsymbol {x} _ {i j} + \\Delta z _ {i j}}{\\sqrt {\\| \\boldsymbol {m} _ {i j} \\| ^ {2} + 1}} - 1\\right) ^ {2}, \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 76, + 688, + 473, + 744 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\Delta \\pmb{x}_{ij} = \\pmb{x}_{ij} - \\bar{\\pmb{x}}_{ij}$ and $\\Delta z_{ij} = z_{ij} - \\bar{z}_{ij}$ . To the left in Figure 3 we show $e^{-ay}$ with $a = 1$ (blue curve), and the Taylor approximation at $\\bar{y} = 0$ (orange dashed curve). In the supplementary material, we compare level sets of the expOSE objective, its approximation, and pOSE.", + "bbox": [ + 75, + 744, + 468, + 820 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1. 
The EXPose Model", + "text_level": 1, + "bbox": [ + 76, + 829, + 264, + 845 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Replacing the exponential regularization in (7) with the quadratic approximation (11) at $\\bar{y}_{ij}$ results in a quadratic loss that can be written as $\\| \\mathcal{A}(PU) - b\\|^2$ , which can be", + "bbox": [ + 75, + 853, + 468, + 900 + ], + "page_idx": 3 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1: VarPro for solving expOSE (7)" + ], + "code_body": "Normalize image measurements by removing the mean and dividing by 3 standard deviations; \nSelect the inputs $\\eta$ , and randomly initialize elements of $P$ from a normal distribution of unit std ; \nSet $\\bar{y}_{ij} = [m_{ij}^T,1]^T$ . \nSet up A and b by approximating the exponential regularization by a quadratic form around each $\\bar{y}_{ij}$ . \nCompute U by minimizing (7) with $P$ fixed; \nSet do update $= 0$ if scheduling update of regularization is considered, otherwise do update $= 1$ .. \nwhile true do \nCompute the Jacobians $J_{P} = A(U^{T}\\otimes \\mathcal{I})\\colon J_{U} = A(\\mathcal{I}\\otimes P)$ and the residuals $r = \\operatorname {Avec}(PU) - b$ . Compute $P_{\\mathrm{new}}$ and $U_{\\mathrm{new}}$ from $J_P,J_U,$ and r as $P_{\\mathrm{new}} = P + \\Delta P$ and $U_{\\mathrm{new}} = U + \\Delta U$ ,with $\\Delta P = (J_P^T (\\mathcal{I} - J_UJ_U^\\dagger)J_P + \\lambda \\mathcal{I})^{-1}J_P^T r,$ and $\\Delta U = -J_U^\\dagger (r + J_P\\Delta P)$ . Evaluate the loss $\\ell_{\\mathrm{new}}$ . \nif $\\ell_{\\mathrm{new}} < \\ell_{\\mathrm{best}}$ then $\\ell_{\\mathrm{best}} = \\ell_{\\mathrm{new}}$ . $P\\gets P_{\\mathrm{new}}$ ; and $U\\gets U_{\\mathrm{new}}$ . if do update then Set $\\bar{y}_{ij} = P_iU_j$ . Set up A and b by approximating the regularization by a quadratic form around each $\\bar{y}_{ij}$ end \nend \nif stopping criterion then if do update then break; else do update $= 1$ end \nend", + "bbox": [ + 506, + 111, + 866, + 551 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "optimized using VarPro as described in Algorithm 1. The linear operator $\\mathcal{A}$ and the vector $b$ can be computed in each iteration based on the image measurements $\\pmb{m}_{ij}$ , the current estimations $\\bar{\\pmb{y}}_{ij}$ and $\\eta$ . For the initial approximation of the regularization, we use $\\bar{\\pmb{y}}_{ij} = (\\pmb{m}_{ij},1)$ .", + "bbox": [ + 496, + 590, + 890, + 667 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Regularization update scheduling: In order to improve the convergence of the algorithm, we propose to keep the initial quadratic approximation of the regularization (11) either for a fixed number of iterations or until convergence of the initial approximation. This delays the approximation of the exponential regularization in each iteration until a stable initial solution with positive depths is found. In Section 3.2 we show empirically the advantage of doing so.", + "bbox": [ + 496, + 670, + 890, + 791 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Data normalization: Since our regularization term is geometrically motivated and our approach replaces reprojection error with OSE it is important to use normalization of the image data to achieve a well-conditioned formulation [13]. 
Here we follow standard approaches: We first subtract the image center from all image points, then divide them with the resulting standard deviation over the image.", + "bbox": [ + 496, + 795, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "8962", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/c4d0daee857254db3e212c78e3ce7dbf0f46922611429274057e5d383e53dae4.jpg", + "image_caption": [ + "Figure 4. Comparison of convergence rate and normalized 3D error of different methods on the Dino (a) and Fountain (b) datasets. The metrics are obtained by running 100 instances starting from random initializations. In dashed we should the metrics for the pOSE baseline." + ], + "image_footnote": [], + "bbox": [ + 89, + 85, + 465, + 257 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/f904330e5b3b84f6e45f0caa92a4efcd88b3602d0aa26df42fbed5ede8d32013.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 85, + 880, + 256 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2. Performance evaluation of expOSE", + "text_level": 1, + "bbox": [ + 76, + 316, + 385, + 334 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Before presenting our model for radial distortion we evaluate the effects of using exponential regularization with the standard OSE. We use the Dino (Small) [3] (36 cameras, 319 points, $77\\%$ missing data) and Fountain [34] (11 cameras, 1167 points, $23\\%$ missing data) datasets to evaluate the performance of expOSE with varying parameters - the weight $\\eta$ and scheduling of regularization update-, and optimization strategies - VarPro, Levenberg-Marquardt (LM), and Alternating Minimization (AltMin) [7].", + "bbox": [ + 75, + 343, + 468, + 479 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The metrics used for the comparisons are convergence rate of the algorithm and relative 3D error to GT. The convergence rate is calculated by counting the number of times the algorithms converged to the lowest loss over 100 problem instances starting from random initializations (a threshold of $2\\%$ above the smallest loss value is used). The 3D error is computed as $e_{3D} = \\frac{\\|U' - U_{\\mathrm{GT}}\\|}{\\|U_{\\mathrm{GT}}\\|_F}$ where $U'$ is the result of performing projective registration of the factor $U$ to the ground-truth point cloud $U_{\\mathrm{GT}}$ . In this way, we are able to measure the quality of the factors $U$ that are outputted by each method. For a fair comparison, we compute the 3D errors for solutions that converged to the desired optimum.", + "bbox": [ + 75, + 482, + 468, + 667 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The methods are implemented in MATLAB, and we let each method perform a maximum of 500 iterations. For the case of regularization update scheduling, which we call $\\exp\\mathrm{OSE}(\\mathrm{S})$ , we delay the update of the regularization quadratic approximation by 250 iterations or until the initial optimization converges - whichever occurs first.", + "bbox": [ + 75, + 667, + 468, + 760 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Effect of $\\eta$ and scheduling: The performance of expOSE is evaluated for multiple values of $\\eta$ ranging from $10^{-4}$ to 0.5. The results are plotted in Figure 4. We show that expOSE is significantly more robust to $\\eta$ than pOSE in terms of 3D errors (see also Figure 1). 
We also show that delaying the update of the quadratic approximation of the regularization results in a significant boost in convergence rate, allowing us to achieve rates similar to pOSE.", + "bbox": [ + 75, + 762, + 468, + 883 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Comparison with other optimization strategies: We", + "bbox": [ + 96, + 885, + 468, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "compare the performance of expOSE (with and without scheduling) when using VarPro, LM and AltMin. The results confirm that, just like with pOSE, VarPro is the most reliable method for expOSE, while LM and AltMin achieve poor convergence rates.", + "bbox": [ + 496, + 316, + 893, + 395 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Robustness to Radial Distortion", + "text_level": 1, + "bbox": [ + 498, + 410, + 790, + 426 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In the previous sections, we considered modifications to the original pOSE model which assumes a regular pinhole camera. In [18] the RpOSE model which instead uses a radial camera [20, 21, 23, 24, 29, 37, 40] is presented. This model is invariant to radial distortion which the standard pOSE model does not handle. We note however that the radial model requires more data for parameter estimation since it essentially only measures errors in one direction of the image. To address this issue we introduce an intermediate model by decomposing the reprojection error into a tangential and a radial component. By down-weighting the tangential error we obtain a model that is more robust to radial distortion than the pinhole camera but less sensitive to missing data than the radial camera. We then introduce an exponential regularization term for this model.", + "bbox": [ + 496, + 436, + 893, + 664 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Decoupling Tangential and Radial Errors", + "text_level": 1, + "bbox": [ + 498, + 675, + 854, + 691 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "When working with the radial camera model it is typically assumed that the principal point and the distortion center are the center of the image and have coordinates $(0,0)$ . We make the same assumption here.", + "bbox": [ + 496, + 700, + 890, + 761 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The reprojection error is obtained by taking the length of the error vector $e(x, z) = \\frac{x}{z} - m$ . The coordinates of this vector are given in w.r.t. the canonical image basis (1,0) and (0,1) of the image and can be interpreted as errors in the $x$ - and $y$ -directions respectively. For a point $m$ we are interested in measuring the error in the radial direction $\\frac{m}{\\|m\\|}$ and the tangential direction $\\frac{m_{\\perp}}{\\|m\\|}$ , where $m_{\\perp}$ is the orthogonal vector to $m$ (see Figure 5). We, therefore, write the error vector as a linear combination of these. It is not diffi", + "bbox": [ + 496, + 761, + 892, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "8963", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/4cba7ccf2d3aa7df11ad7fd74ec869fb56d3fa7207ea8f8c32d64c2d06804bd7.jpg", + "image_caption": [ + "Figure 5. Levelsets (red ellipses) of $\\ell_{\\mathrm{wose}}$ for $\\alpha = 0.1$ and 0.9. Here $m = (0.6, 0.9)$ and $z = 1$ ." 
+ ], + "image_footnote": [], + "bbox": [ + 179, + 94, + 370, + 234 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "cult to verify that", + "bbox": [ + 76, + 299, + 196, + 313 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {\\boldsymbol {x}}{z} - \\boldsymbol {m} = \\left(\\frac {\\boldsymbol {m} ^ {T} \\boldsymbol {x}}{z \\| \\boldsymbol {m} \\|} - \\| \\boldsymbol {m} \\|\\right) \\frac {\\boldsymbol {m}}{\\| \\boldsymbol {m} \\|} + \\frac {\\boldsymbol {m} _ {\\perp} ^ {T} \\boldsymbol {x}}{\\| \\boldsymbol {m} \\| z} \\frac {\\boldsymbol {m} _ {\\perp}}{\\| \\boldsymbol {m} \\|}. \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 84, + 321, + 468, + 359 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In the basis $\\frac{m}{\\|m\\|}$ , $\\frac{m_{\\perp}}{\\|m\\|}$ the error vector can be written as", + "bbox": [ + 76, + 366, + 449, + 386 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {e} (\\boldsymbol {x}, z) = \\frac {1}{\\| \\boldsymbol {m} \\|} \\left[ \\begin{array}{l} \\boldsymbol {m} ^ {T} \\\\ \\boldsymbol {m} _ {\\perp} ^ {T} \\end{array} \\right] \\frac {\\boldsymbol {x}}{z} - \\binom {\\| \\boldsymbol {m} \\|} {0}. \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 140, + 393, + 468, + 429 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Independently of the basis chosen, the reprojection error is nonlinear due to the division by $z$ , making it unsuitable for optimization. The OSE in the new basis is obtained by rescaling the reprojection error $e(\\pmb{x},z)$ by the depth $z$ . The expression for OSE error in the new basis is therefore", + "bbox": [ + 76, + 436, + 468, + 512 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| z e (\\boldsymbol {x}, z) \\right\\| ^ {2} = \\left(\\frac {\\boldsymbol {m} ^ {T}}{\\| \\boldsymbol {m} \\|} \\boldsymbol {x} - \\| \\boldsymbol {m} \\| z\\right) ^ {2} + \\left(\\frac {\\boldsymbol {m} _ {\\perp} ^ {T}}{\\| \\boldsymbol {m} \\|} \\boldsymbol {x}\\right) ^ {2}. \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 84, + 521, + 468, + 559 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Reweighting the Error Components", + "text_level": 1, + "bbox": [ + 76, + 575, + 387, + 593 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Radial distortion is usually modeled by modifying the projection according to", + "bbox": [ + 76, + 599, + 468, + 630 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\kappa_ {r} (\\boldsymbol {m}) \\boldsymbol {m} = \\frac {\\boldsymbol {x}}{z} \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 218, + 638, + 468, + 666 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\kappa_{r}$ is a scalar that depends on the distance to the distortion center. It is clear that the second term of (14) vanishes when inserting $(\\pmb{x},z)$ fulfilling (15) for any $\\kappa_{r}$ , but not the first term. To handle radial distortion we could incorporate the additional parameter $\\kappa_{r}$ in (14) and explicitly estimate it. Unfortunately, this results in a more complex model (with trilinear interactions) making optimization difficult. Alternatively, to achieve robustness to radial distortion we can remove the first term, as in [18]. The downside of doing this is that it removes roughly half of the data (one out of two coordinates for each projection) available for use in inference. 
Therefore we here propose to compensate for the unknown radial distortion by down-weighting the first term or equivalently allowing a larger standard deviation in the radial direction.", + "bbox": [ + 76, + 674, + 468, + 898 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Let $\\sigma_r^2$ and $\\sigma_t^2$ denote the uncertainties of the reprojection error $\\epsilon = s\\pmb{x} / z - \\pmb{m}$ along the radial and tangential direction, respectively, and where $s$ is an unknown positive scalar that models radial distortion effects and focal length scaling. Assuming the reprojection error $\\epsilon$ is sampled from a 2D normal distribution $\\mathcal{N}(0,\\Sigma)$ , the probability of the model $\\{\\pmb{x},z\\}$ given $\\pmb{m}$ is", + "bbox": [ + 498, + 90, + 890, + 196 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nP (\\boldsymbol {x}, z \\mid \\boldsymbol {m}) = \\frac {1}{2 \\pi \\det (\\Sigma) ^ {1 / 2}} e ^ {- s ^ {2} \\left(\\frac {1}{s} \\boldsymbol {m} - \\boldsymbol {x} / z\\right) ^ {T} \\Sigma^ {- 1} \\left(\\frac {1}{s} \\boldsymbol {m} - \\boldsymbol {x} / z\\right)}. \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 501, + 205, + 890, + 247 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Maximizing the likelihood (16) w.r.t. $\\{x,z\\}$ is equivalent to minimizing", + "bbox": [ + 498, + 250, + 890, + 279 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {s ^ {2}}{\\sigma_ {r} ^ {2}} \\left(\\frac {\\boldsymbol {m} ^ {T}}{\\| \\boldsymbol {m} \\|} \\frac {\\boldsymbol {x}}{z} - \\frac {1}{s} \\| \\boldsymbol {m} \\|\\right) ^ {2} + \\frac {s ^ {2}}{\\sigma_ {t} ^ {2}} \\left(\\frac {\\boldsymbol {m} _ {\\perp} ^ {T}}{\\| \\boldsymbol {m} \\|} \\frac {\\boldsymbol {x}}{z}\\right) ^ {2}, \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 526, + 286, + 890, + 325 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\Sigma = R^T\\mathrm{diag}(\\sigma_r^2,\\sigma_t^2)R$ and $R$ is a rotation matrix that aligns the coordinate axis with $\\boldsymbol {m} / \\| \\boldsymbol {m}\\|$ and $\\boldsymbol {m}_{\\perp} / \\| \\boldsymbol {m}\\|$ . While the second term quadratic term of (17) is not affected by $s$ , in the first term $\\| \\boldsymbol {m}\\|$ is weighted by $1 / s$ , which is undesirable as previously motivated. We propose to approximate (17) by", + "bbox": [ + 498, + 332, + 890, + 422 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\underbrace {\\frac {1}{\\sigma_ {r} ^ {2}}} _ {(1 - \\alpha)} \\left(\\frac {\\boldsymbol {m} ^ {T}}{\\| \\boldsymbol {m} \\|} \\frac {\\boldsymbol {x}}{z} - \\| \\boldsymbol {m} \\|\\right) ^ {2} + \\underbrace {\\frac {1}{\\sigma_ {t} ^ {2}}} _ {\\alpha} \\left(\\frac {\\boldsymbol {m} _ {\\perp} ^ {T}}{\\| \\boldsymbol {m} \\|} \\frac {\\boldsymbol {x}}{z}\\right) ^ {2}. \\tag {18}\n$$\n", + "text_format": "latex", + "bbox": [ + 521, + 430, + 890, + 488 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "This approximation of the first term adds a bias to the obtained solution based on the unknown shift $\\left(\\frac{1}{s} - 1\\right)\\| \\boldsymbol{m}\\|$ . We regulate the effect of this bias - and thus the robustness to radial distortion - by controlling the relative weight of the first quadratic term (biased) versus the second quadratic term (unbiased) through the value of $\\alpha \\in [0,1]$ . 
For the extreme case of $\\alpha = 1$ the radial component of the error is completely dropped resulting in the loss presented in [18]. Linear residuals can be obtained by replacing (18) with its component-weighted OSE counterpart", + "bbox": [ + 498, + 496, + 890, + 648 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\ell_ {\\mathrm {w O S E}} = (1 - \\alpha) \\left(\\frac {\\boldsymbol {m} ^ {T}}{\\| \\boldsymbol {m} \\|} \\boldsymbol {x} - \\| \\boldsymbol {m} \\| z\\right) ^ {2} + \\alpha \\left(\\frac {\\boldsymbol {m} _ {\\perp} ^ {T}}{\\| \\boldsymbol {m} \\|} \\boldsymbol {x}\\right) ^ {2}. \\tag {19}\n$$\n", + "text_format": "latex", + "bbox": [ + 504, + 654, + 890, + 703 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Figure 5 shows an example of level sets (in the image plane $z = 1$ ) for $\\alpha = 0.1$ and 0.9.", + "bbox": [ + 498, + 704, + 890, + 733 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Note that the same approach can be used to handle unknown focal lengths. If we assume that the intrinsic calibration matrix of the camera is $\\mathbf{K} = \\mathrm{diag}(f, f, 1)$ , the relation between the reprojected point and the image measurement is $\\frac{\\kappa_r}{f} \\mathbf{m} = \\frac{\\mathbf{x}}{z}$ and therefore the re-weighted formulation can be applied to this setting as well. An unknown/varying focal length $f$ is however modeled by the standard pOSE model in contrast to $\\kappa_r$ which depends on the distance between the projection and the principal point and thus cannot be included in a factorization algorithm without adding extra variables.", + "bbox": [ + 496, + 734, + 890, + 898 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "8964", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Regularization for radial distortion invariance", + "text_level": 1, + "bbox": [ + 76, + 90, + 467, + 107 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Weighting differently the radial and tangential of the OSE does not change, in general, the exponential regularization described in Section 2. However, one must note that for the extreme case $\\alpha = 1$ , for a given $X = PU$ the variables in every third row of $X$ and $P$ vanish from the OSE. In other words, decreasing the total loss will always be possible by increasing $z$ through the third row of $P$ , and consequently decreasing the $e^{-\\frac{z}{\\sqrt{\\|m\\|^2 + 1}}}$ part of the exponential regularization. 
To avoid such undesirable behavior, we proposed an alternative exponential regularization for the particular case $\\alpha = 1$ acting only on $x$ and $y$ , i.e.,", + "bbox": [ + 75, + 114, + 468, + 285 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\ell_ {\\exp} = e ^ {- \\frac {\\boldsymbol {m} ^ {T}}{\\| \\boldsymbol {m} \\|} \\boldsymbol {x}} \\tag {20}\n$$\n", + "text_format": "latex", + "bbox": [ + 218, + 296, + 468, + 319 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "This alternative regularization enforces the reprojection $\\pmb{x}$ according to the 1D radial camera model $\\pmb{m} = \\lambda \\pmb{x}$ to have positive scale $\\lambda > 0$ , canceling out the shrinking bias of the OSE as in the general case.", + "bbox": [ + 76, + 332, + 468, + 391 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The expOSE loss for weighted radial and tangent components of the OSE can then be approximated as", + "bbox": [ + 76, + 393, + 468, + 422 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\ell_ {\\text {e x p O S E}} = \\sum_ {i j} (1 - \\eta) \\ell_ {\\text {w O S E}} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right) + \\eta \\tilde {\\ell} _ {\\text {e x p}} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right) \\tag {21}\n$$\n", + "text_format": "latex", + "bbox": [ + 84, + 435, + 468, + 467 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "with $\\tilde{\\ell}_{\\mathrm{exp}}$ defined as", + "bbox": [ + 76, + 481, + 209, + 500 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\{ \\begin{array}{l l} \\frac {\\ell_ {\\exp} \\left(\\bar {x} _ {i j} , \\bar {z} _ {i j}\\right)}{2} \\left(\\frac {\\boldsymbol {m} _ {i j} ^ {T} \\Delta \\boldsymbol {x} _ {i j} + \\Delta z _ {i j}}{\\sqrt {\\| \\boldsymbol {m} _ {i j} \\| ^ {2} + 1}} - 1\\right) ^ {2}, & \\alpha \\in [ 0, 1 [ \\\\ \\frac {\\ell_ {\\exp} (\\bar {x} _ {i j})}{2} \\left(\\frac {\\boldsymbol {m} _ {i j} ^ {T} \\Delta \\boldsymbol {x} _ {i j}}{\\| \\boldsymbol {m} _ {i j} \\|} - 1\\right) ^ {2}, & \\alpha = 1 \\end{array} . \\right. \\tag {22}\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 511, + 468, + 579 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "This radial distortion robust version of expOSE can be optimized following Algorithm 1 nonetheless since both the component-weighted OSE and the quadratic approximation of the regularization can still be written as $\\| \\mathcal{A}(PU) - b\\|^2$ .", + "bbox": [ + 76, + 590, + 468, + 652 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5. Outline of Full Reconstruction Pipeline", + "text_level": 1, + "bbox": [ + 76, + 666, + 428, + 684 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We propose to use expOSE as a solution to uncalibrated and radial distortion invariant Structure-from-Motion. A few Bundle Adjustment steps can be performed for further refinement. The pipeline takes as input 2D image measurements of points tracked along multiple views, just like any other factorization-based SfM pipeline. The proposed radial distortion-invariant pipeline can be decomposed into the following sequential modules:", + "bbox": [ + 75, + 691, + 468, + 813 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "1. 
expOSE factorization: Given a set of image points tracked along several images, we use Algorithm 1 to obtain estimations of the uncalibrated camera matrix, and the 3D points, up to projective ambiguity.", + "bbox": [ + 89, + 824, + 468, + 886 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "2. Radial distortion estimation (and camera matrix completion): Using the solution obtained with expOSE, the distortion parameters and, for $\\alpha = 1$ , the third row of the uncalibrated camera matrix are estimated from the equations in (15). Note that by assuming a Brown-Conrady radial distortion model [2] with $\\kappa(m) = \\sum_{j} k_{j} \\|m\\|^{2j}$ , for each camera a system of equations of the form", + "bbox": [ + 511, + 90, + 890, + 208 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nM _ {i} \\left[ \\begin{array}{c} p _ {i} ^ {(3)} \\\\ \\mathbf {k} \\end{array} \\right] = b _ {i} \\tag {23}\n$$\n", + "text_format": "latex", + "bbox": [ + 658, + 208, + 890, + 242 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "can be obtained, where $p_i^{(3)}$ is the third row of the $i$ th camera matrix, and $\\mathbf{k}$ is a vector of the distortion parameters. Here we use a distortion model with three parameters, $k_j, j = 1,\\dots,3$ . Assuming that the distortion model is constant along all views, the overall system of equations can be written as $M[p^{(3)T},\\mathbf{k}^T ]^T = b$ , with $p$ being a $4\\times$ #views vector with all third rows of the camera matrices. For $\\alpha = 1$ both $p^{(3)}$ and $\\mathbf{k}$ are unknowns and are estimated in this step. For $\\alpha \\neq 1$ , the system can be simplified to $M\\mathbf{k} = b - Mp^{(3)}$ since $p^{(3)}$ is already estimated by expOSE. If it is assumed that there is no radial distortion and $\\alpha \\neq 1$ , then this step can be completely skipped.", + "bbox": [ + 529, + 250, + 890, + 450 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3. Bundle adjustment: We perform local optimization of", + "bbox": [ + 511, + 458, + 888, + 473 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {i j} \\left\\| \\boldsymbol {m} _ {i j} - (1 + \\kappa (\\boldsymbol {m} _ {i j})) \\frac {\\boldsymbol {x} _ {i j}}{z _ {i j}} \\right\\| ^ {2} \\tag {24}\n$$\n", + "text_format": "latex", + "bbox": [ + 599, + 481, + 890, + 521 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "starting from the estimations of $P$ , $X$ , and $\\mathbf{k}$ found with the previous steps. The optimization is solved using Levenberg-Marquardt algorithm. If there is no radial distortion then the parameters $\\mathbf{k}$ can be set to zero and kept constant during optimization. For expOSE initialization, we observe that usually only a few steps are needed (5-10 steps).", + "bbox": [ + 529, + 529, + 890, + 635 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4. Euclidean update: Finally we estimate the projective transformation $H \\in \\mathbb{R}^{4 \\times 4}$ such that the factorization $\\{PH, H^{-1}X\\}$ is a Euclidean reconstruction. This is done by estimating the dual absolute conic as described in [12].", + "bbox": [ + 511, + 643, + 890, + 719 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.1. 
Experiments", + "text_level": 1, + "bbox": [ + 500, + 726, + 633, + 742 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The performance of the proposed pipeline is evaluated on 3 sequences from [24] with radial distortion: Grossmunster (19 cam., 1874 pts, $41\\%$ missing data), Kirchenge (30 cam., 1158 pts, $60\\%$ missing data), and Munterhof (20 cam., 2108 pts, $42\\%$ missing data). We compare the performance when using either $\\exp OSE$ ( $\\eta = 0.01$ ), $\\mathrm{pOSE}$ , or $\\mathrm{RpOSE}$ (both with $\\eta = 0.001$ ) in step 1 of the pipeline. We use $\\exp OSE$ with scheduling for regularization update, as described in Section 3.2. Refinement of the solutions is done by performing up to 50 iterations of BA.", + "bbox": [ + 496, + 750, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "8965", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/573df3627b32e8b669936dd217a28fb41e7094316edabcd5482d949197f38cec.jpg", + "image_caption": [ + "Figure 6. Visualization of reconstructions on the Grossmunster sequence. (Left) An example of one of the images on the sequence. At the bottom, we show a view of the 3D reconstruction of expOSE for $\\alpha = 1$ . (Right) Comparison between the top view reconstructions (black) obtained with pOSE, RpOSE and expOSE. In red we show the ground-truth 3D point cloud. All reconstructions shown here were not refined with bundle adjustment." + ], + "image_footnote": [], + "bbox": [ + 81, + 90, + 225, + 282 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/263cd2695e7d9d4ca5166ce4c2c5580b9ec9b7b2071a9239550e40fea3c9848a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 227, + 90, + 343, + 282 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/682f9d74e513e408789f632deb0127253eb2582fb69e09d4649e43ff4eb532fc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 344, + 90, + 467, + 282 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The metrics used are convergence rate (similarly to the experiments in Section 2), 2D reprojection error, rotation error, and 3D error. In order to compute the last two, we perform Euclidean registration on the output of the pipeline, i.e. after the Euclidean update, to the ground-truth 3D point cloud. The inverse of that Euclidean transformation is applied to the camera matrices. Rotation error is then computed as $e_{\\mathrm{rot}} = \\mathrm{acos}\\left(\\left(\\mathrm{trace}\\left(R_i^{GT}R_i^T\\right) - 1\\right) / 2\\right)$ and the 3D error as the median of all $\\| X_j - X_j^{GT}\\|$ . The values presented in Table 1 correspond to the average over all instances that converged to the desired optimum. The chosen metrics are evaluated at two points of the pipeline: after the radial distortion estimation (step 2), and after the bundle adjustment (step 3). At both stages, a metric update is performed in order to obtain a Euclidean reconstruction.", + "bbox": [ + 75, + 425, + 468, + 652 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The results show that expOSE clearly outperforms both pOSE and RpOSE. The difference in performance is even more evident when looking at the output of the factorizations, where expOSE was able to achieve reprojection errors that almost match the refined solution with BA. Note that in many cases expOSE even got better rotation and 3D errors than its refined counterpart. 
A visualization for the Grossmunter sequence is shown in Figure 6. It is also possible to notice the impact of using the regularization for radial distortion invariance as described in Section 4.3. For $\\alpha = 0.999$ the method has slow convergence, leading to poor solutions as can be seen by the high rotation and reprojection errors. Additional results for other values of $\\alpha$ and sequences are presented in the supplementary material.", + "bbox": [ + 75, + 655, + 467, + 866 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In practice, as seen in these experiments, we notice that $\\alpha = 1$ achieves the best results for images with radial", + "bbox": [ + 76, + 869, + 467, + 898 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/f652ec7db7bf4148137c3d4a3b3fcef719bb4abd10eecb944126e8a8555ee566.jpg", + "table_caption": [ + "Table 1. Results on the Grossmunster, Kirchenge, and Munsterhof datasets (over 10 instances). For each method two rows are presented: the first consists of the results for the output of the factorization method; the second of the output of the Bundle Adjustment (+BA). In green, we show the best results for each metric." + ], + "table_footnote": [], + "table_body": "
Grossmunster | Conv. Rate | Rot. [deg] | 3D [unit] | 2D [pix]
pOSE | 50% | 148.25 | 0.762 | 18.48
+ BA | 50% | 27.61 | 0.293 | 1.50
RpOSE | 90% | 2.24 | 0.082 | 2.91
+ BA | 90% | 0.53 | 0.011 | 1.48
ExpOSE α=0.999 | 100% | 44.74 | 0.227 | 41.51
α=0.999 + BA | 100% | 0.43 | 0.007 | 1.48
α=1 | 100% | 0.18 | 0.004 | 1.86
α=1 + BA | 100% | 0.42 | 0.006 | 1.48
", + "bbox": [ + 501, + 170, + 895, + 323 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/c45294355b6cfa9b10fd01e40e2bc8fb6b5ad6cab87ca5f60826d3d892f81ec3.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Kirchenge
pOSE | 100% | 160.38 | 6.844 | 14.95
+ BA | 100% | 0.72 | 0.024 | 1.22
RpOSE | 90% | 0.98 | 0.062 | 1.94
+ BA | 90% | 1.06 | 0.031 | 1.22
ExpOSE α=0.999 | 60% | 24.71 | 0.022 | 45.28
α=0.999 + BA | 80% | 1.19 | 0.021 | 1.22
α=1 | 80% | 0.51 | 0.026 | 1.57
α=1 + BA | 80% | 2.92 | 0.050 | 1.22
", + "bbox": [ + 501, + 323, + 895, + 459 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/b55b6dc148005ae63391ae326781720c3ff2565f72a2786c9dbaed8cc00ab809.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Munsterhof
pOSE | 100% | 14.01 | 0.230 | 12.08
+ BA | 100% | 0.44 | 0.027 | 1.70
RpOSE | 60% | 1.00 | 0.071 | 11.96
+ BA | 60% | 0.44 | 0.027 | 1.70
ExpOSE α=0.999 | 100% | 20.13 | 0.021 | 47.71
α=0.999 + BA | 100% | 0.47 | 0.029 | 1.70
α=1 | 80% | 0.12 | 0.013 | 3.43
α=1 + BA | 90% | 0.45 | 0.030 | 1.70
", + "bbox": [ + 501, + 459, + 895, + 595 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "distortion. In the supplementary material we provide additional experiments that show the benefit of using values $1/2 < \\alpha < 1$ in particular problem instances where data availability is too low for the stability of a pure radial model (e.g. few viewpoints and/or points per camera available).", + "bbox": [ + 498, + 619, + 890, + 696 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusions", + "text_level": 1, + "bbox": [ + 500, + 709, + 625, + 724 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we propose the use of exponential regularization on projective factorization problems as a way to enforce Cheirality conditions on the reconstruction. Radial distortion robustness is achieved by weighting differently the radial and tangential components of the object space error. We show that the proposed regularization results in higher reconstruction quality (that matches bundle adjustment refined solutions) while keeping the same convergence properties as state-of-the-art factorization methods and being less sensitive to the choice of the weight $\\eta$ of the regularization.", + "bbox": [ + 496, + 734, + 890, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8966", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Srinadh Bhojanapalli, Behnam Neyshabur, and Nati Srebro. Global optimality of local search for low rank matrix recovery. In D. D. Lee, M. Sugiyama, U. V. Luxburg, I. Guyon, and R. Garnett, editors, Advances in Neural Information Processing Systems 29, pages 3873-3881. Curran Associates, Inc., 2016. 1", + "[2] Dean Brown. Decentering distortion of lenses. 1966. 7", + "[3] A. M. Buchanan and A. W. Fitzgibbon. Damped newton algorithms for matrix factorization with missing data. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2005. 1, 5", + "[4] Alessio Del Bue, João M. F. Xavier, Lourdes Agapito, and Marco Paladini. Bilinear modeling via augmented lagrange multipliers (BALM). IEEE Trans. Pattern Anal. Mach. Intell., 34(8):1496-1508, 2012. 1", + "[5] R. Cabral, F. De la Torre, J. P. Costeira, and A. Bernardino. Unifying nuclear norm and bilinear factorization approaches for low-rank matrix decomposition. In International Conference on Computer Vision (ICCV), 2013. 1", + "[6] Emmanuel J. Candès and Benjamin Recht. Exact matrix completion via convex optimization. Foundations of Computational Mathematics, 9(6):717-772, 2009. 3", + "[7] I. Csiszar and G. Tusnády. Information Geometry and Alternating Minimization Procedures. 5", + "[8] Y. Dai, H. Li, and M. He. Projective multiview structure and motion from element-wise factorization. IEEE Transactions on Pattern Analysis and Machine Intelligence, 35(9):2238-2251, 2013. 1", + "[9] Rong Ge, Chi Jin, and Yi Zheng. No spurious local minima in nonconvex low rank problems: A unified geometric analysis. arXiv preprint, arxiv:1704.00708, 2017. 1, 3", + "[10] Rong Ge, Jason D. Lee, and Tengyu Ma. Matrix completion has no spurious local minimum. In Annual Conference on Neural Information Processing Systems (NIPS), 2016. 1, 3", + "[11] Christian Grussler, Anders Rantzer, and Pontus Giselsson. Low-rank optimization with convex constraints. 
IEEE Transactions on Automatic Control, 63(11):4000-4007, 2018. 3", + "[12] Richard Hartley and Andrew Zisserman. Multiple View Geometry in Computer Vision. Cambridge University Press, USA, 2 edition, 2003. 2, 3, 7", + "[13] Richard I. Hartley. In defense of the eight-point algorithm. IEEE Trans. Pattern Anal. Mach. Intell., 19(6):580-593, 1997. 4", + "[14] Je Hyeong Hong and Andrew Fitzgibbon. Secrets of matrix factorization: Approximations, numerics, manifold optimization and random restarts. In Int. Conf. on Computer Vision, 2015. 1, 4", + "[15] Je Hyeong Hong and Christopher Zach. pose: Pseudo object space error for initialization-free bundle adjustment. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2018. 1, 2, 3", + "[16] J. H. Hong, C. Zach, and A. Fitzgibbon. Revisiting the variable projection method for separable nonlinear least squares problems. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 5939-5947, 2017. 1, 3, 4" + ], + "bbox": [ + 78, + 114, + 468, + 897 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[17] Je Hyeong Hong, Christopher Zach, Andrew W. Fitzgibbon, and Roberto Cipolla. Projective bundle adjustment from arbitrary initialization using the variable projection method. In European Conf. on Computer Vision, 2016. 1", + "[18] Jose Iglesias and Carl Olsson. Radial distortion invariant factorization for structure from motion. In Proceedings of the IEEE International Conference on Computer Vision, 2021. 1, 2, 3, 5, 6", + "[19] José Pedro Iglesias, Carl Olsson, and Marcus Valtonen Örnhag. Accurate optimization of weighted nuclear norm for non-rigid structure from motion. In European Conference on Computer Vision (ECCV), 2020. 1, 3", + "[20] Jae-Hak Kim, Yuchao Dai, Hongdong li, Xin Du, and Jonghyuk Kim. Multi-view 3d reconstruction from uncalibrated radially-symmetric cameras. In Proceedings of the IEEE International Conference on Computer Vision, pages 1896-1903, 12 2013. 5", + "[21] Z. Kukelova, M. Bujnak, and T. Pajdla. Real-time solution to the absolute pose problem with unknown radial distortion and focal length. In 2013 IEEE International Conference on Computer Vision, pages 2816-2823, 2013. 5", + "[22] Suryansh Kumar. Non-rigid structure from motion: Prior-free factorization method revisited. In IEEE Winter Conference on Applications of Computer Vision, WACV 2020, Snowmass Village, CO, USA, March 1-5, 2020, pages 51-60. IEEE, 2020. 1", + "[23] Viktor Larsson, Torsten Sattler, Zuzana Kukelova, and Marc Pollefeys. Revisiting radial distortion absolute pose. In International Conference on Computer Vision (ICCV). IEEE, September 2019. 5", + "[24] Viktor Larsson, Nicolcas Zobernig, Kasim Taskin, and Marc Pellefeys. Calibration-free structure-from-motion with calibrated radial trifocal tensors. In European Conference of Computer Vision, 2020. 5, 7", + "[25] Ludovic Magerand and Alessio Del Bue. Practical projective structure from motion (p2sfm). In 2017 IEEE International Conference on Computer Vision (ICCV), pages 39-47, 2017. 2", + "[26] Behrooz Nasihatkon, Richard I. Hartley, and Jochen Trumpf. A generalized projective reconstruction theorem and depth constraints for projective factorization. Int. J. Comput. Vis., 115(2):87-114, 2015. 2", + "[27] Takayuki Okatani and Koichiro Deguchi. On the wiberg algorithm for matrix factorization in the presence of missing components. International Journal of Computer Vision, 72(3):329-337, 2007. 
3", + "[28] Carl Olsson, Daniele Gerosa, and Marcus Carlsson. Relaxations for non-separable cardinality/rank penalties. In 2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW), IEEE International Conference on Computer Vision Workshops, pages 162-171, 2021. 3", + "[29] Carl Olsson, Viktor Larsson, and Fredrik Kahl. A quasiconvex formulation for radial cameras. In 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14571-14580, 2021. 5", + "[30] Marcus Valtonen Ornag, Carl Olsson, and Anders Heyden. Bilinear parameterization for differentiable rank-regularization. 2020 IEEE/CVF Conference on Computer" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "8967", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Vision and Pattern Recognition Workshops (CVPRW), Jun 2020. 1, 3", + "[31] Dohyung Park, Anastasios Kyrillidis, Constantine Carmanis, and Sujay Sanghavi. Non-square matrix sensing without spurious local minima via the Burer-Monteiro approach. In Aarti Singh and Jerry Zhu, editors, Proceedings of the 20th International Conference on Artificial Intelligence and Statistics, volume 54 of Proceedings of Machine Learning Research, pages 65-74, Fort Lauderdale, FL, USA, 20-22 Apr 2017. PMLR. 1", + "[32] Conrad J. Poelman and Takeo Kanade. A parapspective factorization method for shape and motion recovery. IEEE Trans. Pattern Anal. Mach. Intell., 19(3):206-218, 1997. 1", + "[33] Benjamin Recht, Maryam Fazel, and Pablo A. Parrilo. Guaranteed minimum-rank solutions of linear matrix equations via nuclear norm minimization. SIAM Rev., 52(3):471-501, Aug. 2010. 3", + "[34] C. Strecha, W. von Hansen, L. Van Gool, P. Fua, and U. Thoennessen. On benchmarking camera calibration and multi-view stereo for high resolution imagery. In 2008 IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8, 2008. 5", + "[35] D. Strelow, Q. Wang, L. Si, and A. Eriksson. General, nested, and constrained wiberg minimization. IEEE Transactions on Pattern Analysis and Machine Intelligence, 38(9):1803-1815, 2016. 3", + "[36] Peter F. Sturm and Bill Triggs. A factorization based algorithm for multi-image projective structure and motion. In Proceedings of the 4th European Conference on Computer Vision-Volume II - Volume II, ECCV '96, page 709-720, Berlin, Heidelberg, 1996. Springer-Verlag. 1", + "[37] SriRam Thirthala and Marc Pollefeys. Radial multi-focal tensors. International Journal of Computer Vision - IJCV, 96, 06 2012. 5", + "[38] Carlo Tomasi and Takeo Kanade. Shape and motion from image streams under orthography: A factorization method. International Journal of Computer Vision, 9(2):137-154, 1992. 1", + "[39] Bill Triggs, Philip F. McLauchlan, Richard I. Hartley, and Andrew W. Fitzgibbon. Bundle adjustment - a modern synthesis. In Proceedings of the International Workshop on Vision Algorithms: Theory and Practice, ICCV '99, pages 298-372. Springer-Verlag, 2000. 2, 3", + "[40] R. Tsai. A versatile camera calibration technique for high-accuracy 3d machine vision metrology using off-the-shelf tv cameras and lenses. IEEE Journal on Robotics and Automation, 3(4):323-344, August 1987. 5", + "[41] T. Wiberg. Computation of principal components when data are missing. In Proceedings of the Second Symposium of Computational Statistics, page 229-326, 1976. 
3" + ], + "bbox": [ + 78, + 90, + 468, + 797 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "8968", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2023/expOSE_ Accurate Initialization-Free Projective Factorization Using Exponential Regularization/6f578c7a-ccab-49cb-bc75-04e328397fea_model.json b/2023/expOSE_ Accurate Initialization-Free Projective Factorization Using Exponential Regularization/6f578c7a-ccab-49cb-bc75-04e328397fea_model.json new file mode 100644 index 0000000000000000000000000000000000000000..72211ad91b066384a883904f01518c0f1f790db9 --- /dev/null +++ b/2023/expOSE_ Accurate Initialization-Free Projective Factorization Using Exponential Regularization/6f578c7a-ccab-49cb-bc75-04e328397fea_model.json @@ -0,0 +1,2266 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.079, + 0.131, + 0.89, + 0.175 + ], + "angle": 0, + "content": "expOSE: Accurate Initialization-Free Projective Factorization using Exponential Regularization" + }, + { + "type": "text", + "bbox": [ + 0.267, + 0.203, + 0.697, + 0.222 + ], + "angle": 0, + "content": "Jose Pedro Iglesias1, Amanda Nilsson2, Carl Olsson1,2" + }, + { + "type": "text", + "bbox": [ + 0.305, + 0.228, + 0.666, + 0.247 + ], + "angle": 0, + "content": "1Chalmers University of Technology, Sweden" + }, + { + "type": "text", + "bbox": [ + 0.381, + 0.247, + 0.59, + 0.264 + ], + "angle": 0, + "content": "\\(^{2}\\)Lund University, Sweden" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.299, + 0.313, + 0.315 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.332, + 0.474, + 0.604 + ], + "angle": 0, + "content": "Bundle adjustment is a key component in practically all available Structure from Motion systems. While it is crucial for achieving accurate reconstruction, convergence to the right solution hinges on good initialization. The recently introduced factorization-based \\(pOSE\\) methods formulate a surrogate for the bundle adjustment error without reliance on good initialization. In this paper, we show that \\(pOSE\\) has an undesirable penalization of large depths. To address this we propose \\(expOSE\\) which has an exponential regularization that is negligible for positive depths. To achieve efficient inference we use a quadratic approximation that allows an iterative solution with VarPro. Furthermore, we extend the method with radial distortion robustness by decomposing the Object Space Error into radial and tangential components. Experimental results confirm that the proposed method is robust to initialization and improves reconstruction quality compared to state-of-the-art methods even without bundle adjustment refinement." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.618, + 0.21, + 0.634 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.644, + 0.47, + 0.78 + ], + "angle": 0, + "content": "Factorization is a long-established method in Structure from Motion (SfM). 
It originates from [38] by Tomasi and Kanade showing how, under the orthographic camera model, structure and motion can be computed simultaneously from an image sequence using singular value decomposition (SVD). The method was later reformulated for affine cameras, including weak perspective projection [32]. Strum and Triggs [36] further extended factorization to projective cameras by accounting for projective depths." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.781, + 0.47, + 0.827 + ], + "angle": 0, + "content": "One appeal of these factorization algorithms is they can yield a closed-form solution by using the SVD. It is however only possible to use the SVD if every considered scene" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.302, + 0.642, + 0.453 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.653, + 0.301, + 0.888, + 0.454 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.468, + 0.892, + 0.538 + ], + "angle": 0, + "content": "Figure 1. (Left) Examples of two of the images in the Fountain sequence. (Right) Reconstruction obtained with expOSE (top) and pOSE (bottom) for 3 different values of \\(\\eta\\). Our method achieves the same convergence rate as pOSE while having a higher reconstruction quality and being less dependent on the choice of \\(\\eta\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.567, + 0.893, + 0.748 + ], + "angle": 0, + "content": "point is visible throughout the whole image sequence. In cases of missing data, the SVD can be replaced with iterative methods. Simple splitting methods [4,8,22] are able to regularize singular values when computing a proximal operator, but can give rather erroneous solutions because of a low convergence rate close to the optimum. [5, 8] give an idea of convex formulation using the nuclear norm, but are usually too weak for SfM in the presence of noise [19, 30]. The papers [1, 9, 10, 31] suggest different ways to assure that direct bilinear optimization only has a global minimum. However, SfM problems with local minima do not fulfill their required conditions [3]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.895, + 0.902 + ], + "angle": 0, + "content": "It was recently shown by Hong et al. [14-17] that direct bilinear estimation of structure and motion can be made robust to local minima in combination with the Variable Projection (VarPro) method. In [15] the objective is exchanged for the Pseudo Object Space Error (pOSE) which is a tradeoff between the object space error and a quadratic regularization term. This was later extended to a radial distortion invariant version RpOSE, presented in [18]. With their bilinear factorization structure and a large basin of convergence when using VarPro, these pOSE models tend to find" + }, + { + "type": "page_footnote", + "bbox": [ + 0.076, + 0.839, + 0.47, + 0.901 + ], + "angle": 0, + "content": "1This work has been funded by the Swedish Research Council (grant no. 2018-05375), the Swedish Foundation for Strategic Research project, Semantic Mapping and Visual Navigation for Smart Robots (grant no. RIT15-0038), and the Wallenberg AI, Autonomous Systems and Software Program (WASP)." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8959" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.151 + ], + "angle": 0, + "content": "a global minimum independently of the initialization. 
Additionally, both pOSE and RpOSE have in [18] been shown to be local approximations of the reprojection error, enabling iterative refinement to the maximum likelihood solution." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.152, + 0.471, + 0.302 + ], + "angle": 0, + "content": "In this paper, we show that the regularization term in the pOSE formulation overly penalizes large positive depths and can thereby limit the range of feasible depths too much to achieve satisfactory solutions. We instead propose regularization with an exponential penalty that is negligible for positive depths. To achieve efficient inference we use a quadratic approximation of the exponential term suitable for optimization with VarPro. Moreover, we extend the method with radial distortion robustness by decomposing the OSE into radial and tangent components." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.304, + 0.421, + 0.318 + ], + "angle": 0, + "content": "In short, the main contributions of this paper are:" + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.324, + 0.469, + 0.37 + ], + "angle": 0, + "content": "- We investigate the pOSE models' undesirable penalization of large depths and propose expOSE which has negligible regularization of positive depths;" + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.377, + 0.469, + 0.468 + ], + "angle": 0, + "content": "- We formulate a quadratic approximation of the exponential regularization term in expOSE to make it suitable for optimization with VarPro and show that, with random initialization, the model achieves convergence rates similar to pOSE with significantly higher reconstruction quality;" + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.476, + 0.469, + 0.551 + ], + "angle": 0, + "content": "- We extend expOSE with radial distortion robustness by decomposing the Object Space Error (OSE) into radial and tangent components and propose an SfM pipeline that is able to obtain a complete and accurate Euclidean reconstruction from uncalibrated cameras." + }, + { + "type": "list", + "bbox": [ + 0.096, + 0.324, + 0.469, + 0.551 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.563, + 0.323, + 0.58 + ], + "angle": 0, + "content": "2. Reconstruction Objectives" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.588, + 0.469, + 0.664 + ], + "angle": 0, + "content": "In this section, we illustrate the problems with direct optimization of reprojection error and discuss how this is addressed using the pOSE model [15]. We then present our exponential regularization and show how this addresses the limitations of the pOSE model." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.671, + 0.379, + 0.687 + ], + "angle": 0, + "content": "2.1. Reprojection Error and Cheirality" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.695, + 0.469, + 0.752 + ], + "angle": 0, + "content": "Bundle adjustment [12, 39] is the standard routine when it comes to solving the Structure-from-Motion problem. Given measured point projections \\( m_{ij} \\) the goal is to attempt to minimize" + }, + { + "type": "equation", + "bbox": [ + 0.203, + 0.751, + 0.469, + 0.791 + ], + "angle": 0, + "content": "\\[\n\\sum_ {i j} \\left\\| \\boldsymbol {m} _ {i j} - \\frac {\\boldsymbol {x} _ {i j}}{z _ {i j}} \\right\\| ^ {2}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.795, + 0.469, + 0.901 + ], + "angle": 0, + "content": "where \\(\\left[ \\begin{array}{c} \\boldsymbol{x}_{ij} \\\\ z_{ij} \\end{array} \\right] = P_i U_j\\). 
Here \\(\\boldsymbol{x}_{ij}\\) is a 2 vector, \\(z_{ij}\\) is a number, referred to as the projective depth, \\(P_i\\) is a \\(3 \\times 4\\) camera matrix and \\(U_i\\) is a \\(4 \\times 1\\) vector containing homogeneous coordinates of the projected 3D point. Under the assumption of Gaussian image noise, this gives the maximal likelihood estimate of the camera matrices and 3D points [12]." + }, + { + "type": "image", + "bbox": [ + 0.541, + 0.089, + 0.735, + 0.2 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.781, + 0.109, + 0.844, + 0.196 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.541, + 0.2, + 0.734, + 0.308 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.777, + 0.216, + 0.846, + 0.305 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.318, + 0.892, + 0.416 + ], + "angle": 0, + "content": "Figure 2. Left: Objective values of the reprojection error (blue), the pOSE error \\((\\eta = 0.1, \\text{red})\\) and our proposed formulation \\((\\eta = 0.1, \\text{yellow})\\) on the lines \\((1 - t)(0.5, 0, -1) + t(0.5, 0, 1)\\) (top) and \\((1 - t)(-0.5, 0, -1) + t(0.5, 0, 1)\\) (bottom) when \\(m = (0.5, 0)\\). Note that the reprojection error is undefined at \\(z = 0\\) since this corresponds to the camera center. Right: Corresponding camera and sampling line." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.442, + 0.892, + 0.822 + ], + "angle": 0, + "content": "It is well known that optimizing (1) is difficult and requires good initialization to achieve convergence to the right solution. One of the difficulties is the division of \\( \\boldsymbol{x}_{ij} \\) by \\( z_{ij} \\). This creates a barrier of objective values that goes to infinity and needs to be traversed when for example moving from \\( (\\boldsymbol{x}_{ij}, -z_{ij}) \\) to \\( (\\boldsymbol{x}_{ij}, z_{ij}) \\). The blue curve of Figure 2 (top) shows a 2D example of this barrier. Here we used \\( m = (0.5, 0) \\) and sampled the function \\( \\left( m - \\frac{x}{z} \\right)^2 \\) on the line segment \\( (\\boldsymbol{x}, z) = (1 - t)(0.5, 0, -1) + t(0.5, 0, 1) \\). The best value over this line is at \\( t = 1 \\) which gives \\( (\\boldsymbol{x}, z) = (0.5, 0, 1) \\). For comparison, we also plot the corresponding values of the pOSE model [15] (red) and the proposed formulation that we will describe below (yellow). In a calibrated setting the interpretation of \\( z_{ij} \\) is the depth [12] of the observed 3D point. Hence, in practical cases, where observed points are in front of the camera, there is usually no reason to allow solutions with negative \\( z_{ij} \\). In the uncalibrated case \\( z_{ij} \\) is referred to as a projective depth. It can be shown that when the data is noise free (with sufficiently many visible projections) there is always a solution where the projective depths are all positive [26] if the observed points are in front of the camera. Moreover, any other solution is projectively equivalent to this one, meaning that there is a projective 3D transformation that makes the projective depths positive [25, 26]." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.832, + 0.671, + 0.848 + ], + "angle": 0, + "content": "2.2. The pOSE Model" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "In view of the above, constraining the problem to positive depths is no practical restriction. 
Still finding a good starting solution where all depths are positive is not a trivial" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8960" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.122 + ], + "angle": 0, + "content": "issue. In [15] the objective (1) is exchanged for the object space error (OSE)" + }, + { + "type": "equation", + "bbox": [ + 0.155, + 0.127, + 0.47, + 0.145 + ], + "angle": 0, + "content": "\\[\n\\ell_ {\\text {O S E}} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right) = \\left\\| z _ {i j} \\boldsymbol {m} _ {i j} - \\boldsymbol {x} _ {i j} \\right\\| ^ {2}. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.151, + 0.471, + 0.29 + ], + "angle": 0, + "content": "Here, the scale-invariant residual of (1) has been replaced with a linear error allowing points to switch from negative to positive projective depths. It can be shown [18] that the OSE residual \\( z_{i} \\pmb{m}_{ij} - \\pmb{x}_{ij} \\) is the first order Taylor expansion of the projective residual \\( \\pmb{m}_{ij} - \\frac{\\pmb{x}_{ij}}{z_{ij}} \\) around \\( (\\pmb{x}_{ij}, z_{ij}) = (\\pmb{m}_{ij}, 1) \\), and it is therefore in some sense the closest linear approximation that we can find. On the downside, the OSE is clearly minimized by the trivial solution \\( \\pmb{x}_{ij} = 0 \\), \\( z_{ij} = 0 \\) for all \\( i, j \\). Therefore [15] adds the quadratic regularization" + }, + { + "type": "equation", + "bbox": [ + 0.182, + 0.303, + 0.469, + 0.32 + ], + "angle": 0, + "content": "\\[\n\\ell_ {\\text {a f f}} \\left(\\boldsymbol {x} _ {i j}\\right) = \\left\\| \\boldsymbol {x} _ {i j} - \\boldsymbol {m} _ {i j} \\right\\| ^ {2}, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.324, + 0.469, + 0.369 + ], + "angle": 0, + "content": "which penalizes the trivial zero solution. Note that (3) and (2) both vanish when \\((\\pmb{x}_{ij},z_{ij}) = (\\pmb{m}_{ij},1)\\). The proposed pOSE objective" + }, + { + "type": "equation", + "bbox": [ + 0.135, + 0.375, + 0.469, + 0.408 + ], + "angle": 0, + "content": "\\[\n\\sum_ {i j} \\left((1 - \\eta) \\ell_ {\\text {O S E}} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right) + \\eta \\ell_ {\\text {a f f}} \\left(\\boldsymbol {x} _ {i j}\\right)\\right), \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.412, + 0.47, + 0.639 + ], + "angle": 0, + "content": "where \\(0 < \\eta < 1\\), therefore allows arbitrary starting solutions but penalizes projective depths that deviate significantly from 1. The red curve of Figure 2 (top) shows pOSE values (with \\(\\eta = 0.1\\)) over the line \\((1 - t)(0.5, 0, -1) + t(0.5, 0, 1)\\). In contrast to the reprojection error, the pOSE formulation does not give any barrier at \\(z = 0\\). It is experimentally shown in [15] that when optimized using VarPro [16] this leads to a method that converges to the right solution in the vast majority of cases starting from random initialization (including starting points with negative depths). Note that if we column-stack the camera matrices \\(P_{i}\\) into a matrix \\(P\\) with 4 columns, and similarly row-stack the 3D points into a matrix \\(U\\) with 4 rows, the resulting product \\(X = PU\\) is a matrix of rank 4. 
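For concreteness, the pOSE objective (4) can be evaluated as in the following minimal sketch (our own illustration, not the authors' implementation; the observation container and the three-rows-per-camera stacking of P are assumptions made only for this example).

    # Evaluate the pOSE loss (4) for given camera and point factors.
    import numpy as np

    def pose_objective(P, U, observations, eta=0.1):
        # P: (3 * n_cameras, 4), U: (4, n_points),
        # observations: dict {(i, j): m_ij} with m_ij a length-2 array.
        X = P @ U
        total = 0.0
        for (i, j), m in observations.items():
            x = X[3 * i : 3 * i + 2, j]        # reprojection numerator x_ij
            z = X[3 * i + 2, j]                # projective depth z_ij
            l_ose = np.sum((z * m - x) ** 2)   # object space error (2)
            l_aff = np.sum((x - m) ** 2)       # affine regularization (3)
            total += (1 - eta) * l_ose + eta * l_aff
        return total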
We can therefore formulate the pOSE objective as a low-rank recovery problem" + }, + { + "type": "equation", + "bbox": [ + 0.19, + 0.644, + 0.469, + 0.669 + ], + "angle": 0, + "content": "\\[\n\\min _ {\\operatorname {r a n k} (X) = 4} \\| \\mathcal {A} (X) - b \\| ^ {2}, \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.674, + 0.471, + 0.901 + ], + "angle": 0, + "content": "where \\(\\mathcal{A}\\) is a linear operator. It is well known from compressed sensing that such formulations can often be solved optimally [6, 9-11, 19, 28, 30, 33]. The optimization problem becomes particularly easy for large values of \\(\\eta\\). On the other hand, the regularization term also introduces an undesirable penalty for large (positive) depths which may constrain the range of feasible depths too much to achieve satisfactory solutions. The bottom images in Figure 2 show the same evaluation as the top ones but over the line \\((1 - t)(-0.5, 0, -1) + t(0.5, 0, 1)\\). All of the points on this line give 0 reprojection error (except at the camera center \\((0, 0, 0)\\) for which the projection is undefined). The pOSE formulation (red curve) clearly penalizes solutions of small or negative projective depth but its undesirable growth for large positive values is also visible." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.091, + 0.747, + 0.108 + ], + "angle": 0, + "content": "2.3. Exponential Regularization" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.114, + 0.892, + 0.16 + ], + "angle": 0, + "content": "In this paper, we instead propose to regularize the depth using an exponential function (yellow curves in Figure 2). Specifically, we replace the affine term (3) with" + }, + { + "type": "equation", + "bbox": [ + 0.58, + 0.169, + 0.892, + 0.206 + ], + "angle": 0, + "content": "\\[\n\\ell_ {\\exp} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right) = e ^ {- \\left(\\frac {\\boldsymbol {m} _ {i j} \\boldsymbol {x} _ {i j} + z _ {i j}}{\\sqrt {\\| \\boldsymbol {m} _ {i j} \\| ^ {2} + 1}}\\right)}. \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.215, + 0.892, + 0.373 + ], + "angle": 0, + "content": "The term \\(\\frac{m_{ij}x_{ij} + z_{ij}}{\\sqrt{\\|m_{ij}\\|^2 + 1}}\\) is the length (with sign) of the projection of the vector \\((\\pmb{x}_{ij},z_{ij})\\) onto \\((m_{ij},1)\\). Note that its sign is negative when the angle between \\((\\pmb{x}_{ij},z_{ij})\\) and \\((m_{ij},1)\\) is larger than \\(90^{\\circ}\\). The exponential function will penalize such values heavily. Still, the penalty is finite for all values making it is possible to use start the optimization from anywhere. On the other hand for positive growing values the exponential function tends to 0 and therefore does not restrict the feasible projective depths as the affine term (3) does." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.374, + 0.783, + 0.389 + ], + "angle": 0, + "content": "The proposed expOSE objective is then" + }, + { + "type": "equation", + "bbox": [ + 0.509, + 0.4, + 0.892, + 0.432 + ], + "angle": 0, + "content": "\\[\n\\ell_ {\\text {e x p O S E}} = \\sum_ {i j} (1 - \\eta) \\ell_ {\\text {O S E}} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right) + \\eta \\ell_ {\\text {e x p}} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right). 
\\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.442, + 0.892, + 0.578 + ], + "angle": 0, + "content": "At first glance it may seem as if replacing (3) with (6) will yield an ill-posed problem since large depths are hardly penalized by (6). Adding a small penalty for these values to ensure a well-posed problem may therefore be warranted. Note, however, that unless there is an exact solution (with zero reprojection errors) the OSE term is not scale invariant but has a weak shrinking bias. In practice, we empirically observe that this bias is generally enough for our proposed algorithm to converge well from random starting solutions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.579, + 0.892, + 0.715 + ], + "angle": 0, + "content": "We conclude this section by noting that our proposed method is much less sensitive to parameter selection than the original pOSE model [15]. Since the shrinking bias of the OSE term is relatively weak, an increased regularization cost, due to a change of parameters, can often be compensated for by changing the scale of the reconstruction. In contrast, the choice of \\(\\eta\\) in the original pOSE model is crucial. Figure 1 shows how \\(\\eta\\) affects the reconstruction (more details about this figure are provided in Section 3.2)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.728, + 0.744, + 0.744 + ], + "angle": 0, + "content": "3. Optimization with VarPro" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.753, + 0.892, + 0.829 + ], + "angle": 0, + "content": "One of the main benefits of the pOSE formulation [15] is that it is quadratic in the elements of \\( X \\). Therefore, given values for camera matrices \\( P \\) the optimal 3D points \\( U^{*}(P) \\) can be computed in closed form using a pseudo inverse. The VarPro method [16,27,35,41] solves the reduced problem" + }, + { + "type": "equation", + "bbox": [ + 0.605, + 0.837, + 0.891, + 0.862 + ], + "angle": 0, + "content": "\\[\n\\min _ {P} \\| \\mathcal {A} (P U ^ {*} (P)) - b \\| ^ {2}, \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "using the Levenberg-Marquardt method [12, 39]. In contrast to standard Gauss-Newton type methods that optimize" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "8961" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.154, + 0.09, + 0.393, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.079, + 0.236, + 0.466, + 0.251 + ], + "angle": 0, + "content": "Figure 3. The exponential function and its Taylor approximation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.277, + 0.468, + 0.367 + ], + "angle": 0, + "content": "locally over both \\( U \\) and \\( P \\), the main benefit of the elimination of \\( U \\) is that dampening only needs to be applied to \\( P \\). This has been shown empirically to greatly improve convergence [14, 16]. The intuition is that small changes in \\( P \\) will sometimes result in large changes in \\( U \\), but this is prevented by a dampening term which causes the algorithm to stall." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.369, + 0.469, + 0.446 + ], + "angle": 0, + "content": "Since the exponential regularization term is not quadratic VarPro is not directly applicable to our formulation. We, therefore, employ an iterative approach that locally approximates (6) with a quadratic function. 
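Before the quadratic approximation is derived below, the exponential regularizer (6) and the resulting expOSE objective (7) can be written down directly, as in this minimal sketch (our own illustrative code, reusing the stacking convention assumed in the pOSE sketch earlier; not the authors' implementation).

    # Exponential regularizer (6) and expOSE objective (7).
    import numpy as np

    def exp_reg(x, z, m):
        # Signed length of the projection of (x, z) onto (m, 1): heavily
        # penalized when negative, negligible for large positive depths.
        proj = (m @ x + z) / np.sqrt(m @ m + 1.0)
        return np.exp(-proj)

    def expose_objective(P, U, observations, eta=0.01):
        X = P @ U
        total = 0.0
        for (i, j), m in observations.items():
            x, z = X[3 * i : 3 * i + 2, j], X[3 * i + 2, j]
            l_ose = np.sum((z * m - x) ** 2)   # object space error (2)
            total += (1 - eta) * l_ose + eta * exp_reg(x, z, m)
        return total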
Consider the 2nd order Taylor expansion of \\( e^{-\\boldsymbol{a}^T \\boldsymbol{y}} \\) at a point \\( \\bar{\\boldsymbol{y}} \\) given by" + }, + { + "type": "equation", + "bbox": [ + 0.084, + 0.456, + 0.469, + 0.502 + ], + "angle": 0, + "content": "\\[\ne ^ {- \\boldsymbol {a} ^ {T} \\boldsymbol {y}} \\approx e ^ {- \\boldsymbol {a} ^ {T} \\bar {\\boldsymbol {y}}} \\left(1 - \\boldsymbol {a} ^ {T} (\\boldsymbol {y} - \\bar {\\boldsymbol {y}}) + \\frac {1}{2} \\left(\\boldsymbol {a} ^ {T} (\\boldsymbol {y} - \\bar {\\boldsymbol {y}})\\right) ^ {2}\\right). \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.502, + 0.348, + 0.517 + ], + "angle": 0, + "content": "Completing squares gives the expression" + }, + { + "type": "equation", + "bbox": [ + 0.102, + 0.527, + 0.469, + 0.561 + ], + "angle": 0, + "content": "\\[\ne ^ {- \\boldsymbol {a} ^ {T} \\boldsymbol {y}} \\approx \\frac {e ^ {- \\boldsymbol {a} ^ {T} \\bar {\\boldsymbol {y}}}}{2} \\left(\\boldsymbol {a} ^ {T} (\\boldsymbol {y} - \\bar {\\boldsymbol {y}}) - 1\\right) ^ {2} + e ^ {- \\boldsymbol {a} ^ {T} \\bar {\\boldsymbol {y}}}. \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.571, + 0.469, + 0.679 + ], + "angle": 0, + "content": "Note that when minimizing with respect to \\(\\pmb{y}\\) the last term is constant and can be ignored. Since the exponential function is positive the result is a weighted linear least squares term in the unknown \\(\\pmb{y}\\). With \\(\\pmb{y} = \\begin{bmatrix} \\pmb{x}_{ij} \\\\ z_{ij} \\end{bmatrix}\\) and \\(\\pmb{a} = \\frac{1}{\\sqrt{\\|\\pmb{m}_{ij}\\|^2 + 1}} \\begin{bmatrix} \\pmb{m}_{ij} \\\\ 1 \\end{bmatrix}\\) we get our approximation" + }, + { + "type": "equation", + "bbox": [ + 0.077, + 0.689, + 0.474, + 0.745 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\ell} _ {\\exp} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right) \\approx \\frac {\\ell_ {\\exp} \\left(\\bar {\\boldsymbol {x}} _ {i j} , \\bar {z} _ {i j}\\right)}{2} \\left(\\frac {\\boldsymbol {m} _ {i j} ^ {T} \\Delta \\boldsymbol {x} _ {i j} + \\Delta z _ {i j}}{\\sqrt {\\| \\boldsymbol {m} _ {i j} \\| ^ {2} + 1}} - 1\\right) ^ {2}, \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.746, + 0.469, + 0.821 + ], + "angle": 0, + "content": "where \\(\\Delta \\pmb{x}_{ij} = \\pmb{x}_{ij} - \\bar{\\pmb{x}}_{ij}\\) and \\(\\Delta z_{ij} = z_{ij} - \\bar{z}_{ij}\\). To the left in Figure 3 we show \\(e^{-ay}\\) with \\(a = 1\\) (blue curve), and the Taylor approximation at \\(\\bar{y} = 0\\) (orange dashed curve). In the supplementary material, we compare level sets of the expOSE objective, its approximation, and pOSE." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.83, + 0.265, + 0.847 + ], + "angle": 0, + "content": "3.1. 
The EXPose Model" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.854, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Replacing the exponential regularization in (7) with the quadratic approximation (11) at \\(\\bar{y}_{ij}\\) results in a quadratic loss that can be written as \\(\\| \\mathcal{A}(PU) - b\\|^2\\), which can be" + }, + { + "type": "code_caption", + "bbox": [ + 0.51, + 0.095, + 0.812, + 0.11 + ], + "angle": 0, + "content": "Algorithm 1: VarPro for solving expOSE (7)" + }, + { + "type": "algorithm", + "bbox": [ + 0.508, + 0.112, + 0.867, + 0.553 + ], + "angle": 0, + "content": "Normalize image measurements by removing the mean and dividing by 3 standard deviations; \nSelect the inputs \\(\\eta\\) , and randomly initialize elements of \\(P\\) from a normal distribution of unit std ; \nSet \\(\\bar{y}_{ij} = [m_{ij}^T,1]^T\\) . \nSet up A and b by approximating the exponential regularization by a quadratic form around each \\(\\bar{y}_{ij}\\) . \nCompute U by minimizing (7) with \\(P\\) fixed; \nSet do update \\(= 0\\) if scheduling update of regularization is considered, otherwise do update \\(= 1\\) .. \nwhile true do \nCompute the Jacobians \\(J_{P} = A(U^{T}\\otimes \\mathcal{I})\\colon J_{U} = A(\\mathcal{I}\\otimes P)\\) and the residuals \\(r = \\operatorname {Avec}(PU) - b\\) . Compute \\(P_{\\mathrm{new}}\\) and \\(U_{\\mathrm{new}}\\) from \\(J_P,J_U,\\) and r as \\(P_{\\mathrm{new}} = P + \\Delta P\\) and \\(U_{\\mathrm{new}} = U + \\Delta U\\) ,with \\(\\Delta P = (J_P^T (\\mathcal{I} - J_UJ_U^\\dagger)J_P + \\lambda \\mathcal{I})^{-1}J_P^T r,\\) and \\(\\Delta U = -J_U^\\dagger (r + J_P\\Delta P)\\) . Evaluate the loss \\(\\ell_{\\mathrm{new}}\\) . \nif \\(\\ell_{\\mathrm{new}} < \\ell_{\\mathrm{best}}\\) then \\(\\ell_{\\mathrm{best}} = \\ell_{\\mathrm{new}}\\) . \\(P\\gets P_{\\mathrm{new}}\\) ; and \\(U\\gets U_{\\mathrm{new}}\\) . if do update then Set \\(\\bar{y}_{ij} = P_iU_j\\) . Set up A and b by approximating the regularization by a quadratic form around each \\(\\bar{y}_{ij}\\) end \nend \nif stopping criterion then if do update then break; else do update \\(= 1\\) end \nend" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.592, + 0.892, + 0.669 + ], + "angle": 0, + "content": "optimized using VarPro as described in Algorithm 1. The linear operator \\(\\mathcal{A}\\) and the vector \\(b\\) can be computed in each iteration based on the image measurements \\(\\pmb{m}_{ij}\\), the current estimations \\(\\bar{\\pmb{y}}_{ij}\\) and \\(\\eta\\). For the initial approximation of the regularization, we use \\(\\bar{\\pmb{y}}_{ij} = (\\pmb{m}_{ij},1)\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.671, + 0.892, + 0.792 + ], + "angle": 0, + "content": "Regularization update scheduling: In order to improve the convergence of the algorithm, we propose to keep the initial quadratic approximation of the regularization (11) either for a fixed number of iterations or until convergence of the initial approximation. This delays the approximation of the exponential regularization in each iteration until a stable initial solution with positive depths is found. In Section 3.2 we show empirically the advantage of doing so." 
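A minimal sketch (ours, not the authors' code) of the quadratic surrogate (11): around the current linearization point the exponential term becomes a weighted linear least-squares residual, which is what lets every step of Algorithm 1 be written in the form || A(PU) - b ||^2. Following the algorithm, the linearization point starts at (m, 1) and, when scheduling is used, is only updated once the initial approximation has converged.

    # Quadratic approximation (11) of the exponential regularizer around (x_bar, z_bar).
    import numpy as np

    def exp_reg_quadratic(x, z, x_bar, z_bar, m):
        denom = np.sqrt(m @ m + 1.0)
        weight = np.exp(-(m @ x_bar + z_bar) / denom)   # l_exp at the linearization point
        dx, dz = x - x_bar, z - z_bar
        r = (m @ dx + dz) / denom - 1.0                 # linear residual of (11)
        return 0.5 * weight * r ** 2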
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Data normalization: Since our regularization term is geometrically motivated and our approach replaces reprojection error with OSE it is important to use normalization of the image data to achieve a well-conditioned formulation [13]. Here we follow standard approaches: We first subtract the image center from all image points, then divide them with the resulting standard deviation over the image." + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8962" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.091, + 0.087, + 0.466, + 0.258 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.087, + 0.882, + 0.257 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.264, + 0.895, + 0.294 + ], + "angle": 0, + "content": "Figure 4. Comparison of convergence rate and normalized 3D error of different methods on the Dino (a) and Fountain (b) datasets. The metrics are obtained by running 100 instances starting from random initializations. In dashed we should the metrics for the pOSE baseline." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.318, + 0.386, + 0.335 + ], + "angle": 0, + "content": "3.2. Performance evaluation of expOSE" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.344, + 0.47, + 0.48 + ], + "angle": 0, + "content": "Before presenting our model for radial distortion we evaluate the effects of using exponential regularization with the standard OSE. We use the Dino (Small) [3] (36 cameras, 319 points, \\(77\\%\\) missing data) and Fountain [34] (11 cameras, 1167 points, \\(23\\%\\) missing data) datasets to evaluate the performance of expOSE with varying parameters - the weight \\(\\eta\\) and scheduling of regularization update-, and optimization strategies - VarPro, Levenberg-Marquardt (LM), and Alternating Minimization (AltMin) [7]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.483, + 0.47, + 0.668 + ], + "angle": 0, + "content": "The metrics used for the comparisons are convergence rate of the algorithm and relative 3D error to GT. The convergence rate is calculated by counting the number of times the algorithms converged to the lowest loss over 100 problem instances starting from random initializations (a threshold of \\(2\\%\\) above the smallest loss value is used). The 3D error is computed as \\(e_{3D} = \\frac{\\|U' - U_{\\mathrm{GT}}\\|}{\\|U_{\\mathrm{GT}}\\|_F}\\) where \\(U'\\) is the result of performing projective registration of the factor \\(U\\) to the ground-truth point cloud \\(U_{\\mathrm{GT}}\\). In this way, we are able to measure the quality of the factors \\(U\\) that are outputted by each method. For a fair comparison, we compute the 3D errors for solutions that converged to the desired optimum." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.669, + 0.47, + 0.761 + ], + "angle": 0, + "content": "The methods are implemented in MATLAB, and we let each method perform a maximum of 500 iterations. For the case of regularization update scheduling, which we call \\(\\exp\\mathrm{OSE}(\\mathrm{S})\\), we delay the update of the regularization quadratic approximation by 250 iterations or until the initial optimization converges - whichever occurs first." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.763, + 0.47, + 0.884 + ], + "angle": 0, + "content": "Effect of \\(\\eta\\) and scheduling: The performance of expOSE is evaluated for multiple values of \\(\\eta\\) ranging from \\(10^{-4}\\) to 0.5. The results are plotted in Figure 4. We show that expOSE is significantly more robust to \\(\\eta\\) than pOSE in terms of 3D errors (see also Figure 1). We also show that delaying the update of the quadratic approximation of the regularization results in a significant boost in convergence rate, allowing us to achieve rates similar to pOSE." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.886, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Comparison with other optimization strategies: We" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.318, + 0.895, + 0.396 + ], + "angle": 0, + "content": "compare the performance of expOSE (with and without scheduling) when using VarPro, LM and AltMin. The results confirm that, just like with pOSE, VarPro is the most reliable method for expOSE, while LM and AltMin achieve poor convergence rates." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.411, + 0.792, + 0.427 + ], + "angle": 0, + "content": "4. Robustness to Radial Distortion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.438, + 0.895, + 0.665 + ], + "angle": 0, + "content": "In the previous sections, we considered modifications to the original pOSE model which assumes a regular pinhole camera. In [18] the RpOSE model which instead uses a radial camera [20, 21, 23, 24, 29, 37, 40] is presented. This model is invariant to radial distortion which the standard pOSE model does not handle. We note however that the radial model requires more data for parameter estimation since it essentially only measures errors in one direction of the image. To address this issue we introduce an intermediate model by decomposing the reprojection error into a tangential and a radial component. By down-weighting the tangential error we obtain a model that is more robust to radial distortion than the pinhole camera but less sensitive to missing data than the radial camera. We then introduce an exponential regularization term for this model." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.676, + 0.855, + 0.693 + ], + "angle": 0, + "content": "4.1. Decoupling Tangential and Radial Errors" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.701, + 0.892, + 0.762 + ], + "angle": 0, + "content": "When working with the radial camera model it is typically assumed that the principal point and the distortion center are the center of the image and have coordinates \\((0,0)\\). We make the same assumption here." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.762, + 0.893, + 0.901 + ], + "angle": 0, + "content": "The reprojection error is obtained by taking the length of the error vector \\( e(x, z) = \\frac{x}{z} - m \\). The coordinates of this vector are given in w.r.t. the canonical image basis (1,0) and (0,1) of the image and can be interpreted as errors in the \\( x \\)- and \\( y \\)-directions respectively. For a point \\( m \\) we are interested in measuring the error in the radial direction \\( \\frac{m}{\\|m\\|} \\) and the tangential direction \\( \\frac{m_{\\perp}}{\\|m\\|} \\), where \\( m_{\\perp} \\) is the orthogonal vector to \\( m \\) (see Figure 5). We, therefore, write the error vector as a linear combination of these. 
It is not diffi" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8963" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.18, + 0.095, + 0.372, + 0.236 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.245, + 0.47, + 0.274 + ], + "angle": 0, + "content": "Figure 5. Levelsets (red ellipses) of \\(\\ell_{\\mathrm{wose}}\\) for \\(\\alpha = 0.1\\) and 0.9. Here \\(m = (0.6, 0.9)\\) and \\(z = 1\\)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.3, + 0.197, + 0.314 + ], + "angle": 0, + "content": "cult to verify that" + }, + { + "type": "equation", + "bbox": [ + 0.086, + 0.323, + 0.47, + 0.36 + ], + "angle": 0, + "content": "\\[\n\\frac {\\boldsymbol {x}}{z} - \\boldsymbol {m} = \\left(\\frac {\\boldsymbol {m} ^ {T} \\boldsymbol {x}}{z \\| \\boldsymbol {m} \\|} - \\| \\boldsymbol {m} \\|\\right) \\frac {\\boldsymbol {m}}{\\| \\boldsymbol {m} \\|} + \\frac {\\boldsymbol {m} _ {\\perp} ^ {T} \\boldsymbol {x}}{\\| \\boldsymbol {m} \\| z} \\frac {\\boldsymbol {m} _ {\\perp}}{\\| \\boldsymbol {m} \\|}. \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.367, + 0.45, + 0.387 + ], + "angle": 0, + "content": "In the basis \\(\\frac{m}{\\|m\\|}\\), \\(\\frac{m_{\\perp}}{\\|m\\|}\\) the error vector can be written as" + }, + { + "type": "equation", + "bbox": [ + 0.142, + 0.395, + 0.47, + 0.43 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {e} (\\boldsymbol {x}, z) = \\frac {1}{\\| \\boldsymbol {m} \\|} \\left[ \\begin{array}{l} \\boldsymbol {m} ^ {T} \\\\ \\boldsymbol {m} _ {\\perp} ^ {T} \\end{array} \\right] \\frac {\\boldsymbol {x}}{z} - \\binom {\\| \\boldsymbol {m} \\|} {0}. \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.438, + 0.469, + 0.513 + ], + "angle": 0, + "content": "Independently of the basis chosen, the reprojection error is nonlinear due to the division by \\( z \\), making it unsuitable for optimization. The OSE in the new basis is obtained by rescaling the reprojection error \\( e(\\pmb{x},z) \\) by the depth \\( z \\). The expression for OSE error in the new basis is therefore" + }, + { + "type": "equation", + "bbox": [ + 0.085, + 0.522, + 0.47, + 0.56 + ], + "angle": 0, + "content": "\\[\n\\left\\| z e (\\boldsymbol {x}, z) \\right\\| ^ {2} = \\left(\\frac {\\boldsymbol {m} ^ {T}}{\\| \\boldsymbol {m} \\|} \\boldsymbol {x} - \\| \\boldsymbol {m} \\| z\\right) ^ {2} + \\left(\\frac {\\boldsymbol {m} _ {\\perp} ^ {T}}{\\| \\boldsymbol {m} \\|} \\boldsymbol {x}\\right) ^ {2}. \\tag {14}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.577, + 0.388, + 0.594 + ], + "angle": 0, + "content": "4.2. Reweighting the Error Components" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.601, + 0.469, + 0.631 + ], + "angle": 0, + "content": "Radial distortion is usually modeled by modifying the projection according to" + }, + { + "type": "equation", + "bbox": [ + 0.22, + 0.64, + 0.469, + 0.667 + ], + "angle": 0, + "content": "\\[\n\\kappa_ {r} (\\boldsymbol {m}) \\boldsymbol {m} = \\frac {\\boldsymbol {x}}{z} \\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.675, + 0.47, + 0.9 + ], + "angle": 0, + "content": "where \\(\\kappa_{r}\\) is a scalar that depends on the distance to the distortion center. It is clear that the second term of (14) vanishes when inserting \\((\\pmb{x},z)\\) fulfilling (15) for any \\(\\kappa_{r}\\), but not the first term. 
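The decomposition (12)-(14) is straightforward to compute. The sketch below (our own, assuming m_perp denotes m rotated by 90 degrees as in the text) returns the radial and tangential parts of the object space error; their sum equals || z e(x, z) ||^2.

    # Split the object space error into radial and tangential components, eq. (14).
    import numpy as np

    def ose_radial_tangential(x, z, m):
        norm_m = np.linalg.norm(m)
        m_perp = np.array([-m[1], m[0]])                 # orthogonal to m
        radial = (m @ x / norm_m - norm_m * z) ** 2      # does not vanish under unknown distortion
        tangential = (m_perp @ x / norm_m) ** 2          # vanishes for any radial distortion factor
        return radial, tangential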
To handle radial distortion we could incorporate the additional parameter \\(\\kappa_{r}\\) in (14) and explicitly estimate it. Unfortunately, this results in a more complex model (with trilinear interactions) making optimization difficult. Alternatively, to achieve robustness to radial distortion we can remove the first term, as in [18]. The downside of doing this is that it removes roughly half of the data (one out of two coordinates for each projection) available for use in inference. Therefore we here propose to compensate for the unknown radial distortion by down-weighting the first term or equivalently allowing a larger standard deviation in the radial direction." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.091, + 0.892, + 0.198 + ], + "angle": 0, + "content": "Let \\(\\sigma_r^2\\) and \\(\\sigma_t^2\\) denote the uncertainties of the reprojection error \\(\\epsilon = s\\pmb{x} / z - \\pmb{m}\\) along the radial and tangential direction, respectively, and where \\(s\\) is an unknown positive scalar that models radial distortion effects and focal length scaling. Assuming the reprojection error \\(\\epsilon\\) is sampled from a 2D normal distribution \\(\\mathcal{N}(0,\\Sigma)\\), the probability of the model \\(\\{\\pmb{x},z\\}\\) given \\(\\pmb{m}\\) is" + }, + { + "type": "equation", + "bbox": [ + 0.503, + 0.206, + 0.892, + 0.248 + ], + "angle": 0, + "content": "\\[\nP (\\boldsymbol {x}, z \\mid \\boldsymbol {m}) = \\frac {1}{2 \\pi \\det (\\Sigma) ^ {1 / 2}} e ^ {- s ^ {2} \\left(\\frac {1}{s} \\boldsymbol {m} - \\boldsymbol {x} / z\\right) ^ {T} \\Sigma^ {- 1} \\left(\\frac {1}{s} \\boldsymbol {m} - \\boldsymbol {x} / z\\right)}. \\tag {16}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.25, + 0.892, + 0.28 + ], + "angle": 0, + "content": "Maximizing the likelihood (16) w.r.t. \\(\\{x,z\\}\\) is equivalent to minimizing" + }, + { + "type": "equation", + "bbox": [ + 0.527, + 0.287, + 0.892, + 0.326 + ], + "angle": 0, + "content": "\\[\n\\frac {s ^ {2}}{\\sigma_ {r} ^ {2}} \\left(\\frac {\\boldsymbol {m} ^ {T}}{\\| \\boldsymbol {m} \\|} \\frac {\\boldsymbol {x}}{z} - \\frac {1}{s} \\| \\boldsymbol {m} \\|\\right) ^ {2} + \\frac {s ^ {2}}{\\sigma_ {t} ^ {2}} \\left(\\frac {\\boldsymbol {m} _ {\\perp} ^ {T}}{\\| \\boldsymbol {m} \\|} \\frac {\\boldsymbol {x}}{z}\\right) ^ {2}, \\tag {17}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.333, + 0.892, + 0.424 + ], + "angle": 0, + "content": "where \\(\\Sigma = R^T\\mathrm{diag}(\\sigma_r^2,\\sigma_t^2)R\\) and \\(R\\) is a rotation matrix that aligns the coordinate axis with \\(\\boldsymbol {m} / \\| \\boldsymbol {m}\\|\\) and \\(\\boldsymbol {m}_{\\perp} / \\| \\boldsymbol {m}\\|\\). While the second term quadratic term of (17) is not affected by \\(s\\), in the first term \\(\\| \\boldsymbol {m}\\|\\) is weighted by \\(1 / s\\), which is undesirable as previously motivated. We propose to approximate (17) by" + }, + { + "type": "equation", + "bbox": [ + 0.522, + 0.431, + 0.892, + 0.489 + ], + "angle": 0, + "content": "\\[\n\\underbrace {\\frac {1}{\\sigma_ {r} ^ {2}}} _ {(1 - \\alpha)} \\left(\\frac {\\boldsymbol {m} ^ {T}}{\\| \\boldsymbol {m} \\|} \\frac {\\boldsymbol {x}}{z} - \\| \\boldsymbol {m} \\|\\right) ^ {2} + \\underbrace {\\frac {1}{\\sigma_ {t} ^ {2}}} _ {\\alpha} \\left(\\frac {\\boldsymbol {m} _ {\\perp} ^ {T}}{\\| \\boldsymbol {m} \\|} \\frac {\\boldsymbol {x}}{z}\\right) ^ {2}. 
\\tag {18}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.497, + 0.892, + 0.649 + ], + "angle": 0, + "content": "This approximation of the first term adds a bias to the obtained solution based on the unknown shift \\(\\left(\\frac{1}{s} - 1\\right)\\| \\boldsymbol{m}\\|\\). We regulate the effect of this bias - and thus the robustness to radial distortion - by controlling the relative weight of the first quadratic term (biased) versus the second quadratic term (unbiased) through the value of \\(\\alpha \\in [0,1]\\). For the extreme case of \\(\\alpha = 1\\) the radial component of the error is completely dropped resulting in the loss presented in [18]. Linear residuals can be obtained by replacing (18) with its component-weighted OSE counterpart" + }, + { + "type": "equation", + "bbox": [ + 0.506, + 0.655, + 0.892, + 0.704 + ], + "angle": 0, + "content": "\\[\n\\ell_ {\\mathrm {w O S E}} = (1 - \\alpha) \\left(\\frac {\\boldsymbol {m} ^ {T}}{\\| \\boldsymbol {m} \\|} \\boldsymbol {x} - \\| \\boldsymbol {m} \\| z\\right) ^ {2} + \\alpha \\left(\\frac {\\boldsymbol {m} _ {\\perp} ^ {T}}{\\| \\boldsymbol {m} \\|} \\boldsymbol {x}\\right) ^ {2}. \\tag {19}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.705, + 0.892, + 0.734 + ], + "angle": 0, + "content": "Figure 5 shows an example of level sets (in the image plane \\( z = 1 \\)) for \\( \\alpha = 0.1 \\) and 0.9." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.892, + 0.9 + ], + "angle": 0, + "content": "Note that the same approach can be used to handle unknown focal lengths. If we assume that the intrinsic calibration matrix of the camera is \\(\\mathbf{K} = \\mathrm{diag}(f, f, 1)\\), the relation between the reprojected point and the image measurement is \\(\\frac{\\kappa_r}{f} \\mathbf{m} = \\frac{\\mathbf{x}}{z}\\) and therefore the re-weighted formulation can be applied to this setting as well. An unknown/varying focal length \\(f\\) is however modeled by the standard pOSE model in contrast to \\(\\kappa_r\\) which depends on the distance between the projection and the principal point and thus cannot be included in a factorization algorithm without adding extra variables." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8964" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.077, + 0.091, + 0.468, + 0.108 + ], + "angle": 0, + "content": "4.3. Regularization for radial distortion invariance" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.115, + 0.47, + 0.286 + ], + "angle": 0, + "content": "Weighting differently the radial and tangential of the OSE does not change, in general, the exponential regularization described in Section 2. However, one must note that for the extreme case \\(\\alpha = 1\\), for a given \\(X = PU\\) the variables in every third row of \\(X\\) and \\(P\\) vanish from the OSE. In other words, decreasing the total loss will always be possible by increasing \\(z\\) through the third row of \\(P\\), and consequently decreasing the \\(e^{-\\frac{z}{\\sqrt{\\|m\\|^2 + 1}}}\\) part of the exponential regularization. 
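A minimal sketch (ours) of the component-weighted OSE (19); alpha trades the distortion-biased radial term against the unbiased tangential term, and alpha = 1 drops the radial component entirely, recovering the purely radial loss of [18].

    # Component-weighted object space error, eq. (19).
    import numpy as np

    def weighted_ose(x, z, m, alpha):
        norm_m = np.linalg.norm(m)
        m_perp = np.array([-m[1], m[0]])
        radial = (m @ x / norm_m - norm_m * z) ** 2      # biased by unknown radial distortion
        tangential = (m_perp @ x / norm_m) ** 2          # unaffected by radial distortion
        return (1.0 - alpha) * radial + alpha * tangential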
To avoid such undesirable behavior, we proposed an alternative exponential regularization for the particular case \\(\\alpha = 1\\) acting only on \\(x\\) and \\(y\\), i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.22, + 0.297, + 0.47, + 0.32 + ], + "angle": 0, + "content": "\\[\n\\ell_ {\\exp} = e ^ {- \\frac {\\boldsymbol {m} ^ {T}}{\\| \\boldsymbol {m} \\|} \\boldsymbol {x}} \\tag {20}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.333, + 0.469, + 0.392 + ], + "angle": 0, + "content": "This alternative regularization enforces the reprojection \\( \\pmb{x} \\) according to the 1D radial camera model \\( \\pmb{m} = \\lambda \\pmb{x} \\) to have positive scale \\( \\lambda > 0 \\), canceling out the shrinking bias of the OSE as in the general case." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.394, + 0.469, + 0.424 + ], + "angle": 0, + "content": "The expOSE loss for weighted radial and tangent components of the OSE can then be approximated as" + }, + { + "type": "equation", + "bbox": [ + 0.085, + 0.436, + 0.469, + 0.468 + ], + "angle": 0, + "content": "\\[\n\\ell_ {\\text {e x p O S E}} = \\sum_ {i j} (1 - \\eta) \\ell_ {\\text {w O S E}} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right) + \\eta \\tilde {\\ell} _ {\\text {e x p}} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right) \\tag {21}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.482, + 0.21, + 0.5 + ], + "angle": 0, + "content": "with \\(\\tilde{\\ell}_{\\mathrm{exp}}\\) defined as" + }, + { + "type": "equation", + "bbox": [ + 0.09, + 0.512, + 0.469, + 0.58 + ], + "angle": 0, + "content": "\\[\n\\left\\{ \\begin{array}{l l} \\frac {\\ell_ {\\exp} \\left(\\bar {x} _ {i j} , \\bar {z} _ {i j}\\right)}{2} \\left(\\frac {\\boldsymbol {m} _ {i j} ^ {T} \\Delta \\boldsymbol {x} _ {i j} + \\Delta z _ {i j}}{\\sqrt {\\| \\boldsymbol {m} _ {i j} \\| ^ {2} + 1}} - 1\\right) ^ {2}, & \\alpha \\in [ 0, 1 [ \\\\ \\frac {\\ell_ {\\exp} (\\bar {x} _ {i j})}{2} \\left(\\frac {\\boldsymbol {m} _ {i j} ^ {T} \\Delta \\boldsymbol {x} _ {i j}}{\\| \\boldsymbol {m} _ {i j} \\|} - 1\\right) ^ {2}, & \\alpha = 1 \\end{array} . \\right. \\tag {22}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.592, + 0.469, + 0.653 + ], + "angle": 0, + "content": "This radial distortion robust version of expOSE can be optimized following Algorithm 1 nonetheless since both the component-weighted OSE and the quadratic approximation of the regularization can still be written as \\(\\| \\mathcal{A}(PU) - b\\|^2\\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.667, + 0.429, + 0.685 + ], + "angle": 0, + "content": "5. Outline of Full Reconstruction Pipeline" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.693, + 0.469, + 0.814 + ], + "angle": 0, + "content": "We propose to use expOSE as a solution to uncalibrated and radial distortion invariant Structure-from-Motion. A few Bundle Adjustment steps can be performed for further refinement. The pipeline takes as input 2D image measurements of points tracked along multiple views, just like any other factorization-based SfM pipeline. The proposed radial distortion-invariant pipeline can be decomposed into the following sequential modules:" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.825, + 0.469, + 0.887 + ], + "angle": 0, + "content": "1. expOSE factorization: Given a set of image points tracked along several images, we use Algorithm 1 to obtain estimations of the uncalibrated camera matrix, and the 3D points, up to projective ambiguity." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.892, + 0.209 + ], + "angle": 0, + "content": "2. Radial distortion estimation (and camera matrix completion): Using the solution obtained with expOSE, the distortion parameters and, for \\(\\alpha = 1\\), the third row of the uncalibrated camera matrix are estimated from the equations in (15). Note that by assuming a Brown-Conrady radial distortion model [2] with \\(\\kappa(m) = \\sum_{j} k_{j} \\|m\\|^{2j}\\), for each camera a system of equations of the form" + }, + { + "type": "equation", + "bbox": [ + 0.659, + 0.209, + 0.891, + 0.243 + ], + "angle": 0, + "content": "\\[\nM _ {i} \\left[ \\begin{array}{c} p _ {i} ^ {(3)} \\\\ \\mathbf {k} \\end{array} \\right] = b _ {i} \\tag {23}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.53, + 0.25, + 0.892, + 0.451 + ], + "angle": 0, + "content": "can be obtained, where \\( p_i^{(3)} \\) is the third row of the \\( i \\)th camera matrix, and \\( \\mathbf{k} \\) is a vector of the distortion parameters. Here we use a distortion model with three parameters, \\( k_j, j = 1,\\dots,3 \\). Assuming that the distortion model is constant along all views, the overall system of equations can be written as \\( M[p^{(3)T},\\mathbf{k}^T ]^T = b \\), with \\( p \\) being a \\( 4\\times \\) #views vector with all third rows of the camera matrices. For \\( \\alpha = 1 \\) both \\( p^{(3)} \\) and \\( \\mathbf{k} \\) are unknowns and are estimated in this step. For \\( \\alpha \\neq 1 \\), the system can be simplified to \\( M\\mathbf{k} = b - Mp^{(3)} \\) since \\( p^{(3)} \\) is already estimated by expOSE. If it is assumed that there is no radial distortion and \\( \\alpha \\neq 1 \\), then this step can be completely skipped." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.459, + 0.889, + 0.474 + ], + "angle": 0, + "content": "3. Bundle adjustment: We perform local optimization of" + }, + { + "type": "equation", + "bbox": [ + 0.6, + 0.482, + 0.891, + 0.522 + ], + "angle": 0, + "content": "\\[\n\\sum_ {i j} \\left\\| \\boldsymbol {m} _ {i j} - (1 + \\kappa (\\boldsymbol {m} _ {i j})) \\frac {\\boldsymbol {x} _ {i j}}{z _ {i j}} \\right\\| ^ {2} \\tag {24}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.531, + 0.53, + 0.892, + 0.636 + ], + "angle": 0, + "content": "starting from the estimations of \\( P \\), \\( X \\), and \\( \\mathbf{k} \\) found with the previous steps. The optimization is solved using Levenberg-Marquardt algorithm. If there is no radial distortion then the parameters \\( \\mathbf{k} \\) can be set to zero and kept constant during optimization. For expOSE initialization, we observe that usually only a few steps are needed (5-10 steps)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.645, + 0.891, + 0.72 + ], + "angle": 0, + "content": "4. Euclidean update: Finally we estimate the projective transformation \\( H \\in \\mathbb{R}^{4 \\times 4} \\) such that the factorization \\( \\{PH, H^{-1}X\\} \\) is a Euclidean reconstruction. This is done by estimating the dual absolute conic as described in [12]." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.727, + 0.635, + 0.743 + ], + "angle": 0, + "content": "5.1. 
Experiments" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.892, + 0.901 + ], + "angle": 0, + "content": "The performance of the proposed pipeline is evaluated on 3 sequences from [24] with radial distortion: Grossmunster (19 cam., 1874 pts, \\(41\\%\\) missing data), Kirchenge (30 cam., 1158 pts, \\(60\\%\\) missing data), and Munterhof (20 cam., 2108 pts, \\(42\\%\\) missing data). We compare the performance when using either \\(\\exp OSE\\) (\\(\\eta = 0.01\\)), \\(\\mathrm{pOSE}\\), or \\(\\mathrm{RpOSE}\\) (both with \\(\\eta = 0.001\\)) in step 1 of the pipeline. We use \\(\\exp OSE\\) with scheduling for regularization update, as described in Section 3.2. Refinement of the solutions is done by performing up to 50 iterations of BA." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.515, + 0.957 + ], + "angle": 0, + "content": "8965" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.091, + 0.227, + 0.284 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.228, + 0.091, + 0.344, + 0.284 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.346, + 0.091, + 0.468, + 0.284 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.296, + 0.47, + 0.394 + ], + "angle": 0, + "content": "Figure 6. Visualization of reconstructions on the Grossmunster sequence. (Left) An example of one of the images on the sequence. At the bottom, we show a view of the 3D reconstruction of expOSE for \\(\\alpha = 1\\). (Right) Comparison between the top view reconstructions (black) obtained with pOSE, RpOSE and expOSE. In red we show the ground-truth 3D point cloud. All reconstructions shown here were not refined with bundle adjustment." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.426, + 0.47, + 0.653 + ], + "angle": 0, + "content": "The metrics used are convergence rate (similarly to the experiments in Section 2), 2D reprojection error, rotation error, and 3D error. In order to compute the last two, we perform Euclidean registration on the output of the pipeline, i.e. after the Euclidean update, to the ground-truth 3D point cloud. The inverse of that Euclidean transformation is applied to the camera matrices. Rotation error is then computed as \\( e_{\\mathrm{rot}} = \\mathrm{acos}\\left(\\left(\\mathrm{trace}\\left(R_i^{GT}R_i^T\\right) - 1\\right) / 2\\right) \\) and the 3D error as the median of all \\( \\| X_j - X_j^{GT}\\| \\). The values presented in Table 1 correspond to the average over all instances that converged to the desired optimum. The chosen metrics are evaluated at two points of the pipeline: after the radial distortion estimation (step 2), and after the bundle adjustment (step 3). At both stages, a metric update is performed in order to obtain a Euclidean reconstruction." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.656, + 0.468, + 0.867 + ], + "angle": 0, + "content": "The results show that expOSE clearly outperforms both pOSE and RpOSE. The difference in performance is even more evident when looking at the output of the factorizations, where expOSE was able to achieve reprojection errors that almost match the refined solution with BA. Note that in many cases expOSE even got better rotation and 3D errors than its refined counterpart. A visualization for the Grossmunter sequence is shown in Figure 6. It is also possible to notice the impact of using the regularization for radial distortion invariance as described in Section 4.3. 
For \\(\\alpha = 0.999\\) the method has slow convergence, leading to poor solutions as can be seen by the high rotation and reprojection errors. Additional results for other values of \\(\\alpha\\) and sequences are presented in the supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.468, + 0.9 + ], + "angle": 0, + "content": "In practice, as seen in these experiments, we notice that \\(\\alpha = 1\\) achieves the best results for images with radial" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.09, + 0.892, + 0.159 + ], + "angle": 0, + "content": "Table 1. Results on the Grossmunster, Kirchenge, and Munsterhof datasets (over 10 instances). For each method two rows are presented: the first consists of the results for the output of the factorization method; the second of the output of the Bundle Adjustment (+BA). In green, we show the best results for each metric." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.171, + 0.897, + 0.324 + ], + "angle": 0, + "content": "
Grossmunster | Conv. Rate | Rot. [deg] | 3D [unit] | 2D [pix]
pOSE | 50% | 148.25 | 0.762 | 18.48
pOSE + BA | 50% | 27.61 | 0.293 | 1.50
RpOSE | 90% | 2.24 | 0.082 | 2.91
RpOSE + BA | 90% | 0.53 | 0.011 | 1.48
ExpOSE α=0.999 | 100% | 44.74 | 0.227 | 41.51
ExpOSE α=0.999 + BA | 100% | 0.43 | 0.007 | 1.48
ExpOSE α=1 | 100% | 0.18 | 0.004 | 1.86
ExpOSE α=1 + BA | 100% | 0.42 | 0.006 | 1.48
" + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.324, + 0.897, + 0.46 + ], + "angle": 0, + "content": "
Kirchenge | Conv. Rate | Rot. [deg] | 3D [unit] | 2D [pix]
pOSE | 100% | 160.38 | 6.844 | 14.95
pOSE + BA | 100% | 0.72 | 0.024 | 1.22
RpOSE | 90% | 0.98 | 0.062 | 1.94
RpOSE + BA | 90% | 1.06 | 0.031 | 1.22
ExpOSE α=0.999 | 60% | 24.71 | 0.022 | 45.28
ExpOSE α=0.999 + BA | 80% | 1.19 | 0.021 | 1.22
ExpOSE α=1 | 80% | 0.51 | 0.026 | 1.57
ExpOSE α=1 + BA | 80% | 2.92 | 0.050 | 1.22
" + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.46, + 0.897, + 0.596 + ], + "angle": 0, + "content": "
Munsterhof | Conv. Rate | Rot. [deg] | 3D [unit] | 2D [pix]
pOSE | 100% | 14.01 | 0.230 | 12.08
pOSE + BA | 100% | 0.44 | 0.027 | 1.70
RpOSE | 60% | 1.00 | 0.071 | 11.96
RpOSE + BA | 60% | 0.44 | 0.027 | 1.70
ExpOSE α=0.999 | 100% | 20.13 | 0.021 | 47.71
ExpOSE α=0.999 + BA | 100% | 0.47 | 0.029 | 1.70
ExpOSE α=1 | 80% | 0.12 | 0.013 | 3.43
ExpOSE α=1 + BA | 90% | 0.45 | 0.030 | 1.70
" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.621, + 0.892, + 0.697 + ], + "angle": 0, + "content": "distortion. In the supplementary material we provide additional experiments that show the benefit of using values \\(1/2 < \\alpha < 1\\) in particular problem instances where data availability is too low for the stability of a pure radial model (e.g. few viewpoints and/or points per camera available)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.71, + 0.627, + 0.725 + ], + "angle": 0, + "content": "6. Conclusions" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.892, + 0.9 + ], + "angle": 0, + "content": "In this paper, we propose the use of exponential regularization on projective factorization problems as a way to enforce Cheirality conditions on the reconstruction. Radial distortion robustness is achieved by weighting differently the radial and tangential components of the object space error. We show that the proposed regularization results in higher reconstruction quality (that matches bundle adjustment refined solutions) while keeping the same convergence properties as state-of-the-art factorization methods and being less sensitive to the choice of the weight \\(\\eta\\) of the regularization." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "8966" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.175, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.198 + ], + "angle": 0, + "content": "[1] Srinadh Bhojanapalli, Behnam Neyshabur, and Nati Srebro. Global optimality of local search for low rank matrix recovery. In D. D. Lee, M. Sugiyama, U. V. Luxburg, I. Guyon, and R. Garnett, editors, Advances in Neural Information Processing Systems 29, pages 3873-3881. Curran Associates, Inc., 2016. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.2, + 0.44, + 0.213 + ], + "angle": 0, + "content": "[2] Dean Brown. Decentering distortion of lenses. 1966. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.214, + 0.47, + 0.268 + ], + "angle": 0, + "content": "[3] A. M. Buchanan and A. W. Fitzgibbon. Damped newton algorithms for matrix factorization with missing data. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2005. 1, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.27, + 0.469, + 0.324 + ], + "angle": 0, + "content": "[4] Alessio Del Bue, João M. F. Xavier, Lourdes Agapito, and Marco Paladini. Bilinear modeling via augmented lagrange multipliers (BALM). IEEE Trans. Pattern Anal. Mach. Intell., 34(8):1496-1508, 2012. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.326, + 0.469, + 0.38 + ], + "angle": 0, + "content": "[5] R. Cabral, F. De la Torre, J. P. Costeira, and A. Bernardino. Unifying nuclear norm and bilinear factorization approaches for low-rank matrix decomposition. In International Conference on Computer Vision (ICCV), 2013. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.382, + 0.469, + 0.424 + ], + "angle": 0, + "content": "[6] Emmanuel J. Candès and Benjamin Recht. Exact matrix completion via convex optimization. Foundations of Computational Mathematics, 9(6):717-772, 2009. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.425, + 0.469, + 0.451 + ], + "angle": 0, + "content": "[7] I. Csiszar and G. Tusnády. Information Geometry and Alternating Minimization Procedures. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.453, + 0.469, + 0.507 + ], + "angle": 0, + "content": "[8] Y. Dai, H. Li, and M. He. Projective multiview structure and motion from element-wise factorization. IEEE Transactions on Pattern Analysis and Machine Intelligence, 35(9):2238-2251, 2013. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.509, + 0.469, + 0.55 + ], + "angle": 0, + "content": "[9] Rong Ge, Chi Jin, and Yi Zheng. No spurious local minima in nonconvex low rank problems: A unified geometric analysis. arXiv preprint, arxiv:1704.00708, 2017. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.551, + 0.469, + 0.592 + ], + "angle": 0, + "content": "[10] Rong Ge, Jason D. Lee, and Tengyu Ma. Matrix completion has no spurious local minimum. In Annual Conference on Neural Information Processing Systems (NIPS), 2016. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.593, + 0.469, + 0.634 + ], + "angle": 0, + "content": "[11] Christian Grussler, Anders Rantzer, and Pontus Giselsson. Low-rank optimization with convex constraints. IEEE Transactions on Automatic Control, 63(11):4000-4007, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.635, + 0.469, + 0.676 + ], + "angle": 0, + "content": "[12] Richard Hartley and Andrew Zisserman. Multiple View Geometry in Computer Vision. Cambridge University Press, USA, 2 edition, 2003. 2, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.677, + 0.469, + 0.717 + ], + "angle": 0, + "content": "[13] Richard I. Hartley. In defense of the eight-point algorithm. IEEE Trans. Pattern Anal. Mach. Intell., 19(6):580-593, 1997. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.72, + 0.469, + 0.773 + ], + "angle": 0, + "content": "[14] Je Hyeong Hong and Andrew Fitzgibbon. Secrets of matrix factorization: Approximations, numerics, manifold optimization and random restarts. In Int. Conf. on Computer Vision, 2015. 1, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.776, + 0.469, + 0.83 + ], + "angle": 0, + "content": "[15] Je Hyeong Hong and Christopher Zach. pose: Pseudo object space error for initialization-free bundle adjustment. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2018. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.469, + 0.898 + ], + "angle": 0, + "content": "[16] J. H. Hong, C. Zach, and A. Fitzgibbon. Revisiting the variable projection method for separable nonlinear least squares problems. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 5939-5947, 2017. 1, 3, 4" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.47, + 0.898 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[17] Je Hyeong Hong, Christopher Zach, Andrew W. Fitzgibbon, and Roberto Cipolla. Projective bundle adjustment from arbitrary initialization using the variable projection method. In European Conf. on Computer Vision, 2016. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.892, + 0.203 + ], + "angle": 0, + "content": "[18] Jose Iglesias and Carl Olsson. Radial distortion invariant factorization for structure from motion. In Proceedings of the IEEE International Conference on Computer Vision, 2021. 
1, 2, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.205, + 0.892, + 0.259 + ], + "angle": 0, + "content": "[19] José Pedro Iglesias, Carl Olsson, and Marcus Valtonen Örnhag. Accurate optimization of weighted nuclear norm for non-rigid structure from motion. In European Conference on Computer Vision (ECCV), 2020. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.261, + 0.892, + 0.327 + ], + "angle": 0, + "content": "[20] Jae-Hak Kim, Yuchao Dai, Hongdong li, Xin Du, and Jonghyuk Kim. Multi-view 3d reconstruction from uncalibrated radially-symmetric cameras. In Proceedings of the IEEE International Conference on Computer Vision, pages 1896-1903, 12 2013. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.33, + 0.892, + 0.385 + ], + "angle": 0, + "content": "[21] Z. Kukelova, M. Bujnak, and T. Pajdla. Real-time solution to the absolute pose problem with unknown radial distortion and focal length. In 2013 IEEE International Conference on Computer Vision, pages 2816-2823, 2013. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.386, + 0.892, + 0.453 + ], + "angle": 0, + "content": "[22] Suryansh Kumar. Non-rigid structure from motion: Prior-free factorization method revisited. In IEEE Winter Conference on Applications of Computer Vision, WACV 2020, Snowmass Village, CO, USA, March 1-5, 2020, pages 51-60. IEEE, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.455, + 0.892, + 0.509 + ], + "angle": 0, + "content": "[23] Viktor Larsson, Torsten Sattler, Zuzana Kukelova, and Marc Pollefeys. Revisiting radial distortion absolute pose. In International Conference on Computer Vision (ICCV). IEEE, September 2019. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.511, + 0.892, + 0.566 + ], + "angle": 0, + "content": "[24] Viktor Larsson, Nicolcas Zobernig, Kasim Taskin, and Marc Pellefeys. Calibration-free structure-from-motion with calibrated radial trifocal tensors. In European Conference of Computer Vision, 2020. 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.567, + 0.892, + 0.62 + ], + "angle": 0, + "content": "[25] Ludovic Magerand and Alessio Del Bue. Practical projective structure from motion (p2sfm). In 2017 IEEE International Conference on Computer Vision (ICCV), pages 39-47, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.623, + 0.892, + 0.677 + ], + "angle": 0, + "content": "[26] Behrooz Nasihatkon, Richard I. Hartley, and Jochen Trumpf. A generalized projective reconstruction theorem and depth constraints for projective factorization. Int. J. Comput. Vis., 115(2):87-114, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.679, + 0.892, + 0.733 + ], + "angle": 0, + "content": "[27] Takayuki Okatani and Koichiro Deguchi. On the wiberg algorithm for matrix factorization in the presence of missing components. International Journal of Computer Vision, 72(3):329-337, 2007. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.735, + 0.892, + 0.803 + ], + "angle": 0, + "content": "[28] Carl Olsson, Daniele Gerosa, and Marcus Carlsson. Relaxations for non-separable cardinality/rank penalties. In 2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW), IEEE International Conference on Computer Vision Workshops, pages 162-171, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.804, + 0.892, + 0.859 + ], + "angle": 0, + "content": "[29] Carl Olsson, Viktor Larsson, and Fredrik Kahl. A quasiconvex formulation for radial cameras. 
In 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14571-14580, 2021. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.86, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[30] Marcus Valtonen Ornag, Carl Olsson, and Anders Heyden. Bilinear parameterization for differentiable rank-regularization. 2020 IEEE/CVF Conference on Computer" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "8967" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.092, + 0.469, + 0.12 + ], + "angle": 0, + "content": "Vision and Pattern Recognition Workshops (CVPRW), Jun 2020. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.469, + 0.232 + ], + "angle": 0, + "content": "[31] Dohyung Park, Anastasios Kyrillidis, Constantine Carmanis, and Sujay Sanghavi. Non-square matrix sensing without spurious local minima via the Burer-Monteiro approach. In Aarti Singh and Jerry Zhu, editors, Proceedings of the 20th International Conference on Artificial Intelligence and Statistics, volume 54 of Proceedings of Machine Learning Research, pages 65-74, Fort Lauderdale, FL, USA, 20-22 Apr 2017. PMLR. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.234, + 0.469, + 0.275 + ], + "angle": 0, + "content": "[32] Conrad J. Poelman and Takeo Kanade. A parapspective factorization method for shape and motion recovery. IEEE Trans. Pattern Anal. Mach. Intell., 19(3):206-218, 1997. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.277, + 0.469, + 0.331 + ], + "angle": 0, + "content": "[33] Benjamin Recht, Maryam Fazel, and Pablo A. Parrilo. Guaranteed minimum-rank solutions of linear matrix equations via nuclear norm minimization. SIAM Rev., 52(3):471-501, Aug. 2010. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.333, + 0.469, + 0.403 + ], + "angle": 0, + "content": "[34] C. Strecha, W. von Hansen, L. Van Gool, P. Fua, and U. Thoennessen. On benchmarking camera calibration and multi-view stereo for high resolution imagery. In 2008 IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8, 2008. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.404, + 0.469, + 0.459 + ], + "angle": 0, + "content": "[35] D. Strelow, Q. Wang, L. Si, and A. Eriksson. General, nested, and constrained wiberg minimization. IEEE Transactions on Pattern Analysis and Machine Intelligence, 38(9):1803-1815, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.46, + 0.469, + 0.53 + ], + "angle": 0, + "content": "[36] Peter F. Sturm and Bill Triggs. A factorization based algorithm for multi-image projective structure and motion. In Proceedings of the 4th European Conference on Computer Vision-Volume II - Volume II, ECCV '96, page 709-720, Berlin, Heidelberg, 1996. Springer-Verlag. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.531, + 0.469, + 0.572 + ], + "angle": 0, + "content": "[37] SriRam Thirthala and Marc Pollefeys. Radial multi-focal tensors. International Journal of Computer Vision - IJCV, 96, 06 2012. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.574, + 0.469, + 0.627 + ], + "angle": 0, + "content": "[38] Carlo Tomasi and Takeo Kanade. Shape and motion from image streams under orthography: A factorization method. International Journal of Computer Vision, 9(2):137-154, 1992. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.63, + 0.469, + 0.699 + ], + "angle": 0, + "content": "[39] Bill Triggs, Philip F. McLauchlan, Richard I. Hartley, and Andrew W. Fitzgibbon. Bundle adjustment - a modern synthesis. In Proceedings of the International Workshop on Vision Algorithms: Theory and Practice, ICCV '99, pages 298-372. Springer-Verlag, 2000. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.701, + 0.469, + 0.756 + ], + "angle": 0, + "content": "[40] R. Tsai. A versatile camera calibration technique for high-accuracy 3d machine vision metrology using off-the-shelf tv cameras and lenses. IEEE Journal on Robotics and Automation, 3(4):323-344, August 1987. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.757, + 0.47, + 0.799 + ], + "angle": 0, + "content": "[41] T. Wiberg. Computation of principal components when data are missing. In Proceedings of the Second Symposium of Computational Statistics, page 229-326, 1976. 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.799 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "8968" + } + ] +] \ No newline at end of file diff --git a/2023/expOSE_ Accurate Initialization-Free Projective Factorization Using Exponential Regularization/6f578c7a-ccab-49cb-bc75-04e328397fea_origin.pdf b/2023/expOSE_ Accurate Initialization-Free Projective Factorization Using Exponential Regularization/6f578c7a-ccab-49cb-bc75-04e328397fea_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..efbdf97c60d127d2f6dc1df66d1902b61585cd7e --- /dev/null +++ b/2023/expOSE_ Accurate Initialization-Free Projective Factorization Using Exponential Regularization/6f578c7a-ccab-49cb-bc75-04e328397fea_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39b38d9fbfe3dc352a540feb3c1209029d7d55be7dd7a4c8351e31509fef8653 +size 2105245 diff --git a/2023/expOSE_ Accurate Initialization-Free Projective Factorization Using Exponential Regularization/full.md b/2023/expOSE_ Accurate Initialization-Free Projective Factorization Using Exponential Regularization/full.md new file mode 100644 index 0000000000000000000000000000000000000000..2d6b22415c9113ca5027413f6e67052eeb779ee5 --- /dev/null +++ b/2023/expOSE_ Accurate Initialization-Free Projective Factorization Using Exponential Regularization/full.md @@ -0,0 +1,384 @@ +# expOSE: Accurate Initialization-Free Projective Factorization using Exponential Regularization + +Jose Pedro Iglesias1, Amanda Nilsson2, Carl Olsson1,2 + +1Chalmers University of Technology, Sweden + +$^{2}$ Lund University, Sweden + +# Abstract + +Bundle adjustment is a key component in practically all available Structure from Motion systems. While it is crucial for achieving accurate reconstruction, convergence to the right solution hinges on good initialization. The recently introduced factorization-based $pOSE$ methods formulate a surrogate for the bundle adjustment error without reliance on good initialization. In this paper, we show that $pOSE$ has an undesirable penalization of large depths. To address this we propose $expOSE$ which has an exponential regularization that is negligible for positive depths. To achieve efficient inference we use a quadratic approximation that allows an iterative solution with VarPro. 
Furthermore, we extend the method with radial distortion robustness by decomposing the Object Space Error into radial and tangential components. Experimental results confirm that the proposed method is robust to initialization and improves reconstruction quality compared to state-of-the-art methods even without bundle adjustment refinement. + +# 1. Introduction + +Factorization is a long-established method in Structure from Motion (SfM). It originates from [38] by Tomasi and Kanade showing how, under the orthographic camera model, structure and motion can be computed simultaneously from an image sequence using singular value decomposition (SVD). The method was later reformulated for affine cameras, including weak perspective projection [32]. Strum and Triggs [36] further extended factorization to projective cameras by accounting for projective depths. + +One appeal of these factorization algorithms is they can yield a closed-form solution by using the SVD. It is however only possible to use the SVD if every considered scene + +![](images/e5965866ff80e2755348a5a545feec6711b8d143417fa7dc4355807b775ad01b.jpg) +Figure 1. (Left) Examples of two of the images in the Fountain sequence. (Right) Reconstruction obtained with expOSE (top) and pOSE (bottom) for 3 different values of $\eta$ . Our method achieves the same convergence rate as pOSE while having a higher reconstruction quality and being less dependent on the choice of $\eta$ . + +![](images/878e459cf6478bb9da95e7f536632ff8ac1e3886a3f05018ab6b5765ba1809d3.jpg) + +point is visible throughout the whole image sequence. In cases of missing data, the SVD can be replaced with iterative methods. Simple splitting methods [4,8,22] are able to regularize singular values when computing a proximal operator, but can give rather erroneous solutions because of a low convergence rate close to the optimum. [5, 8] give an idea of convex formulation using the nuclear norm, but are usually too weak for SfM in the presence of noise [19, 30]. The papers [1, 9, 10, 31] suggest different ways to assure that direct bilinear optimization only has a global minimum. However, SfM problems with local minima do not fulfill their required conditions [3]. + +It was recently shown by Hong et al. [14-17] that direct bilinear estimation of structure and motion can be made robust to local minima in combination with the Variable Projection (VarPro) method. In [15] the objective is exchanged for the Pseudo Object Space Error (pOSE) which is a tradeoff between the object space error and a quadratic regularization term. This was later extended to a radial distortion invariant version RpOSE, presented in [18]. With their bilinear factorization structure and a large basin of convergence when using VarPro, these pOSE models tend to find + +a global minimum independently of the initialization. Additionally, both pOSE and RpOSE have in [18] been shown to be local approximations of the reprojection error, enabling iterative refinement to the maximum likelihood solution. + +In this paper, we show that the regularization term in the pOSE formulation overly penalizes large positive depths and can thereby limit the range of feasible depths too much to achieve satisfactory solutions. We instead propose regularization with an exponential penalty that is negligible for positive depths. To achieve efficient inference we use a quadratic approximation of the exponential term suitable for optimization with VarPro. 
Moreover, we extend the method with radial distortion robustness by decomposing the OSE into radial and tangent components. + +In short, the main contributions of this paper are: + +- We investigate the pOSE models' undesirable penalization of large depths and propose expOSE which has negligible regularization of positive depths; +- We formulate a quadratic approximation of the exponential regularization term in expOSE to make it suitable for optimization with VarPro and show that, with random initialization, the model achieves convergence rates similar to pOSE with significantly higher reconstruction quality; +- We extend expOSE with radial distortion robustness by decomposing the Object Space Error (OSE) into radial and tangent components and propose an SfM pipeline that is able to obtain a complete and accurate Euclidean reconstruction from uncalibrated cameras. + +# 2. Reconstruction Objectives + +In this section, we illustrate the problems with direct optimization of reprojection error and discuss how this is addressed using the pOSE model [15]. We then present our exponential regularization and show how this addresses the limitations of the pOSE model. + +# 2.1. Reprojection Error and Cheirality + +Bundle adjustment [12, 39] is the standard routine when it comes to solving the Structure-from-Motion problem. Given measured point projections $m_{ij}$ the goal is to attempt to minimize + +$$ +\sum_ {i j} \left\| \boldsymbol {m} _ {i j} - \frac {\boldsymbol {x} _ {i j}}{z _ {i j}} \right\| ^ {2}, \tag {1} +$$ + +where $\left[ \begin{array}{c} \boldsymbol{x}_{ij} \\ z_{ij} \end{array} \right] = P_i U_j$ . Here $\boldsymbol{x}_{ij}$ is a 2 vector, $z_{ij}$ is a number, referred to as the projective depth, $P_i$ is a $3 \times 4$ camera matrix and $U_i$ is a $4 \times 1$ vector containing homogeneous coordinates of the projected 3D point. Under the assumption of Gaussian image noise, this gives the maximal likelihood estimate of the camera matrices and 3D points [12]. + +![](images/9a698cec3c2119d3eb24916444caab2a3ba0f6b7a79f0c218b5c8f41864c8c0b.jpg) + +![](images/a19912a48bd7222597cb5fab164211f60ce7d2e133bb513f3a479e5f51d9d917.jpg) + +![](images/b36e17128f4398513525dca37cb88b5fe33e4e82687f6e8c67a6563f95c4e1ba.jpg) +Figure 2. Left: Objective values of the reprojection error (blue), the pOSE error $(\eta = 0.1, \text{red})$ and our proposed formulation $(\eta = 0.1, \text{yellow})$ on the lines $(1 - t)(0.5, 0, -1) + t(0.5, 0, 1)$ (top) and $(1 - t)(-0.5, 0, -1) + t(0.5, 0, 1)$ (bottom) when $m = (0.5, 0)$ . Note that the reprojection error is undefined at $z = 0$ since this corresponds to the camera center. Right: Corresponding camera and sampling line. + +![](images/0abe454e3086a43e7f3d82666ed2c9171206be9e398f081604647c1851011309.jpg) + +It is well known that optimizing (1) is difficult and requires good initialization to achieve convergence to the right solution. One of the difficulties is the division of $\boldsymbol{x}_{ij}$ by $z_{ij}$ . This creates a barrier of objective values that goes to infinity and needs to be traversed when for example moving from $(\boldsymbol{x}_{ij}, -z_{ij})$ to $(\boldsymbol{x}_{ij}, z_{ij})$ . The blue curve of Figure 2 (top) shows a 2D example of this barrier. Here we used $m = (0.5, 0)$ and sampled the function $\left( m - \frac{x}{z} \right)^2$ on the line segment $(\boldsymbol{x}, z) = (1 - t)(0.5, 0, -1) + t(0.5, 0, 1)$ . The best value over this line is at $t = 1$ which gives $(\boldsymbol{x}, z) = (0.5, 0, 1)$ . 
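As a quick numerical check of this barrier (a minimal NumPy sketch for illustration only, not part of the paper's experiments), the squared reprojection error can be sampled along the same line segment:

```python
import numpy as np

m = np.array([0.5, 0.0])
t = np.linspace(0.0, 1.0, 101)
# (x, z) = (1 - t) * (0.5, 0, -1) + t * (0.5, 0, 1): the image part stays (0.5, 0),
# while z sweeps from -1 to 1 and passes through the camera center at t = 0.5.
x = np.tile([0.5, 0.0], (t.size, 1))
z = -1.0 + 2.0 * t
with np.errstate(divide="ignore", invalid="ignore"):
    err = np.sum((m - x / z[:, None]) ** 2, axis=1)
# err decreases towards 0 at t = 1 (z = 1) but blows up around t = 0.5, where z = 0
# corresponds to the camera center and the projection is undefined.
```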
For comparison, we also plot the corresponding values of the pOSE model [15] (red) and the proposed formulation that we will describe below (yellow). In a calibrated setting the interpretation of $z_{ij}$ is the depth [12] of the observed 3D point. Hence, in practical cases, where observed points are in front of the camera, there is usually no reason to allow solutions with negative $z_{ij}$ . In the uncalibrated case $z_{ij}$ is referred to as a projective depth. It can be shown that when the data is noise free (with sufficiently many visible projections) there is always a solution where the projective depths are all positive [26] if the observed points are in front of the camera. Moreover, any other solution is projectively equivalent to this one, meaning that there is a projective 3D transformation that makes the projective depths positive [25, 26]. + +# 2.2. The pOSE Model + +In view of the above, constraining the problem to positive depths is no practical restriction. Still finding a good starting solution where all depths are positive is not a trivial + +issue. In [15] the objective (1) is exchanged for the object space error (OSE) + +$$ +\ell_ {\text {O S E}} \left(\boldsymbol {x} _ {i j}, z _ {i j}\right) = \left\| z _ {i j} \boldsymbol {m} _ {i j} - \boldsymbol {x} _ {i j} \right\| ^ {2}. \tag {2} +$$ + +Here, the scale-invariant residual of (1) has been replaced with a linear error allowing points to switch from negative to positive projective depths. It can be shown [18] that the OSE residual $z_{i} \pmb{m}_{ij} - \pmb{x}_{ij}$ is the first order Taylor expansion of the projective residual $\pmb{m}_{ij} - \frac{\pmb{x}_{ij}}{z_{ij}}$ around $(\pmb{x}_{ij}, z_{ij}) = (\pmb{m}_{ij}, 1)$ , and it is therefore in some sense the closest linear approximation that we can find. On the downside, the OSE is clearly minimized by the trivial solution $\pmb{x}_{ij} = 0$ , $z_{ij} = 0$ for all $i, j$ . Therefore [15] adds the quadratic regularization + +$$ +\ell_ {\text {a f f}} \left(\boldsymbol {x} _ {i j}\right) = \left\| \boldsymbol {x} _ {i j} - \boldsymbol {m} _ {i j} \right\| ^ {2}, \tag {3} +$$ + +which penalizes the trivial zero solution. Note that (3) and (2) both vanish when $(\pmb{x}_{ij},z_{ij}) = (\pmb{m}_{ij},1)$ . The proposed pOSE objective + +$$ +\sum_ {i j} \left((1 - \eta) \ell_ {\text {O S E}} \left(\boldsymbol {x} _ {i j}, z _ {i j}\right) + \eta \ell_ {\text {a f f}} \left(\boldsymbol {x} _ {i j}\right)\right), \tag {4} +$$ + +where $0 < \eta < 1$ , therefore allows arbitrary starting solutions but penalizes projective depths that deviate significantly from 1. The red curve of Figure 2 (top) shows pOSE values (with $\eta = 0.1$ ) over the line $(1 - t)(0.5, 0, -1) + t(0.5, 0, 1)$ . In contrast to the reprojection error, the pOSE formulation does not give any barrier at $z = 0$ . It is experimentally shown in [15] that when optimized using VarPro [16] this leads to a method that converges to the right solution in the vast majority of cases starting from random initialization (including starting points with negative depths). Note that if we column-stack the camera matrices $P_{i}$ into a matrix $P$ with 4 columns, and similarly row-stack the 3D points into a matrix $U$ with 4 rows, the resulting product $X = PU$ is a matrix of rank 4. We can therefore formulate the pOSE objective as a low-rank recovery problem + +$$ +\min _ {\operatorname {r a n k} (X) = 4} \| \mathcal {A} (X) - b \| ^ {2}, \tag {5} +$$ + +where $\mathcal{A}$ is a linear operator. 
It is well known from compressed sensing that such formulations can often be solved optimally [6, 9-11, 19, 28, 30, 33]. The optimization problem becomes particularly easy for large values of $\eta$ . On the other hand, the regularization term also introduces an undesirable penalty for large (positive) depths which may constrain the range of feasible depths too much to achieve satisfactory solutions. The bottom images in Figure 2 show the same evaluation as the top ones but over the line $(1 - t)(-0.5, 0, -1) + t(0.5, 0, 1)$ . All of the points on this line give 0 reprojection error (except at the camera center $(0, 0, 0)$ for which the projection is undefined). The pOSE formulation (red curve) clearly penalizes solutions of small or negative projective depth but its undesirable growth for large positive values is also visible. + +# 2.3. Exponential Regularization + +In this paper, we instead propose to regularize the depth using an exponential function (yellow curves in Figure 2). Specifically, we replace the affine term (3) with + +$$ +\ell_ {\exp} \left(\boldsymbol {x} _ {i j}, z _ {i j}\right) = e ^ {- \left(\frac {\boldsymbol {m} _ {i j} \boldsymbol {x} _ {i j} + z _ {i j}}{\sqrt {\| \boldsymbol {m} _ {i j} \| ^ {2} + 1}}\right)}. \tag {6} +$$ + +The term $\frac{m_{ij}x_{ij} + z_{ij}}{\sqrt{\|m_{ij}\|^2 + 1}}$ is the length (with sign) of the projection of the vector $(\pmb{x}_{ij},z_{ij})$ onto $(m_{ij},1)$ . Note that its sign is negative when the angle between $(\pmb{x}_{ij},z_{ij})$ and $(m_{ij},1)$ is larger than $90^{\circ}$ . The exponential function will penalize such values heavily. Still, the penalty is finite for all values making it is possible to use start the optimization from anywhere. On the other hand for positive growing values the exponential function tends to 0 and therefore does not restrict the feasible projective depths as the affine term (3) does. + +The proposed expOSE objective is then + +$$ +\ell_ {\text {e x p O S E}} = \sum_ {i j} (1 - \eta) \ell_ {\text {O S E}} \left(\boldsymbol {x} _ {i j}, z _ {i j}\right) + \eta \ell_ {\text {e x p}} \left(\boldsymbol {x} _ {i j}, z _ {i j}\right). \tag {7} +$$ + +At first glance it may seem as if replacing (3) with (6) will yield an ill-posed problem since large depths are hardly penalized by (6). Adding a small penalty for these values to ensure a well-posed problem may therefore be warranted. Note, however, that unless there is an exact solution (with zero reprojection errors) the OSE term is not scale invariant but has a weak shrinking bias. In practice, we empirically observe that this bias is generally enough for our proposed algorithm to converge well from random starting solutions. + +We conclude this section by noting that our proposed method is much less sensitive to parameter selection than the original pOSE model [15]. Since the shrinking bias of the OSE term is relatively weak, an increased regularization cost, due to a change of parameters, can often be compensated for by changing the scale of the reconstruction. In contrast, the choice of $\eta$ in the original pOSE model is crucial. Figure 1 shows how $\eta$ affects the reconstruction (more details about this figure are provided in Section 3.2). + +# 3. Optimization with VarPro + +One of the main benefits of the pOSE formulation [15] is that it is quadratic in the elements of $X$ . Therefore, given values for camera matrices $P$ the optimal 3D points $U^{*}(P)$ can be computed in closed form using a pseudo inverse. 
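For illustration, the following minimal NumPy sketch shows how such a closed-form update of $U$ for fixed $P$ can be set up for the pOSE objective (4); it is not the MATLAB implementation used in the experiments, and the function name and data layout are only assumptions:

```python
import numpy as np

def optimal_points_given_cameras(P_list, m, vis, eta):
    """Closed-form U*(P) for the pOSE objective (4).

    P_list : list of 3x4 camera matrices P_i
    m      : m[i][j] is the 2D measurement of point j in view i
    vis    : vis[i][j] is True if point j is visible in view i
    eta    : trade-off weight between the OSE and affine terms
    """
    n_pts = len(vis[0])
    U = np.zeros((4, n_pts))
    for j in range(n_pts):
        A_rows, b_rows = [], []
        for i, P_i in enumerate(P_list):
            if not vis[i][j]:
                continue
            x_part, z_part = P_i[:2, :], P_i[2, :]      # [x_ij; z_ij] = P_i U_j
            m_ij = np.asarray(m[i][j], dtype=float)
            # OSE residual z_ij * m_ij - x_ij, weighted by (1 - eta)
            A_rows.append(np.sqrt(1 - eta) * (np.outer(m_ij, z_part) - x_part))
            b_rows.append(np.zeros(2))
            # affine residual x_ij - m_ij, weighted by eta
            A_rows.append(np.sqrt(eta) * x_part)
            b_rows.append(np.sqrt(eta) * m_ij)
        A, b = np.vstack(A_rows), np.concatenate(b_rows)
        U[:, j] = np.linalg.pinv(A) @ b                  # pseudo-inverse solution
    return U
```

Because each point $U_j$ only enters the residuals of its own observations, the update decomposes into small independent least-squares problems, one per point.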
The VarPro method [16,27,35,41] solves the reduced problem + +$$ +\min _ {P} \| \mathcal {A} (P U ^ {*} (P)) - b \| ^ {2}, \tag {8} +$$ + +using the Levenberg-Marquardt method [12, 39]. In contrast to standard Gauss-Newton type methods that optimize + +![](images/0656bc076b95bd79594e43d9c2efdd1ac83972d8ac70d8edd4856df5f36cc0cc.jpg) +Figure 3. The exponential function and its Taylor approximation. + +locally over both $U$ and $P$ , the main benefit of the elimination of $U$ is that dampening only needs to be applied to $P$ . This has been shown empirically to greatly improve convergence [14, 16]. The intuition is that small changes in $P$ will sometimes result in large changes in $U$ , but this is prevented by a dampening term which causes the algorithm to stall. + +Since the exponential regularization term is not quadratic VarPro is not directly applicable to our formulation. We, therefore, employ an iterative approach that locally approximates (6) with a quadratic function. Consider the 2nd order Taylor expansion of $e^{-\boldsymbol{a}^T \boldsymbol{y}}$ at a point $\bar{\boldsymbol{y}}$ given by + +$$ +e ^ {- \boldsymbol {a} ^ {T} \boldsymbol {y}} \approx e ^ {- \boldsymbol {a} ^ {T} \bar {\boldsymbol {y}}} \left(1 - \boldsymbol {a} ^ {T} (\boldsymbol {y} - \bar {\boldsymbol {y}}) + \frac {1}{2} \left(\boldsymbol {a} ^ {T} (\boldsymbol {y} - \bar {\boldsymbol {y}})\right) ^ {2}\right). \tag {9} +$$ + +Completing squares gives the expression + +$$ +e ^ {- \boldsymbol {a} ^ {T} \boldsymbol {y}} \approx \frac {e ^ {- \boldsymbol {a} ^ {T} \bar {\boldsymbol {y}}}}{2} \left(\boldsymbol {a} ^ {T} (\boldsymbol {y} - \bar {\boldsymbol {y}}) - 1\right) ^ {2} + e ^ {- \boldsymbol {a} ^ {T} \bar {\boldsymbol {y}}}. \tag {10} +$$ + +Note that when minimizing with respect to $\pmb{y}$ the last term is constant and can be ignored. Since the exponential function is positive the result is a weighted linear least squares term in the unknown $\pmb{y}$ . With $\pmb{y} = \begin{bmatrix} \pmb{x}_{ij} \\ z_{ij} \end{bmatrix}$ and $\pmb{a} = \frac{1}{\sqrt{\|\pmb{m}_{ij}\|^2 + 1}} \begin{bmatrix} \pmb{m}_{ij} \\ 1 \end{bmatrix}$ we get our approximation + +$$ +\tilde {\ell} _ {\exp} \left(\boldsymbol {x} _ {i j}, z _ {i j}\right) \approx \frac {\ell_ {\exp} \left(\bar {\boldsymbol {x}} _ {i j} , \bar {z} _ {i j}\right)}{2} \left(\frac {\boldsymbol {m} _ {i j} ^ {T} \Delta \boldsymbol {x} _ {i j} + \Delta z _ {i j}}{\sqrt {\| \boldsymbol {m} _ {i j} \| ^ {2} + 1}} - 1\right) ^ {2}, \tag {11} +$$ + +where $\Delta \pmb{x}_{ij} = \pmb{x}_{ij} - \bar{\pmb{x}}_{ij}$ and $\Delta z_{ij} = z_{ij} - \bar{z}_{ij}$ . To the left in Figure 3 we show $e^{-ay}$ with $a = 1$ (blue curve), and the Taylor approximation at $\bar{y} = 0$ (orange dashed curve). In the supplementary material, we compare level sets of the expOSE objective, its approximation, and pOSE. + +# 3.1. The EXPose Model + +Replacing the exponential regularization in (7) with the quadratic approximation (11) at $\bar{y}_{ij}$ results in a quadratic loss that can be written as $\| \mathcal{A}(PU) - b\|^2$ , which can be + +Algorithm 1: VarPro for solving expOSE (7) +Normalize image measurements by removing the mean and dividing by 3 standard deviations; +Select the inputs $\eta$ , and randomly initialize elements of $P$ from a normal distribution of unit std ; +Set $\bar{y}_{ij} = [m_{ij}^T,1]^T$ . +Set up A and b by approximating the exponential regularization by a quadratic form around each $\bar{y}_{ij}$ . 
+Compute U by minimizing (7) with $P$ fixed; +Set do update $= 0$ if scheduling update of regularization is considered, otherwise do update $= 1$ .. +while true do +Compute the Jacobians $J_{P} = A(U^{T}\otimes \mathcal{I})\colon J_{U} = A(\mathcal{I}\otimes P)$ and the residuals $r = \operatorname {Avec}(PU) - b$ . Compute $P_{\mathrm{new}}$ and $U_{\mathrm{new}}$ from $J_P,J_U,$ and r as $P_{\mathrm{new}} = P + \Delta P$ and $U_{\mathrm{new}} = U + \Delta U$ ,with $\Delta P = (J_P^T (\mathcal{I} - J_UJ_U^\dagger)J_P + \lambda \mathcal{I})^{-1}J_P^T r,$ and $\Delta U = -J_U^\dagger (r + J_P\Delta P)$ . Evaluate the loss $\ell_{\mathrm{new}}$ . +if $\ell_{\mathrm{new}} < \ell_{\mathrm{best}}$ then $\ell_{\mathrm{best}} = \ell_{\mathrm{new}}$ . $P\gets P_{\mathrm{new}}$ ; and $U\gets U_{\mathrm{new}}$ . if do update then Set $\bar{y}_{ij} = P_iU_j$ . Set up A and b by approximating the regularization by a quadratic form around each $\bar{y}_{ij}$ end +end +if stopping criterion then if do update then break; else do update $= 1$ end +end + +optimized using VarPro as described in Algorithm 1. The linear operator $\mathcal{A}$ and the vector $b$ can be computed in each iteration based on the image measurements $\pmb{m}_{ij}$ , the current estimations $\bar{\pmb{y}}_{ij}$ and $\eta$ . For the initial approximation of the regularization, we use $\bar{\pmb{y}}_{ij} = (\pmb{m}_{ij},1)$ . + +Regularization update scheduling: In order to improve the convergence of the algorithm, we propose to keep the initial quadratic approximation of the regularization (11) either for a fixed number of iterations or until convergence of the initial approximation. This delays the approximation of the exponential regularization in each iteration until a stable initial solution with positive depths is found. In Section 3.2 we show empirically the advantage of doing so. + +Data normalization: Since our regularization term is geometrically motivated and our approach replaces reprojection error with OSE it is important to use normalization of the image data to achieve a well-conditioned formulation [13]. Here we follow standard approaches: We first subtract the image center from all image points, then divide them with the resulting standard deviation over the image. + +![](images/c4d0daee857254db3e212c78e3ce7dbf0f46922611429274057e5d383e53dae4.jpg) +Figure 4. Comparison of convergence rate and normalized 3D error of different methods on the Dino (a) and Fountain (b) datasets. The metrics are obtained by running 100 instances starting from random initializations. In dashed we should the metrics for the pOSE baseline. + +![](images/f904330e5b3b84f6e45f0caa92a4efcd88b3602d0aa26df42fbed5ede8d32013.jpg) + +# 3.2. Performance evaluation of expOSE + +Before presenting our model for radial distortion we evaluate the effects of using exponential regularization with the standard OSE. We use the Dino (Small) [3] (36 cameras, 319 points, $77\%$ missing data) and Fountain [34] (11 cameras, 1167 points, $23\%$ missing data) datasets to evaluate the performance of expOSE with varying parameters - the weight $\eta$ and scheduling of regularization update-, and optimization strategies - VarPro, Levenberg-Marquardt (LM), and Alternating Minimization (AltMin) [7]. + +The metrics used for the comparisons are convergence rate of the algorithm and relative 3D error to GT. 
The convergence rate is calculated by counting the number of times the algorithms converged to the lowest loss over 100 problem instances starting from random initializations (a threshold of $2\%$ above the smallest loss value is used). The 3D error is computed as $e_{3D} = \frac{\|U' - U_{\mathrm{GT}}\|}{\|U_{\mathrm{GT}}\|_F}$ where $U'$ is the result of performing projective registration of the factor $U$ to the ground-truth point cloud $U_{\mathrm{GT}}$ . In this way, we are able to measure the quality of the factors $U$ that are outputted by each method. For a fair comparison, we compute the 3D errors for solutions that converged to the desired optimum. + +The methods are implemented in MATLAB, and we let each method perform a maximum of 500 iterations. For the case of regularization update scheduling, which we call $\exp\mathrm{OSE}(\mathrm{S})$ , we delay the update of the regularization quadratic approximation by 250 iterations or until the initial optimization converges - whichever occurs first. + +Effect of $\eta$ and scheduling: The performance of expOSE is evaluated for multiple values of $\eta$ ranging from $10^{-4}$ to 0.5. The results are plotted in Figure 4. We show that expOSE is significantly more robust to $\eta$ than pOSE in terms of 3D errors (see also Figure 1). We also show that delaying the update of the quadratic approximation of the regularization results in a significant boost in convergence rate, allowing us to achieve rates similar to pOSE. + +Comparison with other optimization strategies: We + +compare the performance of expOSE (with and without scheduling) when using VarPro, LM and AltMin. The results confirm that, just like with pOSE, VarPro is the most reliable method for expOSE, while LM and AltMin achieve poor convergence rates. + +# 4. Robustness to Radial Distortion + +In the previous sections, we considered modifications to the original pOSE model which assumes a regular pinhole camera. In [18] the RpOSE model which instead uses a radial camera [20, 21, 23, 24, 29, 37, 40] is presented. This model is invariant to radial distortion which the standard pOSE model does not handle. We note however that the radial model requires more data for parameter estimation since it essentially only measures errors in one direction of the image. To address this issue we introduce an intermediate model by decomposing the reprojection error into a tangential and a radial component. By down-weighting the tangential error we obtain a model that is more robust to radial distortion than the pinhole camera but less sensitive to missing data than the radial camera. We then introduce an exponential regularization term for this model. + +# 4.1. Decoupling Tangential and Radial Errors + +When working with the radial camera model it is typically assumed that the principal point and the distortion center are the center of the image and have coordinates $(0,0)$ . We make the same assumption here. + +The reprojection error is obtained by taking the length of the error vector $e(x, z) = \frac{x}{z} - m$ . The coordinates of this vector are given in w.r.t. the canonical image basis (1,0) and (0,1) of the image and can be interpreted as errors in the $x$ - and $y$ -directions respectively. For a point $m$ we are interested in measuring the error in the radial direction $\frac{m}{\|m\|}$ and the tangential direction $\frac{m_{\perp}}{\|m\|}$ , where $m_{\perp}$ is the orthogonal vector to $m$ (see Figure 5). We, therefore, write the error vector as a linear combination of these. 
It is not diffi + +![](images/4cba7ccf2d3aa7df11ad7fd74ec869fb56d3fa7207ea8f8c32d64c2d06804bd7.jpg) +Figure 5. Levelsets (red ellipses) of $\ell_{\mathrm{wose}}$ for $\alpha = 0.1$ and 0.9. Here $m = (0.6, 0.9)$ and $z = 1$ . + +cult to verify that + +$$ +\frac {\boldsymbol {x}}{z} - \boldsymbol {m} = \left(\frac {\boldsymbol {m} ^ {T} \boldsymbol {x}}{z \| \boldsymbol {m} \|} - \| \boldsymbol {m} \|\right) \frac {\boldsymbol {m}}{\| \boldsymbol {m} \|} + \frac {\boldsymbol {m} _ {\perp} ^ {T} \boldsymbol {x}}{\| \boldsymbol {m} \| z} \frac {\boldsymbol {m} _ {\perp}}{\| \boldsymbol {m} \|}. \tag {12} +$$ + +In the basis $\frac{m}{\|m\|}$ , $\frac{m_{\perp}}{\|m\|}$ the error vector can be written as + +$$ +\boldsymbol {e} (\boldsymbol {x}, z) = \frac {1}{\| \boldsymbol {m} \|} \left[ \begin{array}{l} \boldsymbol {m} ^ {T} \\ \boldsymbol {m} _ {\perp} ^ {T} \end{array} \right] \frac {\boldsymbol {x}}{z} - \binom {\| \boldsymbol {m} \|} {0}. \tag {13} +$$ + +Independently of the basis chosen, the reprojection error is nonlinear due to the division by $z$ , making it unsuitable for optimization. The OSE in the new basis is obtained by rescaling the reprojection error $e(\pmb{x},z)$ by the depth $z$ . The expression for OSE error in the new basis is therefore + +$$ +\left\| z e (\boldsymbol {x}, z) \right\| ^ {2} = \left(\frac {\boldsymbol {m} ^ {T}}{\| \boldsymbol {m} \|} \boldsymbol {x} - \| \boldsymbol {m} \| z\right) ^ {2} + \left(\frac {\boldsymbol {m} _ {\perp} ^ {T}}{\| \boldsymbol {m} \|} \boldsymbol {x}\right) ^ {2}. \tag {14} +$$ + +# 4.2. Reweighting the Error Components + +Radial distortion is usually modeled by modifying the projection according to + +$$ +\kappa_ {r} (\boldsymbol {m}) \boldsymbol {m} = \frac {\boldsymbol {x}}{z} \tag {15} +$$ + +where $\kappa_{r}$ is a scalar that depends on the distance to the distortion center. It is clear that the second term of (14) vanishes when inserting $(\pmb{x},z)$ fulfilling (15) for any $\kappa_{r}$ , but not the first term. To handle radial distortion we could incorporate the additional parameter $\kappa_{r}$ in (14) and explicitly estimate it. Unfortunately, this results in a more complex model (with trilinear interactions) making optimization difficult. Alternatively, to achieve robustness to radial distortion we can remove the first term, as in [18]. The downside of doing this is that it removes roughly half of the data (one out of two coordinates for each projection) available for use in inference. Therefore we here propose to compensate for the unknown radial distortion by down-weighting the first term or equivalently allowing a larger standard deviation in the radial direction. + +Let $\sigma_r^2$ and $\sigma_t^2$ denote the uncertainties of the reprojection error $\epsilon = s\pmb{x} / z - \pmb{m}$ along the radial and tangential direction, respectively, and where $s$ is an unknown positive scalar that models radial distortion effects and focal length scaling. Assuming the reprojection error $\epsilon$ is sampled from a 2D normal distribution $\mathcal{N}(0,\Sigma)$ , the probability of the model $\{\pmb{x},z\}$ given $\pmb{m}$ is + +$$ +P (\boldsymbol {x}, z \mid \boldsymbol {m}) = \frac {1}{2 \pi \det (\Sigma) ^ {1 / 2}} e ^ {- s ^ {2} \left(\frac {1}{s} \boldsymbol {m} - \boldsymbol {x} / z\right) ^ {T} \Sigma^ {- 1} \left(\frac {1}{s} \boldsymbol {m} - \boldsymbol {x} / z\right)}. \tag {16} +$$ + +Maximizing the likelihood (16) w.r.t. 
$\{x,z\}$ is equivalent to minimizing + +$$ +\frac {s ^ {2}}{\sigma_ {r} ^ {2}} \left(\frac {\boldsymbol {m} ^ {T}}{\| \boldsymbol {m} \|} \frac {\boldsymbol {x}}{z} - \frac {1}{s} \| \boldsymbol {m} \|\right) ^ {2} + \frac {s ^ {2}}{\sigma_ {t} ^ {2}} \left(\frac {\boldsymbol {m} _ {\perp} ^ {T}}{\| \boldsymbol {m} \|} \frac {\boldsymbol {x}}{z}\right) ^ {2}, \tag {17} +$$ + +where $\Sigma = R^T\mathrm{diag}(\sigma_r^2,\sigma_t^2)R$ and $R$ is a rotation matrix that aligns the coordinate axis with $\boldsymbol {m} / \| \boldsymbol {m}\|$ and $\boldsymbol {m}_{\perp} / \| \boldsymbol {m}\|$ . While the second term quadratic term of (17) is not affected by $s$ , in the first term $\| \boldsymbol {m}\|$ is weighted by $1 / s$ , which is undesirable as previously motivated. We propose to approximate (17) by + +$$ +\underbrace {\frac {1}{\sigma_ {r} ^ {2}}} _ {(1 - \alpha)} \left(\frac {\boldsymbol {m} ^ {T}}{\| \boldsymbol {m} \|} \frac {\boldsymbol {x}}{z} - \| \boldsymbol {m} \|\right) ^ {2} + \underbrace {\frac {1}{\sigma_ {t} ^ {2}}} _ {\alpha} \left(\frac {\boldsymbol {m} _ {\perp} ^ {T}}{\| \boldsymbol {m} \|} \frac {\boldsymbol {x}}{z}\right) ^ {2}. \tag {18} +$$ + +This approximation of the first term adds a bias to the obtained solution based on the unknown shift $\left(\frac{1}{s} - 1\right)\| \boldsymbol{m}\|$ . We regulate the effect of this bias - and thus the robustness to radial distortion - by controlling the relative weight of the first quadratic term (biased) versus the second quadratic term (unbiased) through the value of $\alpha \in [0,1]$ . For the extreme case of $\alpha = 1$ the radial component of the error is completely dropped resulting in the loss presented in [18]. Linear residuals can be obtained by replacing (18) with its component-weighted OSE counterpart + +$$ +\ell_ {\mathrm {w O S E}} = (1 - \alpha) \left(\frac {\boldsymbol {m} ^ {T}}{\| \boldsymbol {m} \|} \boldsymbol {x} - \| \boldsymbol {m} \| z\right) ^ {2} + \alpha \left(\frac {\boldsymbol {m} _ {\perp} ^ {T}}{\| \boldsymbol {m} \|} \boldsymbol {x}\right) ^ {2}. \tag {19} +$$ + +Figure 5 shows an example of level sets (in the image plane $z = 1$ ) for $\alpha = 0.1$ and 0.9. + +Note that the same approach can be used to handle unknown focal lengths. If we assume that the intrinsic calibration matrix of the camera is $\mathbf{K} = \mathrm{diag}(f, f, 1)$ , the relation between the reprojected point and the image measurement is $\frac{\kappa_r}{f} \mathbf{m} = \frac{\mathbf{x}}{z}$ and therefore the re-weighted formulation can be applied to this setting as well. An unknown/varying focal length $f$ is however modeled by the standard pOSE model in contrast to $\kappa_r$ which depends on the distance between the projection and the principal point and thus cannot be included in a factorization algorithm without adding extra variables. + +# 4.3. Regularization for radial distortion invariance + +Weighting differently the radial and tangential of the OSE does not change, in general, the exponential regularization described in Section 2. However, one must note that for the extreme case $\alpha = 1$ , for a given $X = PU$ the variables in every third row of $X$ and $P$ vanish from the OSE. In other words, decreasing the total loss will always be possible by increasing $z$ through the third row of $P$ , and consequently decreasing the $e^{-\frac{z}{\sqrt{\|m\|^2 + 1}}}$ part of the exponential regularization. 
To avoid such undesirable behavior, we proposed an alternative exponential regularization for the particular case $\alpha = 1$ acting only on $x$ and $y$ , i.e., + +$$ +\ell_ {\exp} = e ^ {- \frac {\boldsymbol {m} ^ {T}}{\| \boldsymbol {m} \|} \boldsymbol {x}} \tag {20} +$$ + +This alternative regularization enforces the reprojection $\pmb{x}$ according to the 1D radial camera model $\pmb{m} = \lambda \pmb{x}$ to have positive scale $\lambda > 0$ , canceling out the shrinking bias of the OSE as in the general case. + +The expOSE loss for weighted radial and tangent components of the OSE can then be approximated as + +$$ +\ell_ {\text {e x p O S E}} = \sum_ {i j} (1 - \eta) \ell_ {\text {w O S E}} \left(\boldsymbol {x} _ {i j}, z _ {i j}\right) + \eta \tilde {\ell} _ {\text {e x p}} \left(\boldsymbol {x} _ {i j}, z _ {i j}\right) \tag {21} +$$ + +with $\tilde{\ell}_{\mathrm{exp}}$ defined as + +$$ +\left\{ \begin{array}{l l} \frac {\ell_ {\exp} \left(\bar {x} _ {i j} , \bar {z} _ {i j}\right)}{2} \left(\frac {\boldsymbol {m} _ {i j} ^ {T} \Delta \boldsymbol {x} _ {i j} + \Delta z _ {i j}}{\sqrt {\| \boldsymbol {m} _ {i j} \| ^ {2} + 1}} - 1\right) ^ {2}, & \alpha \in [ 0, 1 [ \\ \frac {\ell_ {\exp} (\bar {x} _ {i j})}{2} \left(\frac {\boldsymbol {m} _ {i j} ^ {T} \Delta \boldsymbol {x} _ {i j}}{\| \boldsymbol {m} _ {i j} \|} - 1\right) ^ {2}, & \alpha = 1 \end{array} . \right. \tag {22} +$$ + +This radial distortion robust version of expOSE can be optimized following Algorithm 1 nonetheless since both the component-weighted OSE and the quadratic approximation of the regularization can still be written as $\| \mathcal{A}(PU) - b\|^2$ . + +# 5. Outline of Full Reconstruction Pipeline + +We propose to use expOSE as a solution to uncalibrated and radial distortion invariant Structure-from-Motion. A few Bundle Adjustment steps can be performed for further refinement. The pipeline takes as input 2D image measurements of points tracked along multiple views, just like any other factorization-based SfM pipeline. The proposed radial distortion-invariant pipeline can be decomposed into the following sequential modules: + +1. expOSE factorization: Given a set of image points tracked along several images, we use Algorithm 1 to obtain estimations of the uncalibrated camera matrix, and the 3D points, up to projective ambiguity. + +2. Radial distortion estimation (and camera matrix completion): Using the solution obtained with expOSE, the distortion parameters and, for $\alpha = 1$ , the third row of the uncalibrated camera matrix are estimated from the equations in (15). Note that by assuming a Brown-Conrady radial distortion model [2] with $\kappa(m) = \sum_{j} k_{j} \|m\|^{2j}$ , for each camera a system of equations of the form + +$$ +M _ {i} \left[ \begin{array}{c} p _ {i} ^ {(3)} \\ \mathbf {k} \end{array} \right] = b _ {i} \tag {23} +$$ + +can be obtained, where $p_i^{(3)}$ is the third row of the $i$ th camera matrix, and $\mathbf{k}$ is a vector of the distortion parameters. Here we use a distortion model with three parameters, $k_j, j = 1,\dots,3$ . Assuming that the distortion model is constant along all views, the overall system of equations can be written as $M[p^{(3)T},\mathbf{k}^T ]^T = b$ , with $p$ being a $4\times$ #views vector with all third rows of the camera matrices. For $\alpha = 1$ both $p^{(3)}$ and $\mathbf{k}$ are unknowns and are estimated in this step. 
# 5. Outline of Full Reconstruction Pipeline

We propose to use expOSE as a solution to uncalibrated and radial distortion invariant Structure-from-Motion. A few Bundle Adjustment steps can be performed for further refinement. The pipeline takes as input 2D image measurements of points tracked along multiple views, just like any other factorization-based SfM pipeline. The proposed radial distortion-invariant pipeline can be decomposed into the following sequential modules:

1. expOSE factorization: Given a set of image points tracked along several images, we use Algorithm 1 to obtain estimates of the uncalibrated camera matrices and the 3D points, up to a projective ambiguity.

2. Radial distortion estimation (and camera matrix completion): Using the solution obtained with expOSE, the distortion parameters and, for $\alpha = 1$, the third row of the uncalibrated camera matrix are estimated from the equations in (15). Note that by assuming a Brown-Conrady radial distortion model [2] with $\kappa(m) = \sum_{j} k_{j} \|m\|^{2j}$, for each camera a system of equations of the form

$$
M _ {i} \left[ \begin{array}{c} p _ {i} ^ {(3)} \\ \mathbf {k} \end{array} \right] = b _ {i} \tag {23}
$$

can be obtained, where $p_i^{(3)}$ is the third row of the $i$-th camera matrix and $\mathbf{k}$ is a vector of the distortion parameters. Here we use a distortion model with three parameters, $k_j, j = 1,\dots,3$. Assuming that the distortion model is constant across all views, the overall system of equations can be written as $M[p^{(3)T},\mathbf{k}^T ]^T = b$, where $p^{(3)}$ is a vector of length $4\times$#views stacking the third rows of all camera matrices. For $\alpha = 1$ both $p^{(3)}$ and $\mathbf{k}$ are unknowns and are estimated in this step. For $\alpha \neq 1$, the system can be simplified to $M\mathbf{k} = b - Mp^{(3)}$ since $p^{(3)}$ is already estimated by expOSE. If it is assumed that there is no radial distortion and $\alpha \neq 1$, this step can be skipped entirely.

3. Bundle adjustment: We perform local optimization of

$$
\sum_ {i j} \left\| \boldsymbol {m} _ {i j} - (1 + \kappa (\boldsymbol {m} _ {i j})) \frac {\boldsymbol {x} _ {i j}}{z _ {i j}} \right\| ^ {2} \tag {24}
$$

starting from the estimates of $P$, $X$, and $\mathbf{k}$ found in the previous steps. The optimization is solved using the Levenberg-Marquardt algorithm. If there is no radial distortion, the parameters $\mathbf{k}$ can be set to zero and kept constant during optimization. When initializing from expOSE, we observe that usually only a few steps (5-10) are needed.

4. Euclidean update: Finally, we estimate the projective transformation $H \in \mathbb{R}^{4 \times 4}$ such that the factorization $\{PH, H^{-1}X\}$ is a Euclidean reconstruction. This is done by estimating the dual absolute conic as described in [12].

# 5.1. Experiments

The performance of the proposed pipeline is evaluated on 3 sequences from [24] with radial distortion: Grossmunster (19 cam., 1874 pts, $41\%$ missing data), Kirchenge (30 cam., 1158 pts, $60\%$ missing data), and Munsterhof (20 cam., 2108 pts, $42\%$ missing data). We compare the performance when using either expOSE ($\eta = 0.01$), pOSE, or RpOSE (both with $\eta = 0.001$) in step 1 of the pipeline. We use expOSE with scheduling for the regularization update, as described in Section 3.2. Refinement of the solutions is done by performing up to 50 iterations of BA.

![](images/573df3627b32e8b669936dd217a28fb41e7094316edabcd5482d949197f38cec.jpg)
Figure 6. Visualization of reconstructions on the Grossmunster sequence. (Left) An example of one of the images in the sequence. At the bottom, we show a view of the 3D reconstruction of expOSE for $\alpha = 1$. (Right) Comparison between the top view reconstructions (black) obtained with pOSE, RpOSE and expOSE. In red we show the ground-truth 3D point cloud. None of the reconstructions shown here were refined with bundle adjustment.

![](images/263cd2695e7d9d4ca5166ce4c2c5580b9ec9b7b2071a9239550e40fea3c9848a.jpg)

![](images/682f9d74e513e408789f632deb0127253eb2582fb69e09d4649e43ff4eb532fc.jpg)

The metrics used are convergence rate (similarly to the experiments in Section 2), 2D reprojection error, rotation error, and 3D error. In order to compute the last two, we perform Euclidean registration of the output of the pipeline (i.e. after the Euclidean update) to the ground-truth 3D point cloud. The inverse of that Euclidean transformation is applied to the camera matrices. The rotation error is then computed as $e_{\mathrm{rot}} = \mathrm{acos}\left(\left(\mathrm{trace}\left(R_i^{GT}R_i^T\right) - 1\right) / 2\right)$ and the 3D error as the median of all $\| X_j - X_j^{GT}\|$. The values presented in Table 1 correspond to the average over all instances that converged to the desired optimum. The chosen metrics are evaluated at two points of the pipeline: after the radial distortion estimation (step 2), and after the bundle adjustment (step 3). At both stages, a metric update is performed in order to obtain a Euclidean reconstruction.
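As a small sketch of how these two 3D metrics can be computed (helper names are ours; it assumes the reconstruction has already been Euclidean-registered to the ground truth as described above):

```python
import numpy as np

def rotation_error_deg(R_gt, R_est):
    """e_rot = acos((trace(R_gt R_est^T) - 1) / 2), returned in degrees."""
    c = (np.trace(R_gt @ R_est.T) - 1.0) / 2.0
    return np.degrees(np.arccos(np.clip(c, -1.0, 1.0)))  # clip guards against round-off

def point_error(X_est, X_gt):
    """Median of the per-point distances ||X_j - X_j^GT||, inputs of shape (n, 3)."""
    return np.median(np.linalg.norm(X_est - X_gt, axis=1))
```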
The results show that expOSE clearly outperforms both pOSE and RpOSE. The difference in performance is even more evident when looking at the output of the factorizations, where expOSE achieves reprojection errors that almost match the BA-refined solutions. Note that in many cases expOSE even obtains better rotation and 3D errors than its refined counterpart. A visualization for the Grossmunster sequence is shown in Figure 6. It is also possible to notice the impact of the regularization for radial distortion invariance described in Section 4.3. For $\alpha = 0.999$ the method has slow convergence, leading to poor solutions, as can be seen from the high rotation and reprojection errors. Additional results for other values of $\alpha$ and other sequences are presented in the supplementary material.

Table 1. Results on the Grossmunster, Kirchenge, and Munsterhof datasets (over 10 instances). For each method two rows are presented: the first contains the results for the output of the factorization method; the second the output of the Bundle Adjustment (+BA). In green, we show the best results for each metric.
| Grossmunster | Conv. Rate | Rot. [deg] | 3D [unit] | 2D [pix] |
| --- | --- | --- | --- | --- |
| pOSE | 50% | 148.25 | 0.762 | 18.48 |
| pOSE + BA | 50% | 27.61 | 0.293 | 1.50 |
| RpOSE | 90% | 2.24 | 0.082 | 2.91 |
| RpOSE + BA | 90% | 0.53 | 0.011 | 1.48 |
| expOSE α=0.999 | 100% | 44.74 | 0.227 | 41.51 |
| expOSE α=0.999 + BA | 100% | 0.43 | 0.007 | 1.48 |
| expOSE α=1 | 100% | 0.18 | 0.004 | 1.86 |
| expOSE α=1 + BA | 100% | 0.42 | 0.006 | 1.48 |
+ +
| Kirchenge | Conv. Rate | Rot. [deg] | 3D [unit] | 2D [pix] |
| --- | --- | --- | --- | --- |
| pOSE | 100% | 160.38 | 6.844 | 14.95 |
| pOSE + BA | 100% | 0.72 | 0.024 | 1.22 |
| RpOSE | 90% | 0.98 | 0.062 | 1.94 |
| RpOSE + BA | 90% | 1.06 | 0.031 | 1.22 |
| expOSE α=0.999 | 60% | 24.71 | 0.022 | 45.28 |
| expOSE α=0.999 + BA | 80% | 1.19 | 0.021 | 1.22 |
| expOSE α=1 | 80% | 0.51 | 0.026 | 1.57 |
| expOSE α=1 + BA | 80% | 2.92 | 0.050 | 1.22 |
+ +
| Munsterhof | Conv. Rate | Rot. [deg] | 3D [unit] | 2D [pix] |
| --- | --- | --- | --- | --- |
| pOSE | 100% | 14.01 | 0.230 | 12.08 |
| pOSE + BA | 100% | 0.44 | 0.027 | 1.70 |
| RpOSE | 60% | 1.00 | 0.071 | 11.96 |
| RpOSE + BA | 60% | 0.44 | 0.027 | 1.70 |
| expOSE α=0.999 | 100% | 20.13 | 0.021 | 47.71 |
| expOSE α=0.999 + BA | 100% | 0.47 | 0.029 | 1.70 |
| expOSE α=1 | 80% | 0.12 | 0.013 | 3.43 |
| expOSE α=1 + BA | 90% | 0.45 | 0.030 | 1.70 |
In practice, as seen in these experiments, we notice that $\alpha = 1$ achieves the best results for images with radial distortion. In the supplementary material we provide additional experiments that show the benefit of using values $1/2 < \alpha < 1$ in particular problem instances where data availability is too low for the stability of a pure radial model (e.g. few viewpoints and/or few points per camera available).

# 6. Conclusions

In this paper, we propose the use of exponential regularization in projective factorization problems as a way to enforce cheirality conditions on the reconstruction. Radial distortion robustness is achieved by weighting the radial and tangential components of the object space error differently. We show that the proposed regularization results in higher reconstruction quality (matching bundle-adjustment-refined solutions) while keeping the same convergence properties as state-of-the-art factorization methods and being less sensitive to the choice of the regularization weight $\eta$.

# References

[1] Srinadh Bhojanapalli, Behnam Neyshabur, and Nati Srebro. Global optimality of local search for low rank matrix recovery. In D. D. Lee, M. Sugiyama, U. V. Luxburg, I. Guyon, and R. Garnett, editors, Advances in Neural Information Processing Systems 29, pages 3873-3881. Curran Associates, Inc., 2016. 1
[2] Dean Brown. Decentering distortion of lenses. 1966. 7
[3] A. M. Buchanan and A. W. Fitzgibbon. Damped Newton algorithms for matrix factorization with missing data. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2005. 1, 5
[4] Alessio Del Bue, João M. F. Xavier, Lourdes Agapito, and Marco Paladini. Bilinear modeling via augmented Lagrange multipliers (BALM). IEEE Trans. Pattern Anal. Mach. Intell., 34(8):1496-1508, 2012. 1
[5] R. Cabral, F. De la Torre, J. P. Costeira, and A. Bernardino. Unifying nuclear norm and bilinear factorization approaches for low-rank matrix decomposition. In International Conference on Computer Vision (ICCV), 2013. 1
[6] Emmanuel J. Candès and Benjamin Recht. Exact matrix completion via convex optimization. Foundations of Computational Mathematics, 9(6):717-772, 2009. 3
[7] I. Csiszar and G. Tusnády. Information Geometry and Alternating Minimization Procedures. 5
[8] Y. Dai, H. Li, and M. He. Projective multiview structure and motion from element-wise factorization. IEEE Transactions on Pattern Analysis and Machine Intelligence, 35(9):2238-2251, 2013. 1
[9] Rong Ge, Chi Jin, and Yi Zheng. No spurious local minima in nonconvex low rank problems: A unified geometric analysis. arXiv preprint, arxiv:1704.00708, 2017. 1, 3
[10] Rong Ge, Jason D. Lee, and Tengyu Ma. Matrix completion has no spurious local minimum. In Annual Conference on Neural Information Processing Systems (NIPS), 2016. 1, 3
[11] Christian Grussler, Anders Rantzer, and Pontus Giselsson. Low-rank optimization with convex constraints. IEEE Transactions on Automatic Control, 63(11):4000-4007, 2018. 3
[12] Richard Hartley and Andrew Zisserman. Multiple View Geometry in Computer Vision. Cambridge University Press, USA, 2 edition, 2003. 2, 3, 7
[13] Richard I. Hartley. In defense of the eight-point algorithm. IEEE Trans. Pattern Anal. Mach. Intell., 19(6):580-593, 1997. 4
[14] Je Hyeong Hong and Andrew Fitzgibbon. Secrets of matrix factorization: Approximations, numerics, manifold optimization and random restarts. In Int. Conf. on Computer Vision, 2015. 1, 4
[15] Je Hyeong Hong and Christopher Zach. pOSE: Pseudo object space error for initialization-free bundle adjustment.
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2018. 1, 2, 3
[16] J. H. Hong, C. Zach, and A. Fitzgibbon. Revisiting the variable projection method for separable nonlinear least squares problems. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 5939-5947, 2017. 1, 3, 4
[17] Je Hyeong Hong, Christopher Zach, Andrew W. Fitzgibbon, and Roberto Cipolla. Projective bundle adjustment from arbitrary initialization using the variable projection method. In European Conf. on Computer Vision, 2016. 1
[18] Jose Iglesias and Carl Olsson. Radial distortion invariant factorization for structure from motion. In Proceedings of the IEEE International Conference on Computer Vision, 2021. 1, 2, 3, 5, 6
[19] José Pedro Iglesias, Carl Olsson, and Marcus Valtonen Örnhag. Accurate optimization of weighted nuclear norm for non-rigid structure from motion. In European Conference on Computer Vision (ECCV), 2020. 1, 3
[20] Jae-Hak Kim, Yuchao Dai, Hongdong Li, Xin Du, and Jonghyuk Kim. Multi-view 3d reconstruction from uncalibrated radially-symmetric cameras. In Proceedings of the IEEE International Conference on Computer Vision, pages 1896-1903, 12 2013. 5
[21] Z. Kukelova, M. Bujnak, and T. Pajdla. Real-time solution to the absolute pose problem with unknown radial distortion and focal length. In 2013 IEEE International Conference on Computer Vision, pages 2816-2823, 2013. 5
[22] Suryansh Kumar. Non-rigid structure from motion: Prior-free factorization method revisited. In IEEE Winter Conference on Applications of Computer Vision, WACV 2020, Snowmass Village, CO, USA, March 1-5, 2020, pages 51-60. IEEE, 2020. 1
[23] Viktor Larsson, Torsten Sattler, Zuzana Kukelova, and Marc Pollefeys. Revisiting radial distortion absolute pose. In International Conference on Computer Vision (ICCV). IEEE, September 2019. 5
[24] Viktor Larsson, Nicolas Zobernig, Kasim Taskin, and Marc Pollefeys. Calibration-free structure-from-motion with calibrated radial trifocal tensors. In European Conference on Computer Vision, 2020. 5, 7
[25] Ludovic Magerand and Alessio Del Bue. Practical projective structure from motion (p2sfm). In 2017 IEEE International Conference on Computer Vision (ICCV), pages 39-47, 2017. 2
[26] Behrooz Nasihatkon, Richard I. Hartley, and Jochen Trumpf. A generalized projective reconstruction theorem and depth constraints for projective factorization. Int. J. Comput. Vis., 115(2):87-114, 2015. 2
[27] Takayuki Okatani and Koichiro Deguchi. On the Wiberg algorithm for matrix factorization in the presence of missing components. International Journal of Computer Vision, 72(3):329-337, 2007. 3
[28] Carl Olsson, Daniele Gerosa, and Marcus Carlsson. Relaxations for non-separable cardinality/rank penalties. In 2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW), pages 162-171, 2021. 3
[29] Carl Olsson, Viktor Larsson, and Fredrik Kahl. A quasiconvex formulation for radial cameras. In 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14571-14580, 2021. 5
[30] Marcus Valtonen Örnhag, Carl Olsson, and Anders Heyden. Bilinear parameterization for differentiable rank-regularization. 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), Jun 2020. 1, 3
[31] Dohyung Park, Anastasios Kyrillidis, Constantine Caramanis, and Sujay Sanghavi.
Non-square matrix sensing without spurious local minima via the Burer-Monteiro approach. In Aarti Singh and Jerry Zhu, editors, Proceedings of the 20th International Conference on Artificial Intelligence and Statistics, volume 54 of Proceedings of Machine Learning Research, pages 65-74, Fort Lauderdale, FL, USA, 20-22 Apr 2017. PMLR. 1
[32] Conrad J. Poelman and Takeo Kanade. A paraperspective factorization method for shape and motion recovery. IEEE Trans. Pattern Anal. Mach. Intell., 19(3):206-218, 1997. 1
[33] Benjamin Recht, Maryam Fazel, and Pablo A. Parrilo. Guaranteed minimum-rank solutions of linear matrix equations via nuclear norm minimization. SIAM Rev., 52(3):471-501, Aug. 2010. 3
[34] C. Strecha, W. von Hansen, L. Van Gool, P. Fua, and U. Thoennessen. On benchmarking camera calibration and multi-view stereo for high resolution imagery. In 2008 IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8, 2008. 5
[35] D. Strelow, Q. Wang, L. Si, and A. Eriksson. General, nested, and constrained Wiberg minimization. IEEE Transactions on Pattern Analysis and Machine Intelligence, 38(9):1803-1815, 2016. 3
[36] Peter F. Sturm and Bill Triggs. A factorization based algorithm for multi-image projective structure and motion. In Proceedings of the 4th European Conference on Computer Vision - Volume II, ECCV '96, pages 709-720, Berlin, Heidelberg, 1996. Springer-Verlag. 1
[37] SriRam Thirthala and Marc Pollefeys. Radial multi-focal tensors. International Journal of Computer Vision - IJCV, 96, 06 2012. 5
[38] Carlo Tomasi and Takeo Kanade. Shape and motion from image streams under orthography: A factorization method. International Journal of Computer Vision, 9(2):137-154, 1992. 1
[39] Bill Triggs, Philip F. McLauchlan, Richard I. Hartley, and Andrew W. Fitzgibbon. Bundle adjustment - a modern synthesis. In Proceedings of the International Workshop on Vision Algorithms: Theory and Practice, ICCV '99, pages 298-372. Springer-Verlag, 2000. 2, 3
[40] R. Tsai. A versatile camera calibration technique for high-accuracy 3D machine vision metrology using off-the-shelf TV cameras and lenses. IEEE Journal on Robotics and Automation, 3(4):323-344, August 1987. 5
[41] T. Wiberg. Computation of principal components when data are missing. In Proceedings of the Second Symposium of Computational Statistics, pages 229-326, 1976.
3 \ No newline at end of file diff --git a/2023/expOSE_ Accurate Initialization-Free Projective Factorization Using Exponential Regularization/images.zip b/2023/expOSE_ Accurate Initialization-Free Projective Factorization Using Exponential Regularization/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..848a9678dfa3bd2afe8c50eab4b8886ca0a2c008 --- /dev/null +++ b/2023/expOSE_ Accurate Initialization-Free Projective Factorization Using Exponential Regularization/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1bf6a2855155554c6673e2790a98f3aec930b8e6ea6ad92564d3a7f3dbb39ef9 +size 422937 diff --git a/2023/expOSE_ Accurate Initialization-Free Projective Factorization Using Exponential Regularization/layout.json b/2023/expOSE_ Accurate Initialization-Free Projective Factorization Using Exponential Regularization/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..1217291fd8cf7942be40c59ac0c0f49fec293726 --- /dev/null +++ b/2023/expOSE_ Accurate Initialization-Free Projective Factorization Using Exponential Regularization/layout.json @@ -0,0 +1,11514 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 48, + 103, + 544, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 103, + 544, + 138 + ], + "spans": [ + { + "bbox": [ + 48, + 103, + 544, + 138 + ], + "type": "text", + "content": "expOSE: Accurate Initialization-Free Projective Factorization using Exponential Regularization" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 163, + 160, + 426, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 163, + 160, + 426, + 175 + ], + "spans": [ + { + "bbox": [ + 163, + 160, + 426, + 175 + ], + "type": "text", + "content": "Jose Pedro Iglesias1, Amanda Nilsson2, Carl Olsson1,2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 186, + 180, + 407, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 180, + 407, + 195 + ], + "spans": [ + { + "bbox": [ + 186, + 180, + 407, + 195 + ], + "type": "text", + "content": "1Chalmers University of Technology, Sweden" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 233, + 195, + 361, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 195, + 361, + 209 + ], + "spans": [ + { + "bbox": [ + 233, + 195, + 361, + 209 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 233, + 195, + 361, + 209 + ], + "type": "text", + "content": "Lund University, Sweden" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 143, + 236, + 191, + 249 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 236, + 191, + 249 + ], + "spans": [ + { + "bbox": [ + 143, + 236, + 191, + 249 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 262, + 290, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 262, + 290, + 478 + ], + "spans": [ + { + "bbox": [ + 45, + 262, + 290, + 478 + ], + "type": "text", + "content": "Bundle adjustment is a key component in practically all available Structure from Motion systems. While it is crucial for achieving accurate reconstruction, convergence to the right solution hinges on good initialization. 
The recently introduced factorization-based " + }, + { + "bbox": [ + 45, + 262, + 290, + 478 + ], + "type": "inline_equation", + "content": "pOSE" + }, + { + "bbox": [ + 45, + 262, + 290, + 478 + ], + "type": "text", + "content": " methods formulate a surrogate for the bundle adjustment error without reliance on good initialization. In this paper, we show that " + }, + { + "bbox": [ + 45, + 262, + 290, + 478 + ], + "type": "inline_equation", + "content": "pOSE" + }, + { + "bbox": [ + 45, + 262, + 290, + 478 + ], + "type": "text", + "content": " has an undesirable penalization of large depths. To address this we propose " + }, + { + "bbox": [ + 45, + 262, + 290, + 478 + ], + "type": "inline_equation", + "content": "expOSE" + }, + { + "bbox": [ + 45, + 262, + 290, + 478 + ], + "type": "text", + "content": " which has an exponential regularization that is negligible for positive depths. To achieve efficient inference we use a quadratic approximation that allows an iterative solution with VarPro. Furthermore, we extend the method with radial distortion robustness by decomposing the Object Space Error into radial and tangential components. Experimental results confirm that the proposed method is robust to initialization and improves reconstruction quality compared to state-of-the-art methods even without bundle adjustment refinement." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 489, + 128, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 489, + 128, + 502 + ], + "spans": [ + { + "bbox": [ + 47, + 489, + 128, + 502 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 510, + 287, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 510, + 287, + 617 + ], + "spans": [ + { + "bbox": [ + 45, + 510, + 287, + 617 + ], + "type": "text", + "content": "Factorization is a long-established method in Structure from Motion (SfM). It originates from [38] by Tomasi and Kanade showing how, under the orthographic camera model, structure and motion can be computed simultaneously from an image sequence using singular value decomposition (SVD). The method was later reformulated for affine cameras, including weak perspective projection [32]. Strum and Triggs [36] further extended factorization to projective cameras by accounting for projective depths." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 618, + 287, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 618, + 287, + 654 + ], + "spans": [ + { + "bbox": [ + 46, + 618, + 287, + 654 + ], + "type": "text", + "content": "One appeal of these factorization algorithms is they can yield a closed-form solution by using the SVD. It is however only possible to use the SVD if every considered scene" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 310, + 239, + 392, + 358 + ], + "blocks": [ + { + "bbox": [ + 310, + 239, + 392, + 358 + ], + "lines": [ + { + "bbox": [ + 310, + 239, + 392, + 358 + ], + "spans": [ + { + "bbox": [ + 310, + 239, + 392, + 358 + ], + "type": "image", + "image_path": "e5965866ff80e2755348a5a545feec6711b8d143417fa7dc4355807b775ad01b.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 370, + 545, + 426 + ], + "lines": [ + { + "bbox": [ + 305, + 370, + 545, + 426 + ], + "spans": [ + { + "bbox": [ + 305, + 370, + 545, + 426 + ], + "type": "text", + "content": "Figure 1. 
(Left) Examples of two of the images in the Fountain sequence. (Right) Reconstruction obtained with expOSE (top) and pOSE (bottom) for 3 different values of " + }, + { + "bbox": [ + 305, + 370, + 545, + 426 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 305, + 370, + 545, + 426 + ], + "type": "text", + "content": ". Our method achieves the same convergence rate as pOSE while having a higher reconstruction quality and being less dependent on the choice of " + }, + { + "bbox": [ + 305, + 370, + 545, + 426 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 305, + 370, + 545, + 426 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 399, + 238, + 543, + 359 + ], + "blocks": [ + { + "bbox": [ + 399, + 238, + 543, + 359 + ], + "lines": [ + { + "bbox": [ + 399, + 238, + 543, + 359 + ], + "spans": [ + { + "bbox": [ + 399, + 238, + 543, + 359 + ], + "type": "image", + "image_path": "878e459cf6478bb9da95e7f536632ff8ac1e3886a3f05018ab6b5765ba1809d3.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 449, + 546, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 449, + 546, + 592 + ], + "spans": [ + { + "bbox": [ + 304, + 449, + 546, + 592 + ], + "type": "text", + "content": "point is visible throughout the whole image sequence. In cases of missing data, the SVD can be replaced with iterative methods. Simple splitting methods [4,8,22] are able to regularize singular values when computing a proximal operator, but can give rather erroneous solutions because of a low convergence rate close to the optimum. [5, 8] give an idea of convex formulation using the nuclear norm, but are usually too weak for SfM in the presence of noise [19, 30]. The papers [1, 9, 10, 31] suggest different ways to assure that direct bilinear optimization only has a global minimum. However, SfM problems with local minima do not fulfill their required conditions [3]." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 594, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 547, + 714 + ], + "type": "text", + "content": "It was recently shown by Hong et al. [14-17] that direct bilinear estimation of structure and motion can be made robust to local minima in combination with the Variable Projection (VarPro) method. In [15] the objective is exchanged for the Pseudo Object Space Error (pOSE) which is a tradeoff between the object space error and a quadratic regularization term. This was later extended to a radial distortion invariant version RpOSE, presented in [18]. 
With their bilinear factorization structure and a large basin of convergence when using VarPro, these pOSE models tend to find" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 664, + 287, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 664, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 664, + 287, + 713 + ], + "type": "text", + "content": "1This work has been funded by the Swedish Research Council (grant no. 2018-05375), the Swedish Foundation for Strategic Research project, Semantic Mapping and Visual Navigation for Smart Robots (grant no. RIT15-0038), and the Wallenberg AI, Autonomous Systems and Software Program (WASP)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "8959" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 119 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 119 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 119 + ], + "type": "text", + "content": "a global minimum independently of the initialization. Additionally, both pOSE and RpOSE have in [18] been shown to be local approximations of the reprojection error, enabling iterative refinement to the maximum likelihood solution." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 120, + 288, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 120, + 288, + 239 + ], + "spans": [ + { + "bbox": [ + 46, + 120, + 288, + 239 + ], + "type": "text", + "content": "In this paper, we show that the regularization term in the pOSE formulation overly penalizes large positive depths and can thereby limit the range of feasible depths too much to achieve satisfactory solutions. We instead propose regularization with an exponential penalty that is negligible for positive depths. To achieve efficient inference we use a quadratic approximation of the exponential term suitable for optimization with VarPro. Moreover, we extend the method with radial distortion robustness by decomposing the OSE into radial and tangent components." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 59, + 240, + 257, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 240, + 257, + 251 + ], + "spans": [ + { + "bbox": [ + 59, + 240, + 257, + 251 + ], + "type": "text", + "content": "In short, the main contributions of this paper are:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 58, + 256, + 287, + 436 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 58, + 256, + 287, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 256, + 287, + 293 + ], + "spans": [ + { + "bbox": [ + 58, + 256, + 287, + 293 + ], + "type": "text", + "content": "- We investigate the pOSE models' undesirable penalization of large depths and propose expOSE which has negligible regularization of positive depths;" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 58, + 298, + 287, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 298, + 287, + 370 + ], + "spans": [ + { + "bbox": [ + 58, + 298, + 287, + 370 + ], + "type": "text", + "content": "- We formulate a quadratic approximation of the exponential regularization term in expOSE to make it suitable for optimization with VarPro and show that, with random initialization, the model achieves convergence rates similar to pOSE with significantly higher reconstruction quality;" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 58, + 376, + 287, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 376, + 287, + 436 + ], + "spans": [ + { + "bbox": [ + 58, + 376, + 287, + 436 + ], + "type": "text", + "content": "- We extend expOSE with radial distortion robustness by decomposing the Object Space Error (OSE) into radial and tangent components and propose an SfM pipeline that is able to obtain a complete and accurate Euclidean reconstruction from uncalibrated cameras." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 47, + 445, + 197, + 459 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 445, + 197, + 459 + ], + "spans": [ + { + "bbox": [ + 47, + 445, + 197, + 459 + ], + "type": "text", + "content": "2. Reconstruction Objectives" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 465, + 287, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 465, + 287, + 525 + ], + "spans": [ + { + "bbox": [ + 46, + 465, + 287, + 525 + ], + "type": "text", + "content": "In this section, we illustrate the problems with direct optimization of reprojection error and discuss how this is addressed using the pOSE model [15]. We then present our exponential regularization and show how this addresses the limitations of the pOSE model." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 531, + 231, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 531, + 231, + 544 + ], + "spans": [ + { + "bbox": [ + 47, + 531, + 231, + 544 + ], + "type": "text", + "content": "2.1. Reprojection Error and Cheirality" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 550, + 287, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 550, + 287, + 595 + ], + "spans": [ + { + "bbox": [ + 46, + 550, + 287, + 595 + ], + "type": "text", + "content": "Bundle adjustment [12, 39] is the standard routine when it comes to solving the Structure-from-Motion problem. 
Given measured point projections " + }, + { + "bbox": [ + 46, + 550, + 287, + 595 + ], + "type": "inline_equation", + "content": "m_{ij}" + }, + { + "bbox": [ + 46, + 550, + 287, + 595 + ], + "type": "text", + "content": " the goal is to attempt to minimize" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 124, + 594, + 287, + 626 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 594, + 287, + 626 + ], + "spans": [ + { + "bbox": [ + 124, + 594, + 287, + 626 + ], + "type": "interline_equation", + "content": "\\sum_ {i j} \\left\\| \\boldsymbol {m} _ {i j} - \\frac {\\boldsymbol {x} _ {i j}}{z _ {i j}} \\right\\| ^ {2}, \\tag {1}", + "image_path": "a5eb341c2cdf8f320e0f13556ceaec8b18214ea2af8d019c80d4837411d6047b.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 629, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 287, + 713 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 629, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\left[ \\begin{array}{c} \\boldsymbol{x}_{ij} \\\\ z_{ij} \\end{array} \\right] = P_i U_j" + }, + { + "bbox": [ + 46, + 629, + 287, + 713 + ], + "type": "text", + "content": ". Here " + }, + { + "bbox": [ + 46, + 629, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_{ij}" + }, + { + "bbox": [ + 46, + 629, + 287, + 713 + ], + "type": "text", + "content": " is a 2 vector, " + }, + { + "bbox": [ + 46, + 629, + 287, + 713 + ], + "type": "inline_equation", + "content": "z_{ij}" + }, + { + "bbox": [ + 46, + 629, + 287, + 713 + ], + "type": "text", + "content": " is a number, referred to as the projective depth, " + }, + { + "bbox": [ + 46, + 629, + 287, + 713 + ], + "type": "inline_equation", + "content": "P_i" + }, + { + "bbox": [ + 46, + 629, + 287, + 713 + ], + "type": "text", + "content": " is a " + }, + { + "bbox": [ + 46, + 629, + 287, + 713 + ], + "type": "inline_equation", + "content": "3 \\times 4" + }, + { + "bbox": [ + 46, + 629, + 287, + 713 + ], + "type": "text", + "content": " camera matrix and " + }, + { + "bbox": [ + 46, + 629, + 287, + 713 + ], + "type": "inline_equation", + "content": "U_i" + }, + { + "bbox": [ + 46, + 629, + 287, + 713 + ], + "type": "text", + "content": " is a " + }, + { + "bbox": [ + 46, + 629, + 287, + 713 + ], + "type": "inline_equation", + "content": "4 \\times 1" + }, + { + "bbox": [ + 46, + 629, + 287, + 713 + ], + "type": "text", + "content": " vector containing homogeneous coordinates of the projected 3D point. Under the assumption of Gaussian image noise, this gives the maximal likelihood estimate of the camera matrices and 3D points [12]." 
+ } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 331, + 70, + 449, + 158 + ], + "blocks": [ + { + "bbox": [ + 331, + 70, + 449, + 158 + ], + "lines": [ + { + "bbox": [ + 331, + 70, + 449, + 158 + ], + "spans": [ + { + "bbox": [ + 331, + 70, + 449, + 158 + ], + "type": "image", + "image_path": "9a698cec3c2119d3eb24916444caab2a3ba0f6b7a79f0c218b5c8f41864c8c0b.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 477, + 86, + 516, + 155 + ], + "blocks": [ + { + "bbox": [ + 477, + 86, + 516, + 155 + ], + "lines": [ + { + "bbox": [ + 477, + 86, + 516, + 155 + ], + "spans": [ + { + "bbox": [ + 477, + 86, + 516, + 155 + ], + "type": "image", + "image_path": "a19912a48bd7222597cb5fab164211f60ce7d2e133bb513f3a479e5f51d9d917.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 331, + 158, + 449, + 243 + ], + "blocks": [ + { + "bbox": [ + 331, + 158, + 449, + 243 + ], + "lines": [ + { + "bbox": [ + 331, + 158, + 449, + 243 + ], + "spans": [ + { + "bbox": [ + 331, + 158, + 449, + 243 + ], + "type": "image", + "image_path": "b36e17128f4398513525dca37cb88b5fe33e4e82687f6e8c67a6563f95c4e1ba.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 251, + 545, + 329 + ], + "lines": [ + { + "bbox": [ + 305, + 251, + 545, + 329 + ], + "spans": [ + { + "bbox": [ + 305, + 251, + 545, + 329 + ], + "type": "text", + "content": "Figure 2. Left: Objective values of the reprojection error (blue), the pOSE error " + }, + { + "bbox": [ + 305, + 251, + 545, + 329 + ], + "type": "inline_equation", + "content": "(\\eta = 0.1, \\text{red})" + }, + { + "bbox": [ + 305, + 251, + 545, + 329 + ], + "type": "text", + "content": " and our proposed formulation " + }, + { + "bbox": [ + 305, + 251, + 545, + 329 + ], + "type": "inline_equation", + "content": "(\\eta = 0.1, \\text{yellow})" + }, + { + "bbox": [ + 305, + 251, + 545, + 329 + ], + "type": "text", + "content": " on the lines " + }, + { + "bbox": [ + 305, + 251, + 545, + 329 + ], + "type": "inline_equation", + "content": "(1 - t)(0.5, 0, -1) + t(0.5, 0, 1)" + }, + { + "bbox": [ + 305, + 251, + 545, + 329 + ], + "type": "text", + "content": " (top) and " + }, + { + "bbox": [ + 305, + 251, + 545, + 329 + ], + "type": "inline_equation", + "content": "(1 - t)(-0.5, 0, -1) + t(0.5, 0, 1)" + }, + { + "bbox": [ + 305, + 251, + 545, + 329 + ], + "type": "text", + "content": " (bottom) when " + }, + { + "bbox": [ + 305, + 251, + 545, + 329 + ], + "type": "inline_equation", + "content": "m = (0.5, 0)" + }, + { + "bbox": [ + 305, + 251, + 545, + 329 + ], + "type": "text", + "content": ". Note that the reprojection error is undefined at " + }, + { + "bbox": [ + 305, + 251, + 545, + 329 + ], + "type": "inline_equation", + "content": "z = 0" + }, + { + "bbox": [ + 305, + 251, + 545, + 329 + ], + "type": "text", + "content": " since this corresponds to the camera center. Right: Corresponding camera and sampling line." 
+ } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 475, + 171, + 517, + 241 + ], + "blocks": [ + { + "bbox": [ + 475, + 171, + 517, + 241 + ], + "lines": [ + { + "bbox": [ + 475, + 171, + 517, + 241 + ], + "spans": [ + { + "bbox": [ + 475, + 171, + 517, + 241 + ], + "type": "image", + "image_path": "0abe454e3086a43e7f3d82666ed2c9171206be9e398f081604647c1851011309.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "spans": [ + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "text", + "content": "It is well known that optimizing (1) is difficult and requires good initialization to achieve convergence to the right solution. One of the difficulties is the division of " + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_{ij}" + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "inline_equation", + "content": "z_{ij}" + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "text", + "content": ". This creates a barrier of objective values that goes to infinity and needs to be traversed when for example moving from " + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "inline_equation", + "content": "(\\boldsymbol{x}_{ij}, -z_{ij})" + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "inline_equation", + "content": "(\\boldsymbol{x}_{ij}, z_{ij})" + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "text", + "content": ". The blue curve of Figure 2 (top) shows a 2D example of this barrier. Here we used " + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "inline_equation", + "content": "m = (0.5, 0)" + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "text", + "content": " and sampled the function " + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "inline_equation", + "content": "\\left( m - \\frac{x}{z} \\right)^2" + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "text", + "content": " on the line segment " + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "inline_equation", + "content": "(\\boldsymbol{x}, z) = (1 - t)(0.5, 0, -1) + t(0.5, 0, 1)" + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "text", + "content": ". The best value over this line is at " + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "inline_equation", + "content": "t = 1" + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "text", + "content": " which gives " + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "inline_equation", + "content": "(\\boldsymbol{x}, z) = (0.5, 0, 1)" + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "text", + "content": ". For comparison, we also plot the corresponding values of the pOSE model [15] (red) and the proposed formulation that we will describe below (yellow). 
In a calibrated setting the interpretation of " + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "inline_equation", + "content": "z_{ij}" + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "text", + "content": " is the depth [12] of the observed 3D point. Hence, in practical cases, where observed points are in front of the camera, there is usually no reason to allow solutions with negative " + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "inline_equation", + "content": "z_{ij}" + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "text", + "content": ". In the uncalibrated case " + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "inline_equation", + "content": "z_{ij}" + }, + { + "bbox": [ + 304, + 350, + 545, + 651 + ], + "type": "text", + "content": " is referred to as a projective depth. It can be shown that when the data is noise free (with sufficiently many visible projections) there is always a solution where the projective depths are all positive [26] if the observed points are in front of the camera. Moreover, any other solution is projectively equivalent to this one, meaning that there is a projective 3D transformation that makes the projective depths positive [25, 26]." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 658, + 410, + 671 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 658, + 410, + 671 + ], + "spans": [ + { + "bbox": [ + 306, + 658, + 410, + 671 + ], + "type": "text", + "content": "2.2. The pOSE Model" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "type": "text", + "content": "In view of the above, constraining the problem to positive depths is no practical restriction. Still finding a good starting solution where all depths are positive is not a trivial" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "8960" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 96 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 96 + ], + "type": "text", + "content": "issue. In [15] the objective (1) is exchanged for the object space error (OSE)" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 94, + 100, + 287, + 114 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 100, + 287, + 114 + ], + "spans": [ + { + "bbox": [ + 94, + 100, + 287, + 114 + ], + "type": "interline_equation", + "content": "\\ell_ {\\text {O S E}} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right) = \\left\\| z _ {i j} \\boldsymbol {m} _ {i j} - \\boldsymbol {x} _ {i j} \\right\\| ^ {2}. 
\\tag {2}", + "image_path": "5bf6bdc7df433a266a55a500475a1ab2fc01e6eae3141cbc676370d926d6abd0.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 119, + 288, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 119, + 288, + 229 + ], + "spans": [ + { + "bbox": [ + 46, + 119, + 288, + 229 + ], + "type": "text", + "content": "Here, the scale-invariant residual of (1) has been replaced with a linear error allowing points to switch from negative to positive projective depths. It can be shown [18] that the OSE residual " + }, + { + "bbox": [ + 46, + 119, + 288, + 229 + ], + "type": "inline_equation", + "content": "z_{i} \\pmb{m}_{ij} - \\pmb{x}_{ij}" + }, + { + "bbox": [ + 46, + 119, + 288, + 229 + ], + "type": "text", + "content": " is the first order Taylor expansion of the projective residual " + }, + { + "bbox": [ + 46, + 119, + 288, + 229 + ], + "type": "inline_equation", + "content": "\\pmb{m}_{ij} - \\frac{\\pmb{x}_{ij}}{z_{ij}}" + }, + { + "bbox": [ + 46, + 119, + 288, + 229 + ], + "type": "text", + "content": " around " + }, + { + "bbox": [ + 46, + 119, + 288, + 229 + ], + "type": "inline_equation", + "content": "(\\pmb{x}_{ij}, z_{ij}) = (\\pmb{m}_{ij}, 1)" + }, + { + "bbox": [ + 46, + 119, + 288, + 229 + ], + "type": "text", + "content": ", and it is therefore in some sense the closest linear approximation that we can find. On the downside, the OSE is clearly minimized by the trivial solution " + }, + { + "bbox": [ + 46, + 119, + 288, + 229 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{ij} = 0" + }, + { + "bbox": [ + 46, + 119, + 288, + 229 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 119, + 288, + 229 + ], + "type": "inline_equation", + "content": "z_{ij} = 0" + }, + { + "bbox": [ + 46, + 119, + 288, + 229 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 46, + 119, + 288, + 229 + ], + "type": "inline_equation", + "content": "i, j" + }, + { + "bbox": [ + 46, + 119, + 288, + 229 + ], + "type": "text", + "content": ". Therefore [15] adds the quadratic regularization" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 239, + 287, + 253 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 239, + 287, + 253 + ], + "spans": [ + { + "bbox": [ + 111, + 239, + 287, + 253 + ], + "type": "interline_equation", + "content": "\\ell_ {\\text {a f f}} \\left(\\boldsymbol {x} _ {i j}\\right) = \\left\\| \\boldsymbol {x} _ {i j} - \\boldsymbol {m} _ {i j} \\right\\| ^ {2}, \\tag {3}", + "image_path": "a0e3026eb64da2cd965a3361b6e265221bd46aca3f768249c86a6e3eff1a8609.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 256, + 287, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 256, + 287, + 292 + ], + "spans": [ + { + "bbox": [ + 46, + 256, + 287, + 292 + ], + "type": "text", + "content": "which penalizes the trivial zero solution. Note that (3) and (2) both vanish when " + }, + { + "bbox": [ + 46, + 256, + 287, + 292 + ], + "type": "inline_equation", + "content": "(\\pmb{x}_{ij},z_{ij}) = (\\pmb{m}_{ij},1)" + }, + { + "bbox": [ + 46, + 256, + 287, + 292 + ], + "type": "text", + "content": ". 
The proposed pOSE objective" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 82, + 297, + 287, + 323 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 297, + 287, + 323 + ], + "spans": [ + { + "bbox": [ + 82, + 297, + 287, + 323 + ], + "type": "interline_equation", + "content": "\\sum_ {i j} \\left((1 - \\eta) \\ell_ {\\text {O S E}} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right) + \\eta \\ell_ {\\text {a f f}} \\left(\\boldsymbol {x} _ {i j}\\right)\\right), \\tag {4}", + "image_path": "824f6492ac72e22cba120aea8e71d2d584973d9727b3ba747778c92d531890c8.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 326, + 287, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 326, + 287, + 506 + ], + "spans": [ + { + "bbox": [ + 46, + 326, + 287, + 506 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 326, + 287, + 506 + ], + "type": "inline_equation", + "content": "0 < \\eta < 1" + }, + { + "bbox": [ + 46, + 326, + 287, + 506 + ], + "type": "text", + "content": ", therefore allows arbitrary starting solutions but penalizes projective depths that deviate significantly from 1. The red curve of Figure 2 (top) shows pOSE values (with " + }, + { + "bbox": [ + 46, + 326, + 287, + 506 + ], + "type": "inline_equation", + "content": "\\eta = 0.1" + }, + { + "bbox": [ + 46, + 326, + 287, + 506 + ], + "type": "text", + "content": ") over the line " + }, + { + "bbox": [ + 46, + 326, + 287, + 506 + ], + "type": "inline_equation", + "content": "(1 - t)(0.5, 0, -1) + t(0.5, 0, 1)" + }, + { + "bbox": [ + 46, + 326, + 287, + 506 + ], + "type": "text", + "content": ". In contrast to the reprojection error, the pOSE formulation does not give any barrier at " + }, + { + "bbox": [ + 46, + 326, + 287, + 506 + ], + "type": "inline_equation", + "content": "z = 0" + }, + { + "bbox": [ + 46, + 326, + 287, + 506 + ], + "type": "text", + "content": ". It is experimentally shown in [15] that when optimized using VarPro [16] this leads to a method that converges to the right solution in the vast majority of cases starting from random initialization (including starting points with negative depths). Note that if we column-stack the camera matrices " + }, + { + "bbox": [ + 46, + 326, + 287, + 506 + ], + "type": "inline_equation", + "content": "P_{i}" + }, + { + "bbox": [ + 46, + 326, + 287, + 506 + ], + "type": "text", + "content": " into a matrix " + }, + { + "bbox": [ + 46, + 326, + 287, + 506 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 46, + 326, + 287, + 506 + ], + "type": "text", + "content": " with 4 columns, and similarly row-stack the 3D points into a matrix " + }, + { + "bbox": [ + 46, + 326, + 287, + 506 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 46, + 326, + 287, + 506 + ], + "type": "text", + "content": " with 4 rows, the resulting product " + }, + { + "bbox": [ + 46, + 326, + 287, + 506 + ], + "type": "inline_equation", + "content": "X = PU" + }, + { + "bbox": [ + 46, + 326, + 287, + 506 + ], + "type": "text", + "content": " is a matrix of rank 4. 
We can therefore formulate the pOSE objective as a low-rank recovery problem" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 116, + 510, + 287, + 529 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 510, + 287, + 529 + ], + "spans": [ + { + "bbox": [ + 116, + 510, + 287, + 529 + ], + "type": "interline_equation", + "content": "\\min _ {\\operatorname {r a n k} (X) = 4} \\| \\mathcal {A} (X) - b \\| ^ {2}, \\tag {5}", + "image_path": "d448e73af328258fd4231bc480f645156888ceca6f071d919a396143775334d5.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 533, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 533, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 533, + 288, + 713 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 533, + 288, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 46, + 533, + 288, + 713 + ], + "type": "text", + "content": " is a linear operator. It is well known from compressed sensing that such formulations can often be solved optimally [6, 9-11, 19, 28, 30, 33]. The optimization problem becomes particularly easy for large values of " + }, + { + "bbox": [ + 46, + 533, + 288, + 713 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 46, + 533, + 288, + 713 + ], + "type": "text", + "content": ". On the other hand, the regularization term also introduces an undesirable penalty for large (positive) depths which may constrain the range of feasible depths too much to achieve satisfactory solutions. The bottom images in Figure 2 show the same evaluation as the top ones but over the line " + }, + { + "bbox": [ + 46, + 533, + 288, + 713 + ], + "type": "inline_equation", + "content": "(1 - t)(-0.5, 0, -1) + t(0.5, 0, 1)" + }, + { + "bbox": [ + 46, + 533, + 288, + 713 + ], + "type": "text", + "content": ". All of the points on this line give 0 reprojection error (except at the camera center " + }, + { + "bbox": [ + 46, + 533, + 288, + 713 + ], + "type": "inline_equation", + "content": "(0, 0, 0)" + }, + { + "bbox": [ + 46, + 533, + 288, + 713 + ], + "type": "text", + "content": " for which the projection is undefined). The pOSE formulation (red curve) clearly penalizes solutions of small or negative projective depth but its undesirable growth for large positive values is also visible." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 72, + 457, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 457, + 85 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 457, + 85 + ], + "type": "text", + "content": "2.3. Exponential Regularization" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 90, + 545, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 90, + 545, + 126 + ], + "spans": [ + { + "bbox": [ + 305, + 90, + 545, + 126 + ], + "type": "text", + "content": "In this paper, we instead propose to regularize the depth using an exponential function (yellow curves in Figure 2). 
Specifically, we replace the affine term (3) with" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 354, + 133, + 545, + 163 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 354, + 133, + 545, + 163 + ], + "spans": [ + { + "bbox": [ + 354, + 133, + 545, + 163 + ], + "type": "interline_equation", + "content": "\\ell_ {\\exp} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right) = e ^ {- \\left(\\frac {\\boldsymbol {m} _ {i j} \\boldsymbol {x} _ {i j} + z _ {i j}}{\\sqrt {\\| \\boldsymbol {m} _ {i j} \\| ^ {2} + 1}}\\right)}. \\tag {6}", + "image_path": "18ce539801cb4ac39dac4b9b6841e98cb114c0e3ff8467ec15de82ca296117f5.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 170, + 545, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 170, + 545, + 295 + ], + "spans": [ + { + "bbox": [ + 305, + 170, + 545, + 295 + ], + "type": "text", + "content": "The term " + }, + { + "bbox": [ + 305, + 170, + 545, + 295 + ], + "type": "inline_equation", + "content": "\\frac{m_{ij}x_{ij} + z_{ij}}{\\sqrt{\\|m_{ij}\\|^2 + 1}}" + }, + { + "bbox": [ + 305, + 170, + 545, + 295 + ], + "type": "text", + "content": " is the length (with sign) of the projection of the vector " + }, + { + "bbox": [ + 305, + 170, + 545, + 295 + ], + "type": "inline_equation", + "content": "(\\pmb{x}_{ij},z_{ij})" + }, + { + "bbox": [ + 305, + 170, + 545, + 295 + ], + "type": "text", + "content": " onto " + }, + { + "bbox": [ + 305, + 170, + 545, + 295 + ], + "type": "inline_equation", + "content": "(m_{ij},1)" + }, + { + "bbox": [ + 305, + 170, + 545, + 295 + ], + "type": "text", + "content": ". Note that its sign is negative when the angle between " + }, + { + "bbox": [ + 305, + 170, + 545, + 295 + ], + "type": "inline_equation", + "content": "(\\pmb{x}_{ij},z_{ij})" + }, + { + "bbox": [ + 305, + 170, + 545, + 295 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 170, + 545, + 295 + ], + "type": "inline_equation", + "content": "(m_{ij},1)" + }, + { + "bbox": [ + 305, + 170, + 545, + 295 + ], + "type": "text", + "content": " is larger than " + }, + { + "bbox": [ + 305, + 170, + 545, + 295 + ], + "type": "inline_equation", + "content": "90^{\\circ}" + }, + { + "bbox": [ + 305, + 170, + 545, + 295 + ], + "type": "text", + "content": ". The exponential function will penalize such values heavily. Still, the penalty is finite for all values making it is possible to use start the optimization from anywhere. On the other hand for positive growing values the exponential function tends to 0 and therefore does not restrict the feasible projective depths as the affine term (3) does." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 317, + 296, + 479, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 296, + 479, + 308 + ], + "spans": [ + { + "bbox": [ + 317, + 296, + 479, + 308 + ], + "type": "text", + "content": "The proposed expOSE objective is then" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 311, + 316, + 545, + 342 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 316, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 311, + 316, + 545, + 342 + ], + "type": "interline_equation", + "content": "\\ell_ {\\text {e x p O S E}} = \\sum_ {i j} (1 - \\eta) \\ell_ {\\text {O S E}} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right) + \\eta \\ell_ {\\text {e x p}} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right). 
\\tag {7}", + "image_path": "a8e41e837375fbf2c49a6451d7d1eb802f4c5bdbc75b4f3670104da397c435f1.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 350, + 545, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 350, + 545, + 457 + ], + "spans": [ + { + "bbox": [ + 304, + 350, + 545, + 457 + ], + "type": "text", + "content": "At first glance it may seem as if replacing (3) with (6) will yield an ill-posed problem since large depths are hardly penalized by (6). Adding a small penalty for these values to ensure a well-posed problem may therefore be warranted. Note, however, that unless there is an exact solution (with zero reprojection errors) the OSE term is not scale invariant but has a weak shrinking bias. In practice, we empirically observe that this bias is generally enough for our proposed algorithm to converge well from random starting solutions." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 458, + 545, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 458, + 545, + 566 + ], + "spans": [ + { + "bbox": [ + 304, + 458, + 545, + 566 + ], + "type": "text", + "content": "We conclude this section by noting that our proposed method is much less sensitive to parameter selection than the original pOSE model [15]. Since the shrinking bias of the OSE term is relatively weak, an increased regularization cost, due to a change of parameters, can often be compensated for by changing the scale of the reconstruction. In contrast, the choice of " + }, + { + "bbox": [ + 304, + 458, + 545, + 566 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 304, + 458, + 545, + 566 + ], + "type": "text", + "content": " in the original pOSE model is crucial. Figure 1 shows how " + }, + { + "bbox": [ + 304, + 458, + 545, + 566 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 304, + 458, + 545, + 566 + ], + "type": "text", + "content": " affects the reconstruction (more details about this figure are provided in Section 3.2)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 576, + 455, + 589 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 576, + 455, + 589 + ], + "spans": [ + { + "bbox": [ + 306, + 576, + 455, + 589 + ], + "type": "text", + "content": "3. Optimization with VarPro" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 596, + 545, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 596, + 545, + 656 + ], + "spans": [ + { + "bbox": [ + 305, + 596, + 545, + 656 + ], + "type": "text", + "content": "One of the main benefits of the pOSE formulation [15] is that it is quadratic in the elements of " + }, + { + "bbox": [ + 305, + 596, + 545, + 656 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 305, + 596, + 545, + 656 + ], + "type": "text", + "content": ". Therefore, given values for camera matrices " + }, + { + "bbox": [ + 305, + 596, + 545, + 656 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 305, + 596, + 545, + 656 + ], + "type": "text", + "content": " the optimal 3D points " + }, + { + "bbox": [ + 305, + 596, + 545, + 656 + ], + "type": "inline_equation", + "content": "U^{*}(P)" + }, + { + "bbox": [ + 305, + 596, + 545, + 656 + ], + "type": "text", + "content": " can be computed in closed form using a pseudo inverse. 
The VarPro method [16,27,35,41] solves the reduced problem" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 370, + 662, + 545, + 682 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 370, + 662, + 545, + 682 + ], + "spans": [ + { + "bbox": [ + 370, + 662, + 545, + 682 + ], + "type": "interline_equation", + "content": "\\min _ {P} \\| \\mathcal {A} (P U ^ {*} (P)) - b \\| ^ {2}, \\tag {8}", + "image_path": "ff6ef735c35f478c5deb571b5849d4d22f07d438a1ca908d1475526319d47b39.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": "using the Levenberg-Marquardt method [12, 39]. In contrast to standard Gauss-Newton type methods that optimize" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "text", + "content": "8961" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 94, + 71, + 240, + 178 + ], + "blocks": [ + { + "bbox": [ + 94, + 71, + 240, + 178 + ], + "lines": [ + { + "bbox": [ + 94, + 71, + 240, + 178 + ], + "spans": [ + { + "bbox": [ + 94, + 71, + 240, + 178 + ], + "type": "image", + "image_path": "0656bc076b95bd79594e43d9c2efdd1ac83972d8ac70d8edd4856df5f36cc0cc.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 48, + 186, + 285, + 198 + ], + "lines": [ + { + "bbox": [ + 48, + 186, + 285, + 198 + ], + "spans": [ + { + "bbox": [ + 48, + 186, + 285, + 198 + ], + "type": "text", + "content": "Figure 3. The exponential function and its Taylor approximation." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 219, + 286, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 219, + 286, + 290 + ], + "spans": [ + { + "bbox": [ + 46, + 219, + 286, + 290 + ], + "type": "text", + "content": "locally over both " + }, + { + "bbox": [ + 46, + 219, + 286, + 290 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 46, + 219, + 286, + 290 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 219, + 286, + 290 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 46, + 219, + 286, + 290 + ], + "type": "text", + "content": ", the main benefit of the elimination of " + }, + { + "bbox": [ + 46, + 219, + 286, + 290 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 46, + 219, + 286, + 290 + ], + "type": "text", + "content": " is that dampening only needs to be applied to " + }, + { + "bbox": [ + 46, + 219, + 286, + 290 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 46, + 219, + 286, + 290 + ], + "type": "text", + "content": ". This has been shown empirically to greatly improve convergence [14, 16]. 
The intuition is that small changes in " + }, + { + "bbox": [ + 46, + 219, + 286, + 290 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 46, + 219, + 286, + 290 + ], + "type": "text", + "content": " will sometimes result in large changes in " + }, + { + "bbox": [ + 46, + 219, + 286, + 290 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 46, + 219, + 286, + 290 + ], + "type": "text", + "content": ", but this is prevented by a dampening term which causes the algorithm to stall." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 292, + 287, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 292, + 287, + 353 + ], + "spans": [ + { + "bbox": [ + 47, + 292, + 287, + 353 + ], + "type": "text", + "content": "Since the exponential regularization term is not quadratic VarPro is not directly applicable to our formulation. We, therefore, employ an iterative approach that locally approximates (6) with a quadratic function. Consider the 2nd order Taylor expansion of " + }, + { + "bbox": [ + 47, + 292, + 287, + 353 + ], + "type": "inline_equation", + "content": "e^{-\\boldsymbol{a}^T \\boldsymbol{y}}" + }, + { + "bbox": [ + 47, + 292, + 287, + 353 + ], + "type": "text", + "content": " at a point " + }, + { + "bbox": [ + 47, + 292, + 287, + 353 + ], + "type": "inline_equation", + "content": "\\bar{\\boldsymbol{y}}" + }, + { + "bbox": [ + 47, + 292, + 287, + 353 + ], + "type": "text", + "content": " given by" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 361, + 287, + 397 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 361, + 287, + 397 + ], + "spans": [ + { + "bbox": [ + 51, + 361, + 287, + 397 + ], + "type": "interline_equation", + "content": "e ^ {- \\boldsymbol {a} ^ {T} \\boldsymbol {y}} \\approx e ^ {- \\boldsymbol {a} ^ {T} \\bar {\\boldsymbol {y}}} \\left(1 - \\boldsymbol {a} ^ {T} (\\boldsymbol {y} - \\bar {\\boldsymbol {y}}) + \\frac {1}{2} \\left(\\boldsymbol {a} ^ {T} (\\boldsymbol {y} - \\bar {\\boldsymbol {y}})\\right) ^ {2}\\right). \\tag {9}", + "image_path": "8bf70dff0d0b173163d423fa1225242e98bfbd9ae1fa60c239bfbac92c6629f0.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 397, + 212, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 397, + 212, + 409 + ], + "spans": [ + { + "bbox": [ + 47, + 397, + 212, + 409 + ], + "type": "text", + "content": "Completing squares gives the expression" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 417, + 287, + 444 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 417, + 287, + 444 + ], + "spans": [ + { + "bbox": [ + 62, + 417, + 287, + 444 + ], + "type": "interline_equation", + "content": "e ^ {- \\boldsymbol {a} ^ {T} \\boldsymbol {y}} \\approx \\frac {e ^ {- \\boldsymbol {a} ^ {T} \\bar {\\boldsymbol {y}}}}{2} \\left(\\boldsymbol {a} ^ {T} (\\boldsymbol {y} - \\bar {\\boldsymbol {y}}) - 1\\right) ^ {2} + e ^ {- \\boldsymbol {a} ^ {T} \\bar {\\boldsymbol {y}}}. 
\\tag {10}", + "image_path": "68e14679b0848127dda635a99a73027d00d381e4f73289b96248933e3501b24a.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 452, + 287, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 452, + 287, + 537 + ], + "spans": [ + { + "bbox": [ + 46, + 452, + 287, + 537 + ], + "type": "text", + "content": "Note that when minimizing with respect to " + }, + { + "bbox": [ + 46, + 452, + 287, + 537 + ], + "type": "inline_equation", + "content": "\\pmb{y}" + }, + { + "bbox": [ + 46, + 452, + 287, + 537 + ], + "type": "text", + "content": " the last term is constant and can be ignored. Since the exponential function is positive the result is a weighted linear least squares term in the unknown " + }, + { + "bbox": [ + 46, + 452, + 287, + 537 + ], + "type": "inline_equation", + "content": "\\pmb{y}" + }, + { + "bbox": [ + 46, + 452, + 287, + 537 + ], + "type": "text", + "content": ". With " + }, + { + "bbox": [ + 46, + 452, + 287, + 537 + ], + "type": "inline_equation", + "content": "\\pmb{y} = \\begin{bmatrix} \\pmb{x}_{ij} \\\\ z_{ij} \\end{bmatrix}" + }, + { + "bbox": [ + 46, + 452, + 287, + 537 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 452, + 287, + 537 + ], + "type": "inline_equation", + "content": "\\pmb{a} = \\frac{1}{\\sqrt{\\|\\pmb{m}_{ij}\\|^2 + 1}} \\begin{bmatrix} \\pmb{m}_{ij} \\\\ 1 \\end{bmatrix}" + }, + { + "bbox": [ + 46, + 452, + 287, + 537 + ], + "type": "text", + "content": " we get our approximation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 545, + 290, + 590 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 545, + 290, + 590 + ], + "spans": [ + { + "bbox": [ + 47, + 545, + 290, + 590 + ], + "type": "interline_equation", + "content": "\\tilde {\\ell} _ {\\exp} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right) \\approx \\frac {\\ell_ {\\exp} \\left(\\bar {\\boldsymbol {x}} _ {i j} , \\bar {z} _ {i j}\\right)}{2} \\left(\\frac {\\boldsymbol {m} _ {i j} ^ {T} \\Delta \\boldsymbol {x} _ {i j} + \\Delta z _ {i j}}{\\sqrt {\\| \\boldsymbol {m} _ {i j} \\| ^ {2} + 1}} - 1\\right) ^ {2}, \\tag {11}", + "image_path": "152c0b73905402bede23687ce12943fa1406b584796ca795e42bceeec3ab45cb.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 590, + 287, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 590, + 287, + 650 + ], + "spans": [ + { + "bbox": [ + 46, + 590, + 287, + 650 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 590, + 287, + 650 + ], + "type": "inline_equation", + "content": "\\Delta \\pmb{x}_{ij} = \\pmb{x}_{ij} - \\bar{\\pmb{x}}_{ij}" + }, + { + "bbox": [ + 46, + 590, + 287, + 650 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 590, + 287, + 650 + ], + "type": "inline_equation", + "content": "\\Delta z_{ij} = z_{ij} - \\bar{z}_{ij}" + }, + { + "bbox": [ + 46, + 590, + 287, + 650 + ], + "type": "text", + "content": ". 
To the left in Figure 3 we show " + }, + { + "bbox": [ + 46, + 590, + 287, + 650 + ], + "type": "inline_equation", + "content": "e^{-ay}" + }, + { + "bbox": [ + 46, + 590, + 287, + 650 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 46, + 590, + 287, + 650 + ], + "type": "inline_equation", + "content": "a = 1" + }, + { + "bbox": [ + 46, + 590, + 287, + 650 + ], + "type": "text", + "content": " (blue curve), and the Taylor approximation at " + }, + { + "bbox": [ + 46, + 590, + 287, + 650 + ], + "type": "inline_equation", + "content": "\\bar{y} = 0" + }, + { + "bbox": [ + 46, + 590, + 287, + 650 + ], + "type": "text", + "content": " (orange dashed curve). In the supplementary material, we compare level sets of the expOSE objective, its approximation, and pOSE." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 657, + 162, + 670 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 657, + 162, + 670 + ], + "spans": [ + { + "bbox": [ + 47, + 657, + 162, + 670 + ], + "type": "text", + "content": "3.1. The EXPose Model" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 676, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 676, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 676, + 287, + 713 + ], + "type": "text", + "content": "Replacing the exponential regularization in (7) with the quadratic approximation (11) at " + }, + { + "bbox": [ + 46, + 676, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\bar{y}_{ij}" + }, + { + "bbox": [ + 46, + 676, + 287, + 713 + ], + "type": "text", + "content": " results in a quadratic loss that can be written as " + }, + { + "bbox": [ + 46, + 676, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\| \\mathcal{A}(PU) - b\\|^2" + }, + { + "bbox": [ + 46, + 676, + 287, + 713 + ], + "type": "text", + "content": ", which can be" + } + ] + } + ], + "index": 11 + }, + { + "type": "code", + "bbox": [ + 310, + 88, + 530, + 437 + ], + "blocks": [ + { + "bbox": [ + 312, + 75, + 496, + 87 + ], + "lines": [ + { + "bbox": [ + 312, + 75, + 496, + 87 + ], + "spans": [ + { + "bbox": [ + 312, + 75, + 496, + 87 + ], + "type": "text", + "content": "Algorithm 1: VarPro for solving expOSE (7)" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "lines": [ + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "spans": [ + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": "Normalize image measurements by removing the mean and dividing by 3 standard deviations; \nSelect the inputs " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " , and randomly initialize elements of " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " from a normal distribution of unit std ; \nSet " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "\\bar{y}_{ij} = [m_{ij}^T,1]^T" + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " . 
\nSet up A and b by approximating the exponential regularization by a quadratic form around each " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "\\bar{y}_{ij}" + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " . \nCompute U by minimizing (7) with " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " fixed; \nSet do update " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "= 0" + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " if scheduling update of regularization is considered, otherwise do update " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "= 1" + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " .. \nwhile true do \nCompute the Jacobians " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "J_{P} = A(U^{T}\\otimes \\mathcal{I})\\colon J_{U} = A(\\mathcal{I}\\otimes P)" + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " and the residuals " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "r = \\operatorname {Avec}(PU) - b" + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " . Compute " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "P_{\\mathrm{new}}" + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "U_{\\mathrm{new}}" + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "J_P,J_U," + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " and r as " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "P_{\\mathrm{new}} = P + \\Delta P" + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "U_{\\mathrm{new}} = U + \\Delta U" + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " ,with " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "\\Delta P = (J_P^T (\\mathcal{I} - J_UJ_U^\\dagger)J_P + \\lambda \\mathcal{I})^{-1}J_P^T r," + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "\\Delta U = -J_U^\\dagger (r + J_P\\Delta P)" + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " . Evaluate the loss " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "\\ell_{\\mathrm{new}}" + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " . 
\nif " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "\\ell_{\\mathrm{new}} < \\ell_{\\mathrm{best}}" + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " then " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "\\ell_{\\mathrm{best}} = \\ell_{\\mathrm{new}}" + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " . " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "P\\gets P_{\\mathrm{new}}" + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " ; and " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "U\\gets U_{\\mathrm{new}}" + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " . if do update then Set " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "\\bar{y}_{ij} = P_iU_j" + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " . Set up A and b by approximating the regularization by a quadratic form around each " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "\\bar{y}_{ij}" + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " end \nend \nif stopping criterion then if do update then break; else do update " + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "inline_equation", + "content": "= 1" + }, + { + "bbox": [ + 310, + 88, + 530, + 437 + ], + "type": "text", + "content": " end \nend" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "code_body" + } + ], + "index": 13, + "sub_type": "algorithm" + }, + { + "bbox": [ + 304, + 468, + 545, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 468, + 545, + 529 + ], + "spans": [ + { + "bbox": [ + 304, + 468, + 545, + 529 + ], + "type": "text", + "content": "optimized using VarPro as described in Algorithm 1. The linear operator " + }, + { + "bbox": [ + 304, + 468, + 545, + 529 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 304, + 468, + 545, + 529 + ], + "type": "text", + "content": " and the vector " + }, + { + "bbox": [ + 304, + 468, + 545, + 529 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 304, + 468, + 545, + 529 + ], + "type": "text", + "content": " can be computed in each iteration based on the image measurements " + }, + { + "bbox": [ + 304, + 468, + 545, + 529 + ], + "type": "inline_equation", + "content": "\\pmb{m}_{ij}" + }, + { + "bbox": [ + 304, + 468, + 545, + 529 + ], + "type": "text", + "content": ", the current estimations " + }, + { + "bbox": [ + 304, + 468, + 545, + 529 + ], + "type": "inline_equation", + "content": "\\bar{\\pmb{y}}_{ij}" + }, + { + "bbox": [ + 304, + 468, + 545, + 529 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 468, + 545, + 529 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 304, + 468, + 545, + 529 + ], + "type": "text", + "content": ". For the initial approximation of the regularization, we use " + }, + { + "bbox": [ + 304, + 468, + 545, + 529 + ], + "type": "inline_equation", + "content": "\\bar{\\pmb{y}}_{ij} = (\\pmb{m}_{ij},1)" + }, + { + "bbox": [ + 304, + 468, + 545, + 529 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 531, + 545, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 531, + 545, + 627 + ], + "spans": [ + { + "bbox": [ + 304, + 531, + 545, + 627 + ], + "type": "text", + "content": "Regularization update scheduling: In order to improve the convergence of the algorithm, we propose to keep the initial quadratic approximation of the regularization (11) either for a fixed number of iterations or until convergence of the initial approximation. This delays the approximation of the exponential regularization in each iteration until a stable initial solution with positive depths is found. In Section 3.2 we show empirically the advantage of doing so." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "content": "Data normalization: Since our regularization term is geometrically motivated and our approach replaces reprojection error with OSE it is important to use normalization of the image data to achieve a well-conditioned formulation [13]. Here we follow standard approaches: We first subtract the image center from all image points, then divide them with the resulting standard deviation over the image." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "8962" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 55, + 68, + 285, + 204 + ], + "blocks": [ + { + "bbox": [ + 55, + 68, + 285, + 204 + ], + "lines": [ + { + "bbox": [ + 55, + 68, + 285, + 204 + ], + "spans": [ + { + "bbox": [ + 55, + 68, + 285, + 204 + ], + "type": "image", + "image_path": "c4d0daee857254db3e212c78e3ce7dbf0f46922611429274057e5d383e53dae4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 209, + 547, + 232 + ], + "lines": [ + { + "bbox": [ + 46, + 209, + 547, + 232 + ], + "spans": [ + { + "bbox": [ + 46, + 209, + 547, + 232 + ], + "type": "text", + "content": "Figure 4. Comparison of convergence rate and normalized 3D error of different methods on the Dino (a) and Fountain (b) datasets. The metrics are obtained by running 100 instances starting from random initializations. In dashed we should the metrics for the pOSE baseline." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 306, + 68, + 539, + 203 + ], + "blocks": [ + { + "bbox": [ + 306, + 68, + 539, + 203 + ], + "lines": [ + { + "bbox": [ + 306, + 68, + 539, + 203 + ], + "spans": [ + { + "bbox": [ + 306, + 68, + 539, + 203 + ], + "type": "image", + "image_path": "f904330e5b3b84f6e45f0caa92a4efcd88b3602d0aa26df42fbed5ede8d32013.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 251, + 236, + 265 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 251, + 236, + 265 + ], + "spans": [ + { + "bbox": [ + 47, + 251, + 236, + 265 + ], + "type": "text", + "content": "3.2. 
Performance evaluation of expOSE" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 272, + 287, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 272, + 287, + 380 + ], + "spans": [ + { + "bbox": [ + 46, + 272, + 287, + 380 + ], + "type": "text", + "content": "Before presenting our model for radial distortion we evaluate the effects of using exponential regularization with the standard OSE. We use the Dino (Small) [3] (36 cameras, 319 points, " + }, + { + "bbox": [ + 46, + 272, + 287, + 380 + ], + "type": "inline_equation", + "content": "77\\%" + }, + { + "bbox": [ + 46, + 272, + 287, + 380 + ], + "type": "text", + "content": " missing data) and Fountain [34] (11 cameras, 1167 points, " + }, + { + "bbox": [ + 46, + 272, + 287, + 380 + ], + "type": "inline_equation", + "content": "23\\%" + }, + { + "bbox": [ + 46, + 272, + 287, + 380 + ], + "type": "text", + "content": " missing data) datasets to evaluate the performance of expOSE with varying parameters - the weight " + }, + { + "bbox": [ + 46, + 272, + 287, + 380 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 46, + 272, + 287, + 380 + ], + "type": "text", + "content": " and scheduling of regularization update-, and optimization strategies - VarPro, Levenberg-Marquardt (LM), and Alternating Minimization (AltMin) [7]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 382, + 287, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 382, + 287, + 529 + ], + "spans": [ + { + "bbox": [ + 46, + 382, + 287, + 529 + ], + "type": "text", + "content": "The metrics used for the comparisons are convergence rate of the algorithm and relative 3D error to GT. The convergence rate is calculated by counting the number of times the algorithms converged to the lowest loss over 100 problem instances starting from random initializations (a threshold of " + }, + { + "bbox": [ + 46, + 382, + 287, + 529 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 46, + 382, + 287, + 529 + ], + "type": "text", + "content": " above the smallest loss value is used). The 3D error is computed as " + }, + { + "bbox": [ + 46, + 382, + 287, + 529 + ], + "type": "inline_equation", + "content": "e_{3D} = \\frac{\\|U' - U_{\\mathrm{GT}}\\|}{\\|U_{\\mathrm{GT}}\\|_F}" + }, + { + "bbox": [ + 46, + 382, + 287, + 529 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 46, + 382, + 287, + 529 + ], + "type": "inline_equation", + "content": "U'" + }, + { + "bbox": [ + 46, + 382, + 287, + 529 + ], + "type": "text", + "content": " is the result of performing projective registration of the factor " + }, + { + "bbox": [ + 46, + 382, + 287, + 529 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 46, + 382, + 287, + 529 + ], + "type": "text", + "content": " to the ground-truth point cloud " + }, + { + "bbox": [ + 46, + 382, + 287, + 529 + ], + "type": "inline_equation", + "content": "U_{\\mathrm{GT}}" + }, + { + "bbox": [ + 46, + 382, + 287, + 529 + ], + "type": "text", + "content": ". In this way, we are able to measure the quality of the factors " + }, + { + "bbox": [ + 46, + 382, + 287, + 529 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 46, + 382, + 287, + 529 + ], + "type": "text", + "content": " that are outputted by each method. For a fair comparison, we compute the 3D errors for solutions that converged to the desired optimum." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 529, + 287, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 529, + 287, + 602 + ], + "spans": [ + { + "bbox": [ + 46, + 529, + 287, + 602 + ], + "type": "text", + "content": "The methods are implemented in MATLAB, and we let each method perform a maximum of 500 iterations. For the case of regularization update scheduling, which we call " + }, + { + "bbox": [ + 46, + 529, + 287, + 602 + ], + "type": "inline_equation", + "content": "\\exp\\mathrm{OSE}(\\mathrm{S})" + }, + { + "bbox": [ + 46, + 529, + 287, + 602 + ], + "type": "text", + "content": ", we delay the update of the regularization quadratic approximation by 250 iterations or until the initial optimization converges - whichever occurs first." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 604, + 287, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 604, + 287, + 700 + ], + "spans": [ + { + "bbox": [ + 46, + 604, + 287, + 700 + ], + "type": "text", + "content": "Effect of " + }, + { + "bbox": [ + 46, + 604, + 287, + 700 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 46, + 604, + 287, + 700 + ], + "type": "text", + "content": " and scheduling: The performance of expOSE is evaluated for multiple values of " + }, + { + "bbox": [ + 46, + 604, + 287, + 700 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 46, + 604, + 287, + 700 + ], + "type": "text", + "content": " ranging from " + }, + { + "bbox": [ + 46, + 604, + 287, + 700 + ], + "type": "inline_equation", + "content": "10^{-4}" + }, + { + "bbox": [ + 46, + 604, + 287, + 700 + ], + "type": "text", + "content": " to 0.5. The results are plotted in Figure 4. We show that expOSE is significantly more robust to " + }, + { + "bbox": [ + 46, + 604, + 287, + 700 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 46, + 604, + 287, + 700 + ], + "type": "text", + "content": " than pOSE in terms of 3D errors (see also Figure 1). We also show that delaying the update of the quadratic approximation of the regularization results in a significant boost in convergence rate, allowing us to achieve rates similar to pOSE." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 59, + 701, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 701, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 59, + 701, + 287, + 714 + ], + "type": "text", + "content": "Comparison with other optimization strategies: We" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 251, + 547, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 251, + 547, + 313 + ], + "spans": [ + { + "bbox": [ + 304, + 251, + 547, + 313 + ], + "type": "text", + "content": "compare the performance of expOSE (with and without scheduling) when using VarPro, LM and AltMin. The results confirm that, just like with pOSE, VarPro is the most reliable method for expOSE, while LM and AltMin achieve poor convergence rates." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 325, + 484, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 325, + 484, + 338 + ], + "spans": [ + { + "bbox": [ + 305, + 325, + 484, + 338 + ], + "type": "text", + "content": "4. 
Robustness to Radial Distortion" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 346, + 547, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 346, + 547, + 526 + ], + "spans": [ + { + "bbox": [ + 304, + 346, + 547, + 526 + ], + "type": "text", + "content": "In the previous sections, we considered modifications to the original pOSE model which assumes a regular pinhole camera. In [18] the RpOSE model which instead uses a radial camera [20, 21, 23, 24, 29, 37, 40] is presented. This model is invariant to radial distortion which the standard pOSE model does not handle. We note however that the radial model requires more data for parameter estimation since it essentially only measures errors in one direction of the image. To address this issue we introduce an intermediate model by decomposing the reprojection error into a tangential and a radial component. By down-weighting the tangential error we obtain a model that is more robust to radial distortion than the pinhole camera but less sensitive to missing data than the radial camera. We then introduce an exponential regularization term for this model." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 535, + 523, + 548 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 535, + 523, + 548 + ], + "spans": [ + { + "bbox": [ + 305, + 535, + 523, + 548 + ], + "type": "text", + "content": "4.1. Decoupling Tangential and Radial Errors" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 555, + 545, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 555, + 545, + 603 + ], + "spans": [ + { + "bbox": [ + 304, + 555, + 545, + 603 + ], + "type": "text", + "content": "When working with the radial camera model it is typically assumed that the principal point and the distortion center are the center of the image and have coordinates " + }, + { + "bbox": [ + 304, + 555, + 545, + 603 + ], + "type": "inline_equation", + "content": "(0,0)" + }, + { + "bbox": [ + 304, + 555, + 545, + 603 + ], + "type": "text", + "content": ". We make the same assumption here." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 603, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 603, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 603, + 546, + 713 + ], + "type": "text", + "content": "The reprojection error is obtained by taking the length of the error vector " + }, + { + "bbox": [ + 304, + 603, + 546, + 713 + ], + "type": "inline_equation", + "content": "e(x, z) = \\frac{x}{z} - m" + }, + { + "bbox": [ + 304, + 603, + 546, + 713 + ], + "type": "text", + "content": ". The coordinates of this vector are given in w.r.t. the canonical image basis (1,0) and (0,1) of the image and can be interpreted as errors in the " + }, + { + "bbox": [ + 304, + 603, + 546, + 713 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 304, + 603, + 546, + 713 + ], + "type": "text", + "content": "- and " + }, + { + "bbox": [ + 304, + 603, + 546, + 713 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 304, + 603, + 546, + 713 + ], + "type": "text", + "content": "-directions respectively. 
For a point " + }, + { + "bbox": [ + 304, + 603, + 546, + 713 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 304, + 603, + 546, + 713 + ], + "type": "text", + "content": " we are interested in measuring the error in the radial direction " + }, + { + "bbox": [ + 304, + 603, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\frac{m}{\\|m\\|}" + }, + { + "bbox": [ + 304, + 603, + 546, + 713 + ], + "type": "text", + "content": " and the tangential direction " + }, + { + "bbox": [ + 304, + 603, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\frac{m_{\\perp}}{\\|m\\|}" + }, + { + "bbox": [ + 304, + 603, + 546, + 713 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 603, + 546, + 713 + ], + "type": "inline_equation", + "content": "m_{\\perp}" + }, + { + "bbox": [ + 304, + 603, + 546, + 713 + ], + "type": "text", + "content": " is the orthogonal vector to " + }, + { + "bbox": [ + 304, + 603, + 546, + 713 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 304, + 603, + 546, + 713 + ], + "type": "text", + "content": " (see Figure 5). We, therefore, write the error vector as a linear combination of these. It is not diffi" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "8963" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 110, + 75, + 227, + 186 + ], + "blocks": [ + { + "bbox": [ + 110, + 75, + 227, + 186 + ], + "lines": [ + { + "bbox": [ + 110, + 75, + 227, + 186 + ], + "spans": [ + { + "bbox": [ + 110, + 75, + 227, + 186 + ], + "type": "image", + "image_path": "4cba7ccf2d3aa7df11ad7fd74ec869fb56d3fa7207ea8f8c32d64c2d06804bd7.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 194, + 287, + 217 + ], + "lines": [ + { + "bbox": [ + 47, + 194, + 287, + 217 + ], + "spans": [ + { + "bbox": [ + 47, + 194, + 287, + 217 + ], + "type": "text", + "content": "Figure 5. Levelsets (red ellipses) of " + }, + { + "bbox": [ + 47, + 194, + 287, + 217 + ], + "type": "inline_equation", + "content": "\\ell_{\\mathrm{wose}}" + }, + { + "bbox": [ + 47, + 194, + 287, + 217 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 47, + 194, + 287, + 217 + ], + "type": "inline_equation", + "content": "\\alpha = 0.1" + }, + { + "bbox": [ + 47, + 194, + 287, + 217 + ], + "type": "text", + "content": " and 0.9. Here " + }, + { + "bbox": [ + 47, + 194, + 287, + 217 + ], + "type": "inline_equation", + "content": "m = (0.6, 0.9)" + }, + { + "bbox": [ + 47, + 194, + 287, + 217 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 194, + 287, + 217 + ], + "type": "inline_equation", + "content": "z = 1" + }, + { + "bbox": [ + 47, + 194, + 287, + 217 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 237, + 120, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 237, + 120, + 248 + ], + "spans": [ + { + "bbox": [ + 47, + 237, + 120, + 248 + ], + "type": "text", + "content": "cult to verify that" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 255, + 287, + 285 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 255, + 287, + 285 + ], + "spans": [ + { + "bbox": [ + 52, + 255, + 287, + 285 + ], + "type": "interline_equation", + "content": "\\frac {\\boldsymbol {x}}{z} - \\boldsymbol {m} = \\left(\\frac {\\boldsymbol {m} ^ {T} \\boldsymbol {x}}{z \\| \\boldsymbol {m} \\|} - \\| \\boldsymbol {m} \\|\\right) \\frac {\\boldsymbol {m}}{\\| \\boldsymbol {m} \\|} + \\frac {\\boldsymbol {m} _ {\\perp} ^ {T} \\boldsymbol {x}}{\\| \\boldsymbol {m} \\| z} \\frac {\\boldsymbol {m} _ {\\perp}}{\\| \\boldsymbol {m} \\|}. \\tag {12}", + "image_path": "ede63255e159181b8c65344d4d7c3c24425e8640d161a3931b65d24ce970bc6b.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 290, + 275, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 290, + 275, + 306 + ], + "spans": [ + { + "bbox": [ + 47, + 290, + 275, + 306 + ], + "type": "text", + "content": "In the basis " + }, + { + "bbox": [ + 47, + 290, + 275, + 306 + ], + "type": "inline_equation", + "content": "\\frac{m}{\\|m\\|}" + }, + { + "bbox": [ + 47, + 290, + 275, + 306 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 290, + 275, + 306 + ], + "type": "inline_equation", + "content": "\\frac{m_{\\perp}}{\\|m\\|}" + }, + { + "bbox": [ + 47, + 290, + 275, + 306 + ], + "type": "text", + "content": " the error vector can be written as" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 86, + 312, + 287, + 340 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 312, + 287, + 340 + ], + "spans": [ + { + "bbox": [ + 86, + 312, + 287, + 340 + ], + "type": "interline_equation", + "content": "\\boldsymbol {e} (\\boldsymbol {x}, z) = \\frac {1}{\\| \\boldsymbol {m} \\|} \\left[ \\begin{array}{l} \\boldsymbol {m} ^ {T} \\\\ \\boldsymbol {m} _ {\\perp} ^ {T} \\end{array} \\right] \\frac {\\boldsymbol {x}}{z} - \\binom {\\| \\boldsymbol {m} \\|} {0}. \\tag {13}", + "image_path": "5e3a149aedfc7b118c0fda2b8b190eea29950e7344b5bb678f80bdb81f479bab.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 346, + 287, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 346, + 287, + 406 + ], + "spans": [ + { + "bbox": [ + 47, + 346, + 287, + 406 + ], + "type": "text", + "content": "Independently of the basis chosen, the reprojection error is nonlinear due to the division by " + }, + { + "bbox": [ + 47, + 346, + 287, + 406 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 47, + 346, + 287, + 406 + ], + "type": "text", + "content": ", making it unsuitable for optimization. The OSE in the new basis is obtained by rescaling the reprojection error " + }, + { + "bbox": [ + 47, + 346, + 287, + 406 + ], + "type": "inline_equation", + "content": "e(\\pmb{x},z)" + }, + { + "bbox": [ + 47, + 346, + 287, + 406 + ], + "type": "text", + "content": " by the depth " + }, + { + "bbox": [ + 47, + 346, + 287, + 406 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 47, + 346, + 287, + 406 + ], + "type": "text", + "content": ". 
The expression for OSE error in the new basis is therefore" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 413, + 287, + 443 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 413, + 287, + 443 + ], + "spans": [ + { + "bbox": [ + 52, + 413, + 287, + 443 + ], + "type": "interline_equation", + "content": "\\left\\| z e (\\boldsymbol {x}, z) \\right\\| ^ {2} = \\left(\\frac {\\boldsymbol {m} ^ {T}}{\\| \\boldsymbol {m} \\|} \\boldsymbol {x} - \\| \\boldsymbol {m} \\| z\\right) ^ {2} + \\left(\\frac {\\boldsymbol {m} _ {\\perp} ^ {T}}{\\| \\boldsymbol {m} \\|} \\boldsymbol {x}\\right) ^ {2}. \\tag {14}", + "image_path": "2cda43e8fd74c03a8253102766386fb6ebbc7f2688320172e16fe383773fbdce.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 456, + 237, + 470 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 456, + 237, + 470 + ], + "spans": [ + { + "bbox": [ + 47, + 456, + 237, + 470 + ], + "type": "text", + "content": "4.2. Reweighting the Error Components" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 475, + 287, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 475, + 287, + 499 + ], + "spans": [ + { + "bbox": [ + 47, + 475, + 287, + 499 + ], + "type": "text", + "content": "Radial distortion is usually modeled by modifying the projection according to" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 134, + 506, + 287, + 528 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 506, + 287, + 528 + ], + "spans": [ + { + "bbox": [ + 134, + 506, + 287, + 528 + ], + "type": "interline_equation", + "content": "\\kappa_ {r} (\\boldsymbol {m}) \\boldsymbol {m} = \\frac {\\boldsymbol {x}}{z} \\tag {15}", + "image_path": "a3d3a372196c92b88baf7a24b2a93a9d7a047f4396b5d280fe768570ccc19c87.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 534, + 287, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 534, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 47, + 534, + 287, + 712 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 534, + 287, + 712 + ], + "type": "inline_equation", + "content": "\\kappa_{r}" + }, + { + "bbox": [ + 47, + 534, + 287, + 712 + ], + "type": "text", + "content": " is a scalar that depends on the distance to the distortion center. It is clear that the second term of (14) vanishes when inserting " + }, + { + "bbox": [ + 47, + 534, + 287, + 712 + ], + "type": "inline_equation", + "content": "(\\pmb{x},z)" + }, + { + "bbox": [ + 47, + 534, + 287, + 712 + ], + "type": "text", + "content": " fulfilling (15) for any " + }, + { + "bbox": [ + 47, + 534, + 287, + 712 + ], + "type": "inline_equation", + "content": "\\kappa_{r}" + }, + { + "bbox": [ + 47, + 534, + 287, + 712 + ], + "type": "text", + "content": ", but not the first term. To handle radial distortion we could incorporate the additional parameter " + }, + { + "bbox": [ + 47, + 534, + 287, + 712 + ], + "type": "inline_equation", + "content": "\\kappa_{r}" + }, + { + "bbox": [ + 47, + 534, + 287, + 712 + ], + "type": "text", + "content": " in (14) and explicitly estimate it. Unfortunately, this results in a more complex model (with trilinear interactions) making optimization difficult. Alternatively, to achieve robustness to radial distortion we can remove the first term, as in [18]. 
The downside of doing this is that it removes roughly half of the data (one out of two coordinates for each projection) available for use in inference. Therefore we here propose to compensate for the unknown radial distortion by down-weighting the first term or equivalently allowing a larger standard deviation in the radial direction." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "\\sigma_r^2" + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "\\sigma_t^2" + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "text", + "content": " denote the uncertainties of the reprojection error " + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "\\epsilon = s\\pmb{x} / z - \\pmb{m}" + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "text", + "content": " along the radial and tangential direction, respectively, and where " + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "text", + "content": " is an unknown positive scalar that models radial distortion effects and focal length scaling. Assuming the reprojection error " + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "text", + "content": " is sampled from a 2D normal distribution " + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(0,\\Sigma)" + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "text", + "content": ", the probability of the model " + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "\\{\\pmb{x},z\\}" + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "text", + "content": " given " + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "\\pmb{m}" + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "text", + "content": " is" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 163, + 545, + 196 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 163, + 545, + 196 + ], + "spans": [ + { + "bbox": [ + 307, + 163, + 545, + 196 + ], + "type": "interline_equation", + "content": "P (\\boldsymbol {x}, z \\mid \\boldsymbol {m}) = \\frac {1}{2 \\pi \\det (\\Sigma) ^ {1 / 2}} e ^ {- s ^ {2} \\left(\\frac {1}{s} \\boldsymbol {m} - \\boldsymbol {x} / z\\right) ^ {T} \\Sigma^ {- 1} \\left(\\frac {1}{s} \\boldsymbol {m} - \\boldsymbol {x} / z\\right)}. \\tag {16}", + "image_path": "5c2b997c51bd27d441e3172bfafc1a9523e701a870e7764139279f127c332e4c.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 198, + 545, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 198, + 545, + 221 + ], + "spans": [ + { + "bbox": [ + 305, + 198, + 545, + 221 + ], + "type": "text", + "content": "Maximizing the likelihood (16) w.r.t. 
" + }, + { + "bbox": [ + 305, + 198, + 545, + 221 + ], + "type": "inline_equation", + "content": "\\{x,z\\}" + }, + { + "bbox": [ + 305, + 198, + 545, + 221 + ], + "type": "text", + "content": " is equivalent to minimizing" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 322, + 227, + 545, + 258 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 227, + 545, + 258 + ], + "spans": [ + { + "bbox": [ + 322, + 227, + 545, + 258 + ], + "type": "interline_equation", + "content": "\\frac {s ^ {2}}{\\sigma_ {r} ^ {2}} \\left(\\frac {\\boldsymbol {m} ^ {T}}{\\| \\boldsymbol {m} \\|} \\frac {\\boldsymbol {x}}{z} - \\frac {1}{s} \\| \\boldsymbol {m} \\|\\right) ^ {2} + \\frac {s ^ {2}}{\\sigma_ {t} ^ {2}} \\left(\\frac {\\boldsymbol {m} _ {\\perp} ^ {T}}{\\| \\boldsymbol {m} \\|} \\frac {\\boldsymbol {x}}{z}\\right) ^ {2}, \\tag {17}", + "image_path": "bae68acca186989dda12bc170161c71a6dd33793541a65d7a89ed95eb1cef895.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 263, + 545, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 263, + 545, + 335 + ], + "spans": [ + { + "bbox": [ + 305, + 263, + 545, + 335 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 263, + 545, + 335 + ], + "type": "inline_equation", + "content": "\\Sigma = R^T\\mathrm{diag}(\\sigma_r^2,\\sigma_t^2)R" + }, + { + "bbox": [ + 305, + 263, + 545, + 335 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 263, + 545, + 335 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 305, + 263, + 545, + 335 + ], + "type": "text", + "content": " is a rotation matrix that aligns the coordinate axis with " + }, + { + "bbox": [ + 305, + 263, + 545, + 335 + ], + "type": "inline_equation", + "content": "\\boldsymbol {m} / \\| \\boldsymbol {m}\\|" + }, + { + "bbox": [ + 305, + 263, + 545, + 335 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 263, + 545, + 335 + ], + "type": "inline_equation", + "content": "\\boldsymbol {m}_{\\perp} / \\| \\boldsymbol {m}\\|" + }, + { + "bbox": [ + 305, + 263, + 545, + 335 + ], + "type": "text", + "content": ". While the second term quadratic term of (17) is not affected by " + }, + { + "bbox": [ + 305, + 263, + 545, + 335 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 305, + 263, + 545, + 335 + ], + "type": "text", + "content": ", in the first term " + }, + { + "bbox": [ + 305, + 263, + 545, + 335 + ], + "type": "inline_equation", + "content": "\\| \\boldsymbol {m}\\|" + }, + { + "bbox": [ + 305, + 263, + 545, + 335 + ], + "type": "text", + "content": " is weighted by " + }, + { + "bbox": [ + 305, + 263, + 545, + 335 + ], + "type": "inline_equation", + "content": "1 / s" + }, + { + "bbox": [ + 305, + 263, + 545, + 335 + ], + "type": "text", + "content": ", which is undesirable as previously motivated. 
We propose to approximate (17) by" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 319, + 341, + 545, + 387 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 341, + 545, + 387 + ], + "spans": [ + { + "bbox": [ + 319, + 341, + 545, + 387 + ], + "type": "interline_equation", + "content": "\\underbrace {\\frac {1}{\\sigma_ {r} ^ {2}}} _ {(1 - \\alpha)} \\left(\\frac {\\boldsymbol {m} ^ {T}}{\\| \\boldsymbol {m} \\|} \\frac {\\boldsymbol {x}}{z} - \\| \\boldsymbol {m} \\|\\right) ^ {2} + \\underbrace {\\frac {1}{\\sigma_ {t} ^ {2}}} _ {\\alpha} \\left(\\frac {\\boldsymbol {m} _ {\\perp} ^ {T}}{\\| \\boldsymbol {m} \\|} \\frac {\\boldsymbol {x}}{z}\\right) ^ {2}. \\tag {18}", + "image_path": "73078e904302305ffe39fb8107888b5c7229c779bd57a1041f50e24a3b7540e0.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 393, + 545, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 393, + 545, + 514 + ], + "spans": [ + { + "bbox": [ + 305, + 393, + 545, + 514 + ], + "type": "text", + "content": "This approximation of the first term adds a bias to the obtained solution based on the unknown shift " + }, + { + "bbox": [ + 305, + 393, + 545, + 514 + ], + "type": "inline_equation", + "content": "\\left(\\frac{1}{s} - 1\\right)\\| \\boldsymbol{m}\\|" + }, + { + "bbox": [ + 305, + 393, + 545, + 514 + ], + "type": "text", + "content": ". We regulate the effect of this bias - and thus the robustness to radial distortion - by controlling the relative weight of the first quadratic term (biased) versus the second quadratic term (unbiased) through the value of " + }, + { + "bbox": [ + 305, + 393, + 545, + 514 + ], + "type": "inline_equation", + "content": "\\alpha \\in [0,1]" + }, + { + "bbox": [ + 305, + 393, + 545, + 514 + ], + "type": "text", + "content": ". For the extreme case of " + }, + { + "bbox": [ + 305, + 393, + 545, + 514 + ], + "type": "inline_equation", + "content": "\\alpha = 1" + }, + { + "bbox": [ + 305, + 393, + 545, + 514 + ], + "type": "text", + "content": " the radial component of the error is completely dropped resulting in the loss presented in [18]. Linear residuals can be obtained by replacing (18) with its component-weighted OSE counterpart" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 309, + 518, + 545, + 557 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 518, + 545, + 557 + ], + "spans": [ + { + "bbox": [ + 309, + 518, + 545, + 557 + ], + "type": "interline_equation", + "content": "\\ell_ {\\mathrm {w O S E}} = (1 - \\alpha) \\left(\\frac {\\boldsymbol {m} ^ {T}}{\\| \\boldsymbol {m} \\|} \\boldsymbol {x} - \\| \\boldsymbol {m} \\| z\\right) ^ {2} + \\alpha \\left(\\frac {\\boldsymbol {m} _ {\\perp} ^ {T}}{\\| \\boldsymbol {m} \\|} \\boldsymbol {x}\\right) ^ {2}. 
\\tag {19}", + "image_path": "04c301afaaa5666a6de30b057035bd44defb904e8b6bbee09d9131ce001f6bce.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 558, + 545, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 558, + 545, + 581 + ], + "spans": [ + { + "bbox": [ + 305, + 558, + 545, + 581 + ], + "type": "text", + "content": "Figure 5 shows an example of level sets (in the image plane " + }, + { + "bbox": [ + 305, + 558, + 545, + 581 + ], + "type": "inline_equation", + "content": "z = 1" + }, + { + "bbox": [ + 305, + 558, + 545, + 581 + ], + "type": "text", + "content": ") for " + }, + { + "bbox": [ + 305, + 558, + 545, + 581 + ], + "type": "inline_equation", + "content": "\\alpha = 0.1" + }, + { + "bbox": [ + 305, + 558, + 545, + 581 + ], + "type": "text", + "content": " and 0.9." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 582, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 545, + 712 + ], + "type": "text", + "content": "Note that the same approach can be used to handle unknown focal lengths. If we assume that the intrinsic calibration matrix of the camera is " + }, + { + "bbox": [ + 304, + 582, + 545, + 712 + ], + "type": "inline_equation", + "content": "\\mathbf{K} = \\mathrm{diag}(f, f, 1)" + }, + { + "bbox": [ + 304, + 582, + 545, + 712 + ], + "type": "text", + "content": ", the relation between the reprojected point and the image measurement is " + }, + { + "bbox": [ + 304, + 582, + 545, + 712 + ], + "type": "inline_equation", + "content": "\\frac{\\kappa_r}{f} \\mathbf{m} = \\frac{\\mathbf{x}}{z}" + }, + { + "bbox": [ + 304, + 582, + 545, + 712 + ], + "type": "text", + "content": " and therefore the re-weighted formulation can be applied to this setting as well. An unknown/varying focal length " + }, + { + "bbox": [ + 304, + 582, + 545, + 712 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 304, + 582, + 545, + 712 + ], + "type": "text", + "content": " is however modeled by the standard pOSE model in contrast to " + }, + { + "bbox": [ + 304, + 582, + 545, + 712 + ], + "type": "inline_equation", + "content": "\\kappa_r" + }, + { + "bbox": [ + 304, + 582, + 545, + 712 + ], + "type": "text", + "content": " which depends on the distance between the projection and the principal point and thus cannot be included in a factorization algorithm without adding extra variables." + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "8964" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 286, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 286, + 85 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 286, + 85 + ], + "type": "text", + "content": "4.3. 
Regularization for radial distortion invariance" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 91, + 287, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 91, + 287, + 226 + ], + "spans": [ + { + "bbox": [ + 46, + 91, + 287, + 226 + ], + "type": "text", + "content": "Weighting differently the radial and tangential of the OSE does not change, in general, the exponential regularization described in Section 2. However, one must note that for the extreme case " + }, + { + "bbox": [ + 46, + 91, + 287, + 226 + ], + "type": "inline_equation", + "content": "\\alpha = 1" + }, + { + "bbox": [ + 46, + 91, + 287, + 226 + ], + "type": "text", + "content": ", for a given " + }, + { + "bbox": [ + 46, + 91, + 287, + 226 + ], + "type": "inline_equation", + "content": "X = PU" + }, + { + "bbox": [ + 46, + 91, + 287, + 226 + ], + "type": "text", + "content": " the variables in every third row of " + }, + { + "bbox": [ + 46, + 91, + 287, + 226 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 46, + 91, + 287, + 226 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 91, + 287, + 226 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 46, + 91, + 287, + 226 + ], + "type": "text", + "content": " vanish from the OSE. In other words, decreasing the total loss will always be possible by increasing " + }, + { + "bbox": [ + 46, + 91, + 287, + 226 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 46, + 91, + 287, + 226 + ], + "type": "text", + "content": " through the third row of " + }, + { + "bbox": [ + 46, + 91, + 287, + 226 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 46, + 91, + 287, + 226 + ], + "type": "text", + "content": ", and consequently decreasing the " + }, + { + "bbox": [ + 46, + 91, + 287, + 226 + ], + "type": "inline_equation", + "content": "e^{-\\frac{z}{\\sqrt{\\|m\\|^2 + 1}}}" + }, + { + "bbox": [ + 46, + 91, + 287, + 226 + ], + "type": "text", + "content": " part of the exponential regularization. 
To avoid such undesirable behavior, we proposed an alternative exponential regularization for the particular case " + }, + { + "bbox": [ + 46, + 91, + 287, + 226 + ], + "type": "inline_equation", + "content": "\\alpha = 1" + }, + { + "bbox": [ + 46, + 91, + 287, + 226 + ], + "type": "text", + "content": " acting only on " + }, + { + "bbox": [ + 46, + 91, + 287, + 226 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 91, + 287, + 226 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 91, + 287, + 226 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 46, + 91, + 287, + 226 + ], + "type": "text", + "content": ", i.e.," + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 134, + 235, + 287, + 253 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 235, + 287, + 253 + ], + "spans": [ + { + "bbox": [ + 134, + 235, + 287, + 253 + ], + "type": "interline_equation", + "content": "\\ell_ {\\exp} = e ^ {- \\frac {\\boldsymbol {m} ^ {T}}{\\| \\boldsymbol {m} \\|} \\boldsymbol {x}} \\tag {20}", + "image_path": "b5c0a2f8b8ccb4a9a765e1a90523a945e1f873c4b719287e7846756de63f4c5a.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 263, + 287, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 263, + 287, + 310 + ], + "spans": [ + { + "bbox": [ + 47, + 263, + 287, + 310 + ], + "type": "text", + "content": "This alternative regularization enforces the reprojection " + }, + { + "bbox": [ + 47, + 263, + 287, + 310 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 47, + 263, + 287, + 310 + ], + "type": "text", + "content": " according to the 1D radial camera model " + }, + { + "bbox": [ + 47, + 263, + 287, + 310 + ], + "type": "inline_equation", + "content": "\\pmb{m} = \\lambda \\pmb{x}" + }, + { + "bbox": [ + 47, + 263, + 287, + 310 + ], + "type": "text", + "content": " to have positive scale " + }, + { + "bbox": [ + 47, + 263, + 287, + 310 + ], + "type": "inline_equation", + "content": "\\lambda > 0" + }, + { + "bbox": [ + 47, + 263, + 287, + 310 + ], + "type": "text", + "content": ", canceling out the shrinking bias of the OSE as in the general case." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 312, + 287, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 312, + 287, + 335 + ], + "spans": [ + { + "bbox": [ + 47, + 312, + 287, + 335 + ], + "type": "text", + "content": "The expOSE loss for weighted radial and tangent components of the OSE can then be approximated as" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 345, + 287, + 370 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 345, + 287, + 370 + ], + "spans": [ + { + "bbox": [ + 52, + 345, + 287, + 370 + ], + "type": "interline_equation", + "content": "\\ell_ {\\text {e x p O S E}} = \\sum_ {i j} (1 - \\eta) \\ell_ {\\text {w O S E}} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right) + \\eta \\tilde {\\ell} _ {\\text {e x p}} \\left(\\boldsymbol {x} _ {i j}, z _ {i j}\\right) \\tag {21}", + "image_path": "21eca05d516c48c934a5f8c2920d33f5151f22bbd54c56d295fe21661a2f910f.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 381, + 128, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 381, + 128, + 396 + ], + "spans": [ + { + "bbox": [ + 47, + 381, + 128, + 396 + ], + "type": "text", + "content": "with " + }, + { + "bbox": [ + 47, + 381, + 128, + 396 + ], + "type": "inline_equation", + "content": "\\tilde{\\ell}_{\\mathrm{exp}}" + }, + { + "bbox": [ + 47, + 381, + 128, + 396 + ], + "type": "text", + "content": " defined as" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 405, + 287, + 459 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 405, + 287, + 459 + ], + "spans": [ + { + "bbox": [ + 55, + 405, + 287, + 459 + ], + "type": "interline_equation", + "content": "\\left\\{ \\begin{array}{l l} \\frac {\\ell_ {\\exp} \\left(\\bar {x} _ {i j} , \\bar {z} _ {i j}\\right)}{2} \\left(\\frac {\\boldsymbol {m} _ {i j} ^ {T} \\Delta \\boldsymbol {x} _ {i j} + \\Delta z _ {i j}}{\\sqrt {\\| \\boldsymbol {m} _ {i j} \\| ^ {2} + 1}} - 1\\right) ^ {2}, & \\alpha \\in [ 0, 1 [ \\\\ \\frac {\\ell_ {\\exp} (\\bar {x} _ {i j})}{2} \\left(\\frac {\\boldsymbol {m} _ {i j} ^ {T} \\Delta \\boldsymbol {x} _ {i j}}{\\| \\boldsymbol {m} _ {i j} \\|} - 1\\right) ^ {2}, & \\alpha = 1 \\end{array} . \\right. \\tag {22}", + "image_path": "9a42904ed4e1743e8f35ef5bd8d523c18145215f7114e67afe4820211aa5c148.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 468, + 287, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 468, + 287, + 517 + ], + "spans": [ + { + "bbox": [ + 47, + 468, + 287, + 517 + ], + "type": "text", + "content": "This radial distortion robust version of expOSE can be optimized following Algorithm 1 nonetheless since both the component-weighted OSE and the quadratic approximation of the regularization can still be written as " + }, + { + "bbox": [ + 47, + 468, + 287, + 517 + ], + "type": "inline_equation", + "content": "\\| \\mathcal{A}(PU) - b\\|^2" + }, + { + "bbox": [ + 47, + 468, + 287, + 517 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 528, + 262, + 542 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 528, + 262, + 542 + ], + "spans": [ + { + "bbox": [ + 47, + 528, + 262, + 542 + ], + "type": "text", + "content": "5. 
Outline of Full Reconstruction Pipeline" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 548, + 287, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 548, + 287, + 644 + ], + "spans": [ + { + "bbox": [ + 46, + 548, + 287, + 644 + ], + "type": "text", + "content": "We propose to use expOSE as a solution to uncalibrated and radial distortion invariant Structure-from-Motion. A few Bundle Adjustment steps can be performed for further refinement. The pipeline takes as input 2D image measurements of points tracked along multiple views, just like any other factorization-based SfM pipeline. The proposed radial distortion-invariant pipeline can be decomposed into the following sequential modules:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 653, + 287, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 653, + 287, + 702 + ], + "spans": [ + { + "bbox": [ + 55, + 653, + 287, + 702 + ], + "type": "text", + "content": "1. expOSE factorization: Given a set of image points tracked along several images, we use Algorithm 1 to obtain estimations of the uncalibrated camera matrix, and the 3D points, up to projective ambiguity." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 72, + 545, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 545, + 165 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 545, + 165 + ], + "type": "text", + "content": "2. Radial distortion estimation (and camera matrix completion): Using the solution obtained with expOSE, the distortion parameters and, for " + }, + { + "bbox": [ + 313, + 72, + 545, + 165 + ], + "type": "inline_equation", + "content": "\\alpha = 1" + }, + { + "bbox": [ + 313, + 72, + 545, + 165 + ], + "type": "text", + "content": ", the third row of the uncalibrated camera matrix are estimated from the equations in (15). 
Note that by assuming a Brown-Conrady radial distortion model [2] with " + }, + { + "bbox": [ + 313, + 72, + 545, + 165 + ], + "type": "inline_equation", + "content": "\\kappa(m) = \\sum_{j} k_{j} \\|m\\|^{2j}" + }, + { + "bbox": [ + 313, + 72, + 545, + 165 + ], + "type": "text", + "content": ", for each camera a system of equations of the form" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 403, + 165, + 545, + 192 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 403, + 165, + 545, + 192 + ], + "spans": [ + { + "bbox": [ + 403, + 165, + 545, + 192 + ], + "type": "interline_equation", + "content": "M _ {i} \\left[ \\begin{array}{c} p _ {i} ^ {(3)} \\\\ \\mathbf {k} \\end{array} \\right] = b _ {i} \\tag {23}", + "image_path": "62e2f6dc263e62a8830be62eac7d51bcc348c6d8c085034a9cb367b623fdad94.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "spans": [ + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "text", + "content": "can be obtained, where " + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "inline_equation", + "content": "p_i^{(3)}" + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "text", + "content": " is the third row of the " + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "text", + "content": "th camera matrix, and " + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "inline_equation", + "content": "\\mathbf{k}" + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "text", + "content": " is a vector of the distortion parameters. Here we use a distortion model with three parameters, " + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "inline_equation", + "content": "k_j, j = 1,\\dots,3" + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "text", + "content": ". Assuming that the distortion model is constant along all views, the overall system of equations can be written as " + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "inline_equation", + "content": "M[p^{(3)T},\\mathbf{k}^T ]^T = b" + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "text", + "content": ", with " + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "text", + "content": " being a " + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "inline_equation", + "content": "4\\times" + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "text", + "content": " #views vector with all third rows of the camera matrices. For " + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "inline_equation", + "content": "\\alpha = 1" + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "text", + "content": " both " + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "inline_equation", + "content": "p^{(3)}" + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "inline_equation", + "content": "\\mathbf{k}" + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "text", + "content": " are unknowns and are estimated in this step. 
For " + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "inline_equation", + "content": "\\alpha \\neq 1" + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "text", + "content": ", the system can be simplified to " + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "inline_equation", + "content": "M\\mathbf{k} = b - Mp^{(3)}" + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "text", + "content": " since " + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "inline_equation", + "content": "p^{(3)}" + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "text", + "content": " is already estimated by expOSE. If it is assumed that there is no radial distortion and " + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "inline_equation", + "content": "\\alpha \\neq 1" + }, + { + "bbox": [ + 324, + 198, + 545, + 357 + ], + "type": "text", + "content": ", then this step can be completely skipped." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 363, + 544, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 363, + 544, + 375 + ], + "spans": [ + { + "bbox": [ + 313, + 363, + 544, + 375 + ], + "type": "text", + "content": "3. Bundle adjustment: We perform local optimization of" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 367, + 381, + 545, + 413 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 381, + 545, + 413 + ], + "spans": [ + { + "bbox": [ + 367, + 381, + 545, + 413 + ], + "type": "interline_equation", + "content": "\\sum_ {i j} \\left\\| \\boldsymbol {m} _ {i j} - (1 + \\kappa (\\boldsymbol {m} _ {i j})) \\frac {\\boldsymbol {x} _ {i j}}{z _ {i j}} \\right\\| ^ {2} \\tag {24}", + "image_path": "c13711d310c4e734fd5d1962eb161075c9f1cbfdd27343b9d47971ba842e7003.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 324, + 419, + 545, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 419, + 545, + 503 + ], + "spans": [ + { + "bbox": [ + 324, + 419, + 545, + 503 + ], + "type": "text", + "content": "starting from the estimations of " + }, + { + "bbox": [ + 324, + 419, + 545, + 503 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 324, + 419, + 545, + 503 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 324, + 419, + 545, + 503 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 324, + 419, + 545, + 503 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 324, + 419, + 545, + 503 + ], + "type": "inline_equation", + "content": "\\mathbf{k}" + }, + { + "bbox": [ + 324, + 419, + 545, + 503 + ], + "type": "text", + "content": " found with the previous steps. The optimization is solved using Levenberg-Marquardt algorithm. If there is no radial distortion then the parameters " + }, + { + "bbox": [ + 324, + 419, + 545, + 503 + ], + "type": "inline_equation", + "content": "\\mathbf{k}" + }, + { + "bbox": [ + 324, + 419, + 545, + 503 + ], + "type": "text", + "content": " can be set to zero and kept constant during optimization. For expOSE initialization, we observe that usually only a few steps are needed (5-10 steps)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 510, + 545, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 510, + 545, + 570 + ], + "spans": [ + { + "bbox": [ + 313, + 510, + 545, + 570 + ], + "type": "text", + "content": "4. 
Euclidean update: Finally we estimate the projective transformation " + }, + { + "bbox": [ + 313, + 510, + 545, + 570 + ], + "type": "inline_equation", + "content": "H \\in \\mathbb{R}^{4 \\times 4}" + }, + { + "bbox": [ + 313, + 510, + 545, + 570 + ], + "type": "text", + "content": " such that the factorization " + }, + { + "bbox": [ + 313, + 510, + 545, + 570 + ], + "type": "inline_equation", + "content": "\\{PH, H^{-1}X\\}" + }, + { + "bbox": [ + 313, + 510, + 545, + 570 + ], + "type": "text", + "content": " is a Euclidean reconstruction. This is done by estimating the dual absolute conic as described in [12]." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 575, + 388, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 575, + 388, + 588 + ], + "spans": [ + { + "bbox": [ + 306, + 575, + 388, + 588 + ], + "type": "text", + "content": "5.1. Experiments" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": "The performance of the proposed pipeline is evaluated on 3 sequences from [24] with radial distortion: Grossmunster (19 cam., 1874 pts, " + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "inline_equation", + "content": "41\\%" + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": " missing data), Kirchenge (30 cam., 1158 pts, " + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "inline_equation", + "content": "60\\%" + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": " missing data), and Munterhof (20 cam., 2108 pts, " + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "inline_equation", + "content": "42\\%" + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": " missing data). We compare the performance when using either " + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\exp OSE" + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\eta = 0.01" + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": "), " + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{pOSE}" + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": ", or " + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{RpOSE}" + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": " (both with " + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\eta = 0.001" + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": ") in step 1 of the pipeline. We use " + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\exp OSE" + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": " with scheduling for regularization update, as described in Section 3.2. Refinement of the solutions is done by performing up to 50 iterations of BA." 
+ } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "8965" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 72, + 138, + 224 + ], + "blocks": [ + { + "bbox": [ + 50, + 72, + 138, + 224 + ], + "lines": [ + { + "bbox": [ + 50, + 72, + 138, + 224 + ], + "spans": [ + { + "bbox": [ + 50, + 72, + 138, + 224 + ], + "type": "image", + "image_path": "573df3627b32e8b669936dd217a28fb41e7094316edabcd5482d949197f38cec.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 234, + 287, + 312 + ], + "lines": [ + { + "bbox": [ + 46, + 234, + 287, + 312 + ], + "spans": [ + { + "bbox": [ + 46, + 234, + 287, + 312 + ], + "type": "text", + "content": "Figure 6. Visualization of reconstructions on the Grossmunster sequence. (Left) An example of one of the images on the sequence. At the bottom, we show a view of the 3D reconstruction of expOSE for " + }, + { + "bbox": [ + 46, + 234, + 287, + 312 + ], + "type": "inline_equation", + "content": "\\alpha = 1" + }, + { + "bbox": [ + 46, + 234, + 287, + 312 + ], + "type": "text", + "content": ". (Right) Comparison between the top view reconstructions (black) obtained with pOSE, RpOSE and expOSE. In red we show the ground-truth 3D point cloud. All reconstructions shown here were not refined with bundle adjustment." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 139, + 72, + 210, + 224 + ], + "blocks": [ + { + "bbox": [ + 139, + 72, + 210, + 224 + ], + "lines": [ + { + "bbox": [ + 139, + 72, + 210, + 224 + ], + "spans": [ + { + "bbox": [ + 139, + 72, + 210, + 224 + ], + "type": "image", + "image_path": "263cd2695e7d9d4ca5166ce4c2c5580b9ec9b7b2071a9239550e40fea3c9848a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 211, + 72, + 286, + 224 + ], + "blocks": [ + { + "bbox": [ + 211, + 72, + 286, + 224 + ], + "lines": [ + { + "bbox": [ + 211, + 72, + 286, + 224 + ], + "spans": [ + { + "bbox": [ + 211, + 72, + 286, + 224 + ], + "type": "image", + "image_path": "682f9d74e513e408789f632deb0127253eb2582fb69e09d4649e43ff4eb532fc.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 337, + 287, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 337, + 287, + 517 + ], + "spans": [ + { + "bbox": [ + 46, + 337, + 287, + 517 + ], + "type": "text", + "content": "The metrics used are convergence rate (similarly to the experiments in Section 2), 2D reprojection error, rotation error, and 3D error. In order to compute the last two, we perform Euclidean registration on the output of the pipeline, i.e. after the Euclidean update, to the ground-truth 3D point cloud. The inverse of that Euclidean transformation is applied to the camera matrices. 
Rotation error is then computed as " + }, + { + "bbox": [ + 46, + 337, + 287, + 517 + ], + "type": "inline_equation", + "content": "e_{\\mathrm{rot}} = \\mathrm{acos}\\left(\\left(\\mathrm{trace}\\left(R_i^{GT}R_i^T\\right) - 1\\right) / 2\\right)" + }, + { + "bbox": [ + 46, + 337, + 287, + 517 + ], + "type": "text", + "content": " and the 3D error as the median of all " + }, + { + "bbox": [ + 46, + 337, + 287, + 517 + ], + "type": "inline_equation", + "content": "\\| X_j - X_j^{GT}\\|" + }, + { + "bbox": [ + 46, + 337, + 287, + 517 + ], + "type": "text", + "content": ". The values presented in Table 1 correspond to the average over all instances that converged to the desired optimum. The chosen metrics are evaluated at two points of the pipeline: after the radial distortion estimation (step 2), and after the bundle adjustment (step 3). At both stages, a metric update is performed in order to obtain a Euclidean reconstruction." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 519, + 286, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 519, + 286, + 686 + ], + "spans": [ + { + "bbox": [ + 46, + 519, + 286, + 686 + ], + "type": "text", + "content": "The results show that expOSE clearly outperforms both pOSE and RpOSE. The difference in performance is even more evident when looking at the output of the factorizations, where expOSE was able to achieve reprojection errors that almost match the refined solution with BA. Note that in many cases expOSE even got better rotation and 3D errors than its refined counterpart. A visualization for the Grossmunter sequence is shown in Figure 6. It is also possible to notice the impact of using the regularization for radial distortion invariance as described in Section 4.3. For " + }, + { + "bbox": [ + 46, + 519, + 286, + 686 + ], + "type": "inline_equation", + "content": "\\alpha = 0.999" + }, + { + "bbox": [ + 46, + 519, + 286, + 686 + ], + "type": "text", + "content": " the method has slow convergence, leading to poor solutions as can be seen by the high rotation and reprojection errors. Additional results for other values of " + }, + { + "bbox": [ + 46, + 519, + 286, + 686 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 46, + 519, + 286, + 686 + ], + "type": "text", + "content": " and sequences are presented in the supplementary material." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 689, + 286, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 286, + 712 + ], + "type": "text", + "content": "In practice, as seen in these experiments, we notice that " + }, + { + "bbox": [ + 47, + 689, + 286, + 712 + ], + "type": "inline_equation", + "content": "\\alpha = 1" + }, + { + "bbox": [ + 47, + 689, + 286, + 712 + ], + "type": "text", + "content": " achieves the best results for images with radial" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 307, + 135, + 548, + 256 + ], + "blocks": [ + { + "bbox": [ + 305, + 71, + 545, + 125 + ], + "lines": [ + { + "bbox": [ + 305, + 71, + 545, + 125 + ], + "spans": [ + { + "bbox": [ + 305, + 71, + 545, + 125 + ], + "type": "text", + "content": "Table 1. Results on the Grossmunster, Kirchenge, and Munsterhof datasets (over 10 instances). For each method two rows are presented: the first consists of the results for the output of the factorization method; the second of the output of the Bundle Adjustment (+BA). 
In green, we show the best results for each metric." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 135, + 548, + 256 + ], + "lines": [ + { + "bbox": [ + 307, + 135, + 548, + 256 + ], + "spans": [ + { + "bbox": [ + 307, + 135, + 548, + 256 + ], + "type": "table", + "html": "
GrossmunsterConv. RateRot. [deg]3D [unit]2D [pix]
pOSE+ BA50%148.250.76218.48
50%27.610.2931.50
RpOSE+ BA90%2.240.0822.91
90%0.530.0111.48
ExpOSEα=0.999100%44.740.22741.51
α=0.999+BA100%0.430.0071.48
α=1100%0.180.0041.86
α=1+BA100%0.420.0061.48
", + "image_path": "f652ec7db7bf4148137c3d4a3b3fcef719bb4abd10eecb944126e8a8555ee566.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 307, + 256, + 548, + 364 + ], + "blocks": [ + { + "bbox": [ + 307, + 256, + 548, + 364 + ], + "lines": [ + { + "bbox": [ + 307, + 256, + 548, + 364 + ], + "spans": [ + { + "bbox": [ + 307, + 256, + 548, + 364 + ], + "type": "table", + "html": "
Kirchenge
pOSE+ BA100%160.386.84414.95
100%0.720.0241.22
RpOSE+ BA90%0.980.0621.94
90%1.060.0311.22
ExpOSEα=0.99960%24.710.02245.28
α=0.999+BA80%1.190.0211.22
α=180%0.510.0261.57
α=1+BA80%2.920.0501.22
", + "image_path": "c45294355b6cfa9b10fd01e40e2bc8fb6b5ad6cab87ca5f60826d3d892f81ec3.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 307, + 364, + 548, + 472 + ], + "blocks": [ + { + "bbox": [ + 307, + 364, + 548, + 472 + ], + "lines": [ + { + "bbox": [ + 307, + 364, + 548, + 472 + ], + "spans": [ + { + "bbox": [ + 307, + 364, + 548, + 472 + ], + "type": "table", + "html": "
Munsterhof
pOSE+ BA100%14.010.23012.08
100%0.440.0271.70
RpOSE+ BA60%1.000.07111.96
60%0.440.0271.70
ExpOSEα=0.999100%20.130.02147.71
α=0.999+BA100%0.470.0291.70
α=180%0.120.0133.43
α=1+BA90%0.450.0301.70
", + "image_path": "b55b6dc148005ae63391ae326781720c3ff2565f72a2786c9dbaed8cc00ab809.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 491, + 545, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 491, + 545, + 552 + ], + "spans": [ + { + "bbox": [ + 305, + 491, + 545, + 552 + ], + "type": "text", + "content": "distortion. In the supplementary material we provide additional experiments that show the benefit of using values " + }, + { + "bbox": [ + 305, + 491, + 545, + 552 + ], + "type": "inline_equation", + "content": "1/2 < \\alpha < 1" + }, + { + "bbox": [ + 305, + 491, + 545, + 552 + ], + "type": "text", + "content": " in particular problem instances where data availability is too low for the stability of a pure radial model (e.g. few viewpoints and/or points per camera available)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 562, + 383, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 562, + 383, + 574 + ], + "spans": [ + { + "bbox": [ + 306, + 562, + 383, + 574 + ], + "type": "text", + "content": "6. Conclusions" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 582, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 545, + 712 + ], + "type": "text", + "content": "In this paper, we propose the use of exponential regularization on projective factorization problems as a way to enforce Cheirality conditions on the reconstruction. Radial distortion robustness is achieved by weighting differently the radial and tangential components of the object space error. We show that the proposed regularization results in higher reconstruction quality (that matches bundle adjustment refined solutions) while keeping the same convergence properties as state-of-the-art factorization methods and being less sensitive to the choice of the weight " + }, + { + "bbox": [ + 304, + 582, + 545, + 712 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 304, + 582, + 545, + 712 + ], + "type": "text", + "content": " of the regularization." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "8966" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 287, + 711 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 156 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 156 + ], + "type": "text", + "content": "[1] Srinadh Bhojanapalli, Behnam Neyshabur, and Nati Srebro. Global optimality of local search for low rank matrix recovery. In D. D. Lee, M. Sugiyama, U. V. Luxburg, I. Guyon, and R. 
Garnett, editors, Advances in Neural Information Processing Systems 29, pages 3873-3881. Curran Associates, Inc., 2016. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 158, + 269, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 158, + 269, + 168 + ], + "spans": [ + { + "bbox": [ + 53, + 158, + 269, + 168 + ], + "type": "text", + "content": "[2] Dean Brown. Decentering distortion of lenses. 1966. 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 169, + 287, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 169, + 287, + 212 + ], + "spans": [ + { + "bbox": [ + 53, + 169, + 287, + 212 + ], + "type": "text", + "content": "[3] A. M. Buchanan and A. W. Fitzgibbon. Damped newton algorithms for matrix factorization with missing data. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2005. 1, 5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 213, + 287, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 213, + 287, + 256 + ], + "spans": [ + { + "bbox": [ + 53, + 213, + 287, + 256 + ], + "type": "text", + "content": "[4] Alessio Del Bue, João M. F. Xavier, Lourdes Agapito, and Marco Paladini. Bilinear modeling via augmented lagrange multipliers (BALM). IEEE Trans. Pattern Anal. Mach. Intell., 34(8):1496-1508, 2012. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 258, + 287, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 258, + 287, + 300 + ], + "spans": [ + { + "bbox": [ + 53, + 258, + 287, + 300 + ], + "type": "text", + "content": "[5] R. Cabral, F. De la Torre, J. P. Costeira, and A. Bernardino. Unifying nuclear norm and bilinear factorization approaches for low-rank matrix decomposition. In International Conference on Computer Vision (ICCV), 2013. 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 302, + 287, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 302, + 287, + 335 + ], + "spans": [ + { + "bbox": [ + 53, + 302, + 287, + 335 + ], + "type": "text", + "content": "[6] Emmanuel J. Candès and Benjamin Recht. Exact matrix completion via convex optimization. Foundations of Computational Mathematics, 9(6):717-772, 2009. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 336, + 287, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 336, + 287, + 357 + ], + "spans": [ + { + "bbox": [ + 53, + 336, + 287, + 357 + ], + "type": "text", + "content": "[7] I. Csiszar and G. Tusnády. Information Geometry and Alternating Minimization Procedures. 5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 358, + 287, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 358, + 287, + 401 + ], + "spans": [ + { + "bbox": [ + 53, + 358, + 287, + 401 + ], + "type": "text", + "content": "[8] Y. Dai, H. Li, and M. He. Projective multiview structure and motion from element-wise factorization. IEEE Transactions on Pattern Analysis and Machine Intelligence, 35(9):2238-2251, 2013. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 403, + 287, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 403, + 287, + 435 + ], + "spans": [ + { + "bbox": [ + 53, + 403, + 287, + 435 + ], + "type": "text", + "content": "[9] Rong Ge, Chi Jin, and Yi Zheng. No spurious local minima in nonconvex low rank problems: A unified geometric analysis. 
arXiv preprint, arxiv:1704.00708, 2017. 1, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 436, + 287, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 436, + 287, + 468 + ], + "spans": [ + { + "bbox": [ + 48, + 436, + 287, + 468 + ], + "type": "text", + "content": "[10] Rong Ge, Jason D. Lee, and Tengyu Ma. Matrix completion has no spurious local minimum. In Annual Conference on Neural Information Processing Systems (NIPS), 2016. 1, 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 469, + 287, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 469, + 287, + 502 + ], + "spans": [ + { + "bbox": [ + 48, + 469, + 287, + 502 + ], + "type": "text", + "content": "[11] Christian Grussler, Anders Rantzer, and Pontus Giselsson. Low-rank optimization with convex constraints. IEEE Transactions on Automatic Control, 63(11):4000-4007, 2018. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 502, + 287, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 502, + 287, + 535 + ], + "spans": [ + { + "bbox": [ + 48, + 502, + 287, + 535 + ], + "type": "text", + "content": "[12] Richard Hartley and Andrew Zisserman. Multiple View Geometry in Computer Vision. Cambridge University Press, USA, 2 edition, 2003. 2, 3, 7" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 536, + 287, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 536, + 287, + 567 + ], + "spans": [ + { + "bbox": [ + 48, + 536, + 287, + 567 + ], + "type": "text", + "content": "[13] Richard I. Hartley. In defense of the eight-point algorithm. IEEE Trans. Pattern Anal. Mach. Intell., 19(6):580-593, 1997. 4" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 570, + 287, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 570, + 287, + 612 + ], + "spans": [ + { + "bbox": [ + 48, + 570, + 287, + 612 + ], + "type": "text", + "content": "[14] Je Hyeong Hong and Andrew Fitzgibbon. Secrets of matrix factorization: Approximations, numerics, manifold optimization and random restarts. In Int. Conf. on Computer Vision, 2015. 1, 4" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 614, + 287, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 614, + 287, + 657 + ], + "spans": [ + { + "bbox": [ + 48, + 614, + 287, + 657 + ], + "type": "text", + "content": "[15] Je Hyeong Hong and Christopher Zach. pose: Pseudo object space error for initialization-free bundle adjustment. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2018. 1, 2, 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 658, + 287, + 711 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 287, + 711 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 287, + 711 + ], + "type": "text", + "content": "[16] J. H. Hong, C. Zach, and A. Fitzgibbon. Revisiting the variable projection method for separable nonlinear least squares problems. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 5939-5947, 2017. 
1, 3, 4" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[17] Je Hyeong Hong, Christopher Zach, Andrew W. Fitzgibbon, and Roberto Cipolla. Projective bundle adjustment from arbitrary initialization using the variable projection method. In European Conf. on Computer Vision, 2016. 1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 118, + 545, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 160 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 160 + ], + "type": "text", + "content": "[18] Jose Iglesias and Carl Olsson. Radial distortion invariant factorization for structure from motion. In Proceedings of the IEEE International Conference on Computer Vision, 2021. 1, 2, 3, 5, 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 162, + 545, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 162, + 545, + 205 + ], + "spans": [ + { + "bbox": [ + 308, + 162, + 545, + 205 + ], + "type": "text", + "content": "[19] José Pedro Iglesias, Carl Olsson, and Marcus Valtonen Örnhag. Accurate optimization of weighted nuclear norm for non-rigid structure from motion. In European Conference on Computer Vision (ECCV), 2020. 1, 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 206, + 545, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 206, + 545, + 258 + ], + "spans": [ + { + "bbox": [ + 308, + 206, + 545, + 258 + ], + "type": "text", + "content": "[20] Jae-Hak Kim, Yuchao Dai, Hongdong li, Xin Du, and Jonghyuk Kim. Multi-view 3d reconstruction from uncalibrated radially-symmetric cameras. In Proceedings of the IEEE International Conference on Computer Vision, pages 1896-1903, 12 2013. 5" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 261, + 545, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 261, + 545, + 304 + ], + "spans": [ + { + "bbox": [ + 308, + 261, + 545, + 304 + ], + "type": "text", + "content": "[21] Z. Kukelova, M. Bujnak, and T. Pajdla. Real-time solution to the absolute pose problem with unknown radial distortion and focal length. In 2013 IEEE International Conference on Computer Vision, pages 2816-2823, 2013. 5" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 305, + 545, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 305, + 545, + 358 + ], + "spans": [ + { + "bbox": [ + 308, + 305, + 545, + 358 + ], + "type": "text", + "content": "[22] Suryansh Kumar. Non-rigid structure from motion: Prior-free factorization method revisited. In IEEE Winter Conference on Applications of Computer Vision, WACV 2020, Snowmass Village, CO, USA, March 1-5, 2020, pages 51-60. IEEE, 2020. 1" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 360, + 545, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 360, + 545, + 403 + ], + "spans": [ + { + "bbox": [ + 308, + 360, + 545, + 403 + ], + "type": "text", + "content": "[23] Viktor Larsson, Torsten Sattler, Zuzana Kukelova, and Marc Pollefeys. Revisiting radial distortion absolute pose. 
In International Conference on Computer Vision (ICCV). IEEE, September 2019. 5" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 404, + 545, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 404, + 545, + 448 + ], + "spans": [ + { + "bbox": [ + 308, + 404, + 545, + 448 + ], + "type": "text", + "content": "[24] Viktor Larsson, Nicolcas Zobernig, Kasim Taskin, and Marc Pellefeys. Calibration-free structure-from-motion with calibrated radial trifocal tensors. In European Conference of Computer Vision, 2020. 5, 7" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 449, + 545, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 449, + 545, + 491 + ], + "spans": [ + { + "bbox": [ + 308, + 449, + 545, + 491 + ], + "type": "text", + "content": "[25] Ludovic Magerand and Alessio Del Bue. Practical projective structure from motion (p2sfm). In 2017 IEEE International Conference on Computer Vision (ICCV), pages 39-47, 2017. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 493, + 545, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 493, + 545, + 536 + ], + "spans": [ + { + "bbox": [ + 308, + 493, + 545, + 536 + ], + "type": "text", + "content": "[26] Behrooz Nasihatkon, Richard I. Hartley, and Jochen Trumpf. A generalized projective reconstruction theorem and depth constraints for projective factorization. Int. J. Comput. Vis., 115(2):87-114, 2015. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 537, + 545, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 537, + 545, + 580 + ], + "spans": [ + { + "bbox": [ + 308, + 537, + 545, + 580 + ], + "type": "text", + "content": "[27] Takayuki Okatani and Koichiro Deguchi. On the wiberg algorithm for matrix factorization in the presence of missing components. International Journal of Computer Vision, 72(3):329-337, 2007. 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 582, + 545, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 582, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 308, + 582, + 545, + 635 + ], + "type": "text", + "content": "[28] Carl Olsson, Daniele Gerosa, and Marcus Carlsson. Relaxations for non-separable cardinality/rank penalties. In 2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW), IEEE International Conference on Computer Vision Workshops, pages 162-171, 2021. 3" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 636, + 545, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 636, + 545, + 680 + ], + "spans": [ + { + "bbox": [ + 308, + 636, + 545, + 680 + ], + "type": "text", + "content": "[29] Carl Olsson, Viktor Larsson, and Fredrik Kahl. A quasiconvex formulation for radial cameras. In 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14571-14580, 2021. 5" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 308, + 681, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 681, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 681, + 545, + 713 + ], + "type": "text", + "content": "[30] Marcus Valtonen Ornag, Carl Olsson, and Anders Heyden. Bilinear parameterization for differentiable rank-regularization. 
2020 IEEE/CVF Conference on Computer" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "8967" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 632 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "type": "text", + "content": "Vision and Pattern Recognition Workshops (CVPRW), Jun 2020. 1, 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 183 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 183 + ], + "type": "text", + "content": "[31] Dohyung Park, Anastasios Kyrillidis, Constantine Carmanis, and Sujay Sanghavi. Non-square matrix sensing without spurious local minima via the Burer-Monteiro approach. In Aarti Singh and Jerry Zhu, editors, Proceedings of the 20th International Conference on Artificial Intelligence and Statistics, volume 54 of Proceedings of Machine Learning Research, pages 65-74, Fort Lauderdale, FL, USA, 20-22 Apr 2017. PMLR. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 185, + 287, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 185, + 287, + 217 + ], + "spans": [ + { + "bbox": [ + 48, + 185, + 287, + 217 + ], + "type": "text", + "content": "[32] Conrad J. Poelman and Takeo Kanade. A parapspective factorization method for shape and motion recovery. IEEE Trans. Pattern Anal. Mach. Intell., 19(3):206-218, 1997. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 219, + 287, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 219, + 287, + 262 + ], + "spans": [ + { + "bbox": [ + 48, + 219, + 287, + 262 + ], + "type": "text", + "content": "[33] Benjamin Recht, Maryam Fazel, and Pablo A. Parrilo. Guaranteed minimum-rank solutions of linear matrix equations via nuclear norm minimization. SIAM Rev., 52(3):471-501, Aug. 2010. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 263, + 287, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 263, + 287, + 319 + ], + "spans": [ + { + "bbox": [ + 48, + 263, + 287, + 319 + ], + "type": "text", + "content": "[34] C. Strecha, W. von Hansen, L. Van Gool, P. Fua, and U. Thoennessen. On benchmarking camera calibration and multi-view stereo for high resolution imagery. In 2008 IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8, 2008. 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 319, + 287, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 319, + 287, + 363 + ], + "spans": [ + { + "bbox": [ + 48, + 319, + 287, + 363 + ], + "type": "text", + "content": "[35] D. Strelow, Q. Wang, L. Si, and A. Eriksson. General, nested, and constrained wiberg minimization. IEEE Transactions on Pattern Analysis and Machine Intelligence, 38(9):1803-1815, 2016. 
3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 364, + 287, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 364, + 287, + 419 + ], + "spans": [ + { + "bbox": [ + 48, + 364, + 287, + 419 + ], + "type": "text", + "content": "[36] Peter F. Sturm and Bill Triggs. A factorization based algorithm for multi-image projective structure and motion. In Proceedings of the 4th European Conference on Computer Vision-Volume II - Volume II, ECCV '96, page 709-720, Berlin, Heidelberg, 1996. Springer-Verlag. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 420, + 287, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 420, + 287, + 453 + ], + "spans": [ + { + "bbox": [ + 48, + 420, + 287, + 453 + ], + "type": "text", + "content": "[37] SriRam Thirthala and Marc Pollefeys. Radial multi-focal tensors. International Journal of Computer Vision - IJCV, 96, 06 2012. 5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 454, + 287, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 454, + 287, + 496 + ], + "spans": [ + { + "bbox": [ + 48, + 454, + 287, + 496 + ], + "type": "text", + "content": "[38] Carlo Tomasi and Takeo Kanade. Shape and motion from image streams under orthography: A factorization method. International Journal of Computer Vision, 9(2):137-154, 1992. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 498, + 287, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 498, + 287, + 553 + ], + "spans": [ + { + "bbox": [ + 48, + 498, + 287, + 553 + ], + "type": "text", + "content": "[39] Bill Triggs, Philip F. McLauchlan, Richard I. Hartley, and Andrew W. Fitzgibbon. Bundle adjustment - a modern synthesis. In Proceedings of the International Workshop on Vision Algorithms: Theory and Practice, ICCV '99, pages 298-372. Springer-Verlag, 2000. 2, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 555, + 287, + 598 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 555, + 287, + 598 + ], + "spans": [ + { + "bbox": [ + 48, + 555, + 287, + 598 + ], + "type": "text", + "content": "[40] R. Tsai. A versatile camera calibration technique for high-accuracy 3d machine vision metrology using off-the-shelf tv cameras and lenses. IEEE Journal on Robotics and Automation, 3(4):323-344, August 1987. 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 599, + 287, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 599, + 287, + 632 + ], + "spans": [ + { + "bbox": [ + 48, + 599, + 287, + 632 + ], + "type": "text", + "content": "[41] T. Wiberg. Computation of principal components when data are missing. In Proceedings of the Second Symposium of Computational Statistics, page 229-326, 1976. 
3" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "8968" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/gSDF_ Geometry-Driven Signed Distance Functions for 3D Hand-Object Reconstruction/ab15db8d-1011-4beb-9528-c9481ccf45d6_content_list.json b/2023/gSDF_ Geometry-Driven Signed Distance Functions for 3D Hand-Object Reconstruction/ab15db8d-1011-4beb-9528-c9481ccf45d6_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..e2f5466a91b558897ee532ec916001d92350a446 --- /dev/null +++ b/2023/gSDF_ Geometry-Driven Signed Distance Functions for 3D Hand-Object Reconstruction/ab15db8d-1011-4beb-9528-c9481ccf45d6_content_list.json @@ -0,0 +1,2251 @@ +[ + { + "type": "text", + "text": "gSDF: Geometry-Driven Signed Distance Functions for 3D Hand-Object Reconstruction", + "text_level": 1, + "bbox": [ + 223, + 130, + 746, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zerui Chen", + "bbox": [ + 209, + 204, + 303, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shizhe Chen", + "bbox": [ + 343, + 204, + 444, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Cordelia Schmid", + "bbox": [ + 486, + 204, + 620, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ivan Laptev", + "bbox": [ + 661, + 204, + 758, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Inria, École normale supérieure, CNRS, PSL Research Univ., 75005 Paris, France", + "bbox": [ + 161, + 222, + 805, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "firstname.lastname@inria.fr", + "bbox": [ + 361, + 243, + 602, + 255 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 292, + 312, + 308 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Signed distance functions (SDFs) is an attractive framework that has recently shown promising results for 3D shape reconstruction from images. SDFs seamlessly generalize to different shape resolutions and topologies but lack explicit modelling of the underlying 3D geometry. In this work, we exploit the hand structure and use it as guidance for SDF-based shape reconstruction. In particular, we address reconstruction of hands and manipulated objects from monocular RGB images. To this end, we estimate poses of hands and objects and use them to guide 3D reconstruction. More specifically, we predict kinematic chains of pose transformations and align SDFs with highly-articulated hand poses. We improve the visual features of 3D points with geometry alignment and further leverage temporal information to enhance the robustness to occlusion and motion blurs. We conduct extensive experiments on the challenging ObMan and DexYCB benchmarks and demonstrate significant improvements of the proposed method over the state of the art.", + "bbox": [ + 75, + 325, + 473, + 598 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 76, + 630, + 209, + 646 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Understanding how hands interact with objects is becoming increasingly important for widespread applications, including virtual reality, robotic manipulation and human-computer interaction. Compared to 3D estimation of sparse hand joints [24,38,51,53,67], joint reconstruction of hands and object meshes [11, 18, 21, 26, 62] provides rich information about hand-object interactions and has received increased attention in recent years.", + "bbox": [ + 75, + 657, + 470, + 777 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To reconstruct high-quality meshes, some recent works [9, 17, 61] explore multi-view image inputs. Multi-view images, however, are less common both for training and testing scenarios. In this work, we focus on a more practical and user-friendly setting where we aim to reconstruct hand and object meshes from monocular RGB images. Given the ill-posed nature of the task, many existing methods [7, 19, 21, 54, 62] employ parametric mesh models (e.g., MANO [46]) to im", + "bbox": [ + 75, + 779, + 470, + 901 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/527adaaf0d94728c6cef36247c014db1a2971619c5891a07228162e574525f61.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 295, + 635, + 369 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0101e1ed915fabefe96f599e74a8be60a2f28721973d0da39113d65b0623b487.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 522, + 369, + 586, + 386 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/e9a24ed55ed9a034f6e98cdc5905a4854a353a9a30987c24fef26688e57761a0.jpg", + "image_caption": [ + "gSDF" + ], + "image_footnote": [], + "bbox": [ + 534, + 401, + 627, + 474 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/930162f97d68298d59444a1e5ab0920b6530a1751ebcfac02c1fe9dbc0ea6f13.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 532, + 474, + 584, + 489 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/1b1769b767cc71b8b5a0b055e8b51352741b96b36c76b761da995b8b93eb34ff.jpg", + "image_caption": [ + "Figure 1. We aim to reconstruct 3D hand and object meshes from monocular images (top). Our method gSDF (middle) first predicts 3D hand joints (blue) and object locations (red) from input images. We use estimated hand poses and object locations to incorporate strong geometric priors into SDF by generating hand- and object-aware kinematic features for each SDF query point. Our resulting gSDF model generates accurate results for real images with various objects and grasping hand poses (bottom)." 
+ ], + "image_footnote": [], + "bbox": [ + 506, + 492, + 571, + 565 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/3fbc938809057c339e88a7bb8d0ff47ccf23b8af98ac8617d0fd901344b357d9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 576, + 503, + 630, + 555 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4a1a563a15f47748a088359df125a6570a6b1dc58f1080f2d38d0b31e57163e9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 637, + 297, + 763, + 369 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/ad192d46e9560e1148a60d5966194b8e3af4f3fc412c3047cab7eb537367d8ed.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 686, + 369, + 717, + 387 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/b25647d7d93da1380b6c3a96306a849f93a0482f741ad7b0b0e71833b5ed57de.jpg", + "image_caption": [ + "gSDF" + ], + "image_footnote": [], + "bbox": [ + 676, + 402, + 758, + 473 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/62f855d278fa1773f1ccd345c22d665c228c5025bdf03be93579addd06590db3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 676, + 473, + 717, + 489 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/497e1728de1b8fbc5a2151c53cdd64addffc970583c47f9fcdf96fd3962bb525.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 643, + 491, + 702, + 564 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/32542e1d490c939963f930ab36bafdb27f211b811804d61961326a153108a97d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 764, + 297, + 890, + 369 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/08b8aa3975bd581498a60f555dd8d76e8ded955cf04ab2f8770c8ea9c8e38b36.jpg", + "image_caption": [ + "gSDF" + ], + "image_footnote": [], + "bbox": [ + 784, + 369, + 841, + 387 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/edbdf5237d3dd6f6eb8457cc4d27704ccd1490f337b318a0858c637ff68236a9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 794, + 414, + 867, + 454 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/70890cfc46200935f42539a0e94cd1527bee74410b7c6512a569baa97443b19a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 474, + 843, + 489 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/1d92d9abaeb0505145b8c2e27e8d30a41950b3b74e2df184eb0c889c34160c26.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 764, + 491, + 828, + 565 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/79e91292827ed0312f99c1f1bd6de33f036784d32fdf77c6fa9dbc407bd1262d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 834, + 507, + 883, + 550 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "pose prior knowledge and reduce ambiguities in 3D hand reconstruction. MANO hand meshes, however, have relatively limited resolution and can be suboptimal for the precise capture of hand-object interactions.", + "bbox": [ + 496, + 715, + 893, + 777 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To reconstruct detailed hand and object meshes, another line of efforts [11, 26] employ signed distance functions (SDFs). Grasping Field [26] makes the first attempt to model hand and object surfaces using SDFs. 
However, it does not explicitly associate 3D geometry with image cues and has no prior knowledge incorporated in SDFs, leading to unrealistic meshes. AlignSDF [11] proposes to align SDFs with respect to global poses (i.e., the hand wrist transformation and the", + "bbox": [ + 496, + 779, + 893, + 901 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "12890", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "object translation) and produces improved results. However, it is still challenging to capture geometric details for more complex hand motions and manipulations of diverse objects, which involve the articulation of multiple fingers.", + "bbox": [ + 78, + 90, + 468, + 151 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address limitations of prior works, we propose a geometry-driven SDF (gSDF) method that encodes strong pose priors and improves reconstruction by disentangling pose and shape estimation (see Figure 1). To this end, we first predict sparse 3D hand joints from images and derive full kinematic chains of local pose transformations from joint locations using inverse kinematics. Instead of only using the global pose as in [11], we optimize SDFs with respect to poses of all the hand joints, which leads to a more fine-grained alignment between the 3D shape and articulated hand poses. In addition, we project 3D points onto the image plane to extract geometry-aligned visual features for signed distance prediction. The visual features are further refined with spatio-temporal contexts using a transformer model to enhance the robustness to occlusions and motion blurs.", + "bbox": [ + 78, + 152, + 468, + 377 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We conduct extensive ablation experiments to show the effectiveness of different components in our approach. The proposed gSDF model greatly advances state-of-the-art accuracy on the challenging ObMan and DexYCB benchmarks. Our contributions can be summarized in three-fold: (i) To embed strong pose priors into SDFs, we propose to align the SDF shape with its underlying kinematic chains of pose transformations, which reduces ambiguities in 3D reconstruction. (ii) To further reduce the misalignment induced by inaccurate pose estimations, we propose to extract geometry-aligned local visual features and enhance the robustness with spatio-temporal contexts. (iii) We conduct comprehensive experiments to show that our approach outperforms state-of-the-art results by a significant margin.", + "bbox": [ + 78, + 378, + 468, + 589 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 78, + 603, + 217, + 619 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This paper focuses on jointly reconstructing hands and hand-held objects from RGB images. In this section, we first review previous works on the 3D hand pose and shape estimation. 
We then discuss relevant works on the joint reconstruction of hands and objects.", + "bbox": [ + 78, + 628, + 468, + 703 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D hand pose and shape estimation. The topic of 3D hand pose estimation has received widespread attention since the 90s [23, 45] and has seen significant progress in recent years [31, 65]. Methods which take RGB images as input [24, 36, 38, 39, 48, 50, 51, 53, 59, 67] often estimate sparse 3D hand joint locations from visual data using well-designed deep neural networks. Though these methods can achieve high estimation accuracy, their outputs of 3D sparse joints provide limited information about the 3D hand surface, which is critical in AR/VR applications. Following the introduction of the anthropomorphic parametric hand mesh model MANO [46], several works [2, 5, 10, 18, 29, 30, 32, 34, 40, 57] estimate the MANO hand shape and pose parameters to", + "bbox": [ + 78, + 704, + 468, + 898 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "recover the full hand surface. However, MANO has a limited mesh resolution and cannot produce fine surface details. Neural implicit functions [13,25] have the potential to reconstruct more realistic high resolution hand surfaces [12,37,42]. In this work, we combine the advantages of sparse, parametric and implicit modelling. We predict sparse 3D joints accurately from images and estimate the MANO parameters using inverse kinematics. We then optimize neural implicit functions with respect to underlying kinematic structures and reconstruct realistic meshes.", + "bbox": [ + 501, + 90, + 893, + 241 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D hand and object reconstruction. Joint reconstruction of hand and object meshes provides a more comprehensive view about how hands interact with manipulated objects in the 3D space and has received more attention in the past few years. Previous works often rely on multiview correspondence [3,9,17,41,58,61] or additional depth information [15, 16, 49, 55, 56] to approach this task. In this work, we focus on a more challenging setting and perform a joint reconstruction from monocular RGB images. Given the ill-posed nature of this problem, many works [7, 18-21, 54, 60, 62] deploy MANO, which encodes hand prior knowledge learned from hand scans, to reconstruct hand meshes. To further simplify the object reconstruction task, several works [18, 60, 62] make a strong assumption that the ground-truth object model is available at test time. Our work and some previous efforts [11, 21, 26] relax this assumption and assume unknown object models. Hasson et al. [21] employ a differentiable MANO layer to estimate the hand shape and AtlasNet [14] to reconstruct the manipulated object. However, both MANO and AtlasNet can only produce meshes of limited resolution, which prevents the modelling of detailed contacts between hands and objects. To generate more detailed surfaces, Karunratanakul et al. [26] introduce grasping fields and propose to use SDFs to reconstruct both hand and object meshes. However, such a model-free approach does not capture any prior knowledge about hands or objects, which can lead to predicting unrealistic 3D geometry. To mitigate this, Ye et al. [63] propose to use hand poses estimated from an off-the-shelf model to help reconstruct the hand-held object mesh. The main difference with our work is that we jointly reconstruct hand meshes and object meshes using our proposed model, which is more challenging. 
Also, in addition to using hand poses to help capture the object shapes, we predict object poses and show their benefits for SDF-based object reconstruction. Another work AlignSDF [11] optimizes SDFs with respect to estimated hand-object global poses and encodes pose priors into SDFs. In addition to using global poses as a guide for SDFs, we propose to learn SDFs from the full kinematic chains of local pose transformations, and achieve a more precise alignment between the 3D shape and the underlying poses. To further handle hard cases induced by occlusion or motion blur where pose estimations are inaccurate, we leverage", + "bbox": [ + 501, + 250, + 893, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "12891", + "bbox": [ + 480, + 945, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/9b8c982f7483297e13d7b608462e4d57d786c39f3cc17a94fefbd4d06d0d9e33.jpg", + "image_caption": [ + "Figure 2. The overview of our proposed single-frame model. Our method reconstructs realistic hand and object meshes from a single RGB image. Marching Cubes algorithm [33] is used at test time to extract meshes." + ], + "image_footnote": [], + "bbox": [ + 80, + 90, + 893, + 305 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "a transformer to accumulate corresponding image features from multiple frames and benefit the geometry recovery.", + "bbox": [ + 75, + 368, + 470, + 400 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 76, + 414, + 169, + 430 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This section presents our geometry-driven SDF (gSDF) method for 3D hand and object reconstruction from monocular RGB images. We aim to learn two signed distance functions $\\mathrm{SDF}_{hand}$ and $\\mathrm{SDF}_{obj}$ to implicitly represent 3D shapes for the hand and the object. The $\\mathrm{SDF}_{hand}$ and $\\mathrm{SDF}_{obj}$ map a query 3D point $x\\in \\mathbb{R}^3$ to a signed distance from the hand surface and object surface, respectively. The Marching Cubes algorithm [33] can thus be employed to reconstruct the hand and the object from $\\mathrm{SDF}_{hand}$ and $\\mathrm{SDF}_{obj}$ .", + "bbox": [ + 75, + 440, + 472, + 578 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Overview of gSDF", + "text_level": 1, + "bbox": [ + 76, + 588, + 253, + 604 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Figure 2 illustrates the overview of our gSDF reconstruction approach. Given an image $I_{t}$ , we extract two types of features to predict the signed distance for each query point $x$ , namely kinematic features and visual features.", + "bbox": [ + 75, + 612, + 470, + 672 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The kinematic feature encodes the position of $x$ under the coordinate system of the hand or the object, which can provide strong pose priors to assist SDF learning. Since the feature is based on canonical hand and object poses, it helps to disentangles shape learning from pose learning.", + "bbox": [ + 75, + 672, + 470, + 748 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The existing work [63] proposes to use hand poses for reconstructing object meshes but does not consider using pose priors to reconstruct hand meshes. Another work [11] only deploys coarse geometry in terms of the hand wrist object locations, which fails to capture fine-grained details. 
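The role of the two implicit functions above can be made concrete with a small, self-contained sketch (not the authors' released code) of extracting a mesh from a learned SDF with the Marching Cubes algorithm [33]. The `sdf_fn` callable, the grid resolution and the cube bound are illustrative assumptions standing in for $\mathrm{SDF}_{hand}$ or $\mathrm{SDF}_{obj}$ and the sampling cube used at test time.

```python
import numpy as np
from skimage import measure  # provides marching_cubes


def sdf_to_mesh(sdf_fn, resolution=64, bound=1.0):
    """Evaluate a signed distance function on a dense grid and extract its
    zero level set as a triangle mesh with Marching Cubes [33]."""
    # Dense grid of query points covering the cube [-bound, bound]^3.
    axis = np.linspace(-bound, bound, resolution)
    grid = np.stack(np.meshgrid(axis, axis, axis, indexing="ij"), axis=-1)
    queries = grid.reshape(-1, 3)

    # sdf_fn maps (N, 3) points to (N,) signed distances; it stands in for
    # the learned SDF_hand or SDF_obj decoder.
    sdf_values = sdf_fn(queries).reshape(resolution, resolution, resolution)

    # Extract the 0-level surface; spacing converts voxel indices to metric units.
    step = axis[1] - axis[0]
    verts, faces, _, _ = measure.marching_cubes(sdf_values, level=0.0,
                                                spacing=(step, step, step))
    return verts - bound, faces  # shift vertices back into [-bound, bound]^3


if __name__ == "__main__":
    # Toy stand-in for a learned SDF: a sphere of radius 0.5.
    sphere_sdf = lambda pts: np.linalg.norm(pts, axis=-1) - 0.5
    verts, faces = sdf_to_mesh(sphere_sdf)
    print(verts.shape, faces.shape)
```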
In this work, we aim to strengthen the kinematic feature with geometry transformation of $x$ to poses of all the hand joints (see Figure 3) for both the hand and the object reconstruction. However, it is challenging to directly predict hand pose parameters [6,28,66]. To improve the hand pose estimation,", + "bbox": [ + 75, + 750, + 472, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "we propose to first predict sparse 3D joint locations $j_{h}$ from the image and then use inverse kinematics to derive pose transformations $\theta_{h}$ from the predicted joints. In this way, we are able to obtain kinematic features $e_{h}$ and $e_{o}$ for the hand and the object respectively.", + "bbox": [ + 496, + 368, + 893, + 445 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The visual feature encodes the visual appearance for the point $x$ to provide more shape details. Prior works [11, 26] use the same global visual feature for all the points, e.g., averaging the feature map of an SDF feature encoder on the spatial dimension. Such global visual features suffer from imprecise geometry alignment between a point and its visual appearance. To alleviate the limitation, inspired by [47], we apply the geometry transformation to extract aligned local visual features. Moreover, to address hard cases with occlusions and motion blur in a single image $I_{t}$, we propose to enhance the local visual feature with its temporal contexts from videos using a spatio-temporal transformer. We denote the local visual feature of a point as $e_v$. Finally, we concatenate the kinematic feature and local visual feature to predict the signed distance for $x$:", + "bbox": [ + 496, + 450, + 895, + 676 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathrm{SDF}_{hand}(x) = f_{h}([e_{v}; e_{h}]), \\\\ \\mathrm{SDF}_{obj}(x) = f_{o}([e_{v}; e_{o}]), \\end{array} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 596, + 700, + 890, + 736 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $f_{h}$ and $f_{o}$ are the hand SDF decoder and the object SDF decoder respectively.", + "bbox": [ + 496, + 758, + 893, + 789 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In the following, we first present the proposed geometry-driven kinematic feature and visual feature encodings in Sections 3.2 and 3.3 respectively. Then, in Section 3.4 we introduce different strategies of sharing image backbones for hand and object pose predictors as well as the SDF feature encoder.
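Eq. (1) amounts to two small per-point decoders applied to concatenated visual and kinematic features. The PyTorch sketch below shows one plausible realization; the layer widths, ReLU activations and the 256-D visual feature size are illustrative assumptions rather than the paper's exact architecture, while the 51-D and 72-D kinematic feature sizes follow Section 3.2.

```python
import torch
import torch.nn as nn


class SDFDecoder(nn.Module):
    """A small MLP f(.) mapping a concatenated per-point feature [e_v; e_k]
    to a scalar signed distance, as in Eq. (1)."""

    def __init__(self, visual_dim=256, kinematic_dim=51, hidden=512):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(visual_dim + kinematic_dim, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, 1),
        )

    def forward(self, e_v, e_k):
        # e_v: (N, visual_dim) local visual features of the query points
        # e_k: (N, kinematic_dim) kinematic features (51-D hand / 72-D object)
        return self.net(torch.cat([e_v, e_k], dim=-1)).squeeze(-1)


# Separate decoders f_h and f_o for the hand and the object branches.
f_h, f_o = SDFDecoder(kinematic_dim=51), SDFDecoder(kinematic_dim=72)
sdf_hand = f_h(torch.randn(1000, 256), torch.randn(1000, 51))  # (1000,)
sdf_obj = f_o(torch.randn(1000, 256), torch.randn(1000, 72))   # (1000,)
```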
Finally, the training strategy of our model is described in Section 3.5.", + "bbox": [ + 496, + 795, + 895, + 898 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "12892", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/f34350c6deed27ad5477d84b749b1f608ddac3aa1cd56b24bf45eee708218c83.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 81, + 88, + 274, + 191 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/d3016ffe59ac56b52df47e56a812684a22855a7924bb78ac556142fa44c5473a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 287, + 88, + 464, + 191 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/551a9977fb27793d12b2e9380a58e895ad757e56a21c4cf6256218d284242093.jpg", + "image_caption": [ + "Figure 3. We define hand and object features by transforming queries $x$ into hand- and object-centered coordinate systems. Compared to AlignSDF [11] (left), each hand joint in our method defines its own coordinate frame." + ], + "image_footnote": [], + "bbox": [ + 80, + 193, + 272, + 309 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/2351cd87d20dfea867192f8de15c2fd463fbaa5e9e442690d712867b537726fb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 279, + 193, + 464, + 309 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Kinematic Feature Encoding", + "text_level": 1, + "bbox": [ + 76, + 412, + 334, + 429 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Hand and object pose estimation. Directly regressing hand pose parameters of MANO from image features [11, 19, 21] has proved to be difficult [6, 28, 66]. In contrast, predicting sparse 3D joint locations is easier and can achieve higher accuracy. Therefore, we first train a 3D hand joint prediction model which produces volumetric heatmaps [38, 44] for 21 hand joints. We use a differentiable soft-argmax operator [50] to extract 3D coordinates $\\psi_h \\in \\mathbb{R}^{21 \\times 3}$ of hand joints from the heatmaps. We then obtain an analytic solution for hand poses $\\theta_h \\in \\mathbb{R}^{16 \\times 3}, \\phi_h \\in \\mathbb{R}^{16 \\times 3}$ from estimated 3D joints $\\psi_h$ using inverse kinematics, where each $\\theta_{h,i} \\in \\mathbb{R}^3$ and $\\theta_{h,i} \\in \\mathbb{R}^3$ denote the relative pose of $i_{th}$ joint in terms of rotation and translation with respect to its ancestor joint. Here, we only calculate the rotation and use the default limb lengths provided by the MANO model. Specifically, we first compute the pose of the hand wrist using the template pose defined in MANO, and then follow the hand kinematic chain to solve the pose of other finger joints recursively. More details are presented in the supplementary material.", + "bbox": [ + 75, + 439, + 472, + 727 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For the object pose estimation, it is often difficult to accurately estimate the rotation of the object since many objects have a high degree of symmetry and are often occluded by hands. We therefore follow [11] and only estimate the center position of the object $\\psi_{o} \\in \\mathbb{R}^{3}$ relative to the hand wrist.", + "bbox": [ + 75, + 729, + 470, + 806 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Hand kinematic feature. 
Given the 3D point $x$, we generate the hand kinematic feature $e_h \in \mathbb{R}^{51}$ by transforming $x$ into canonical coordinate frames defined by hand joints. Figure 3(top,right) illustrates the proposed geometry transformation for the hand. For the $i_{th}$ hand joint pose $\theta_{h,i}, \phi_{h,i}$, the pose transformation $T_p(x, \theta_{h,i}, \phi_{h,i})$ to obtain the local", + "bbox": [ + 75, + 809, + 472, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "hand kinematic feature $e_{h,i}\in \mathbb{R}^3$ is defined as", + "bbox": [ + 498, + 90, + 807, + 106 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nG_{h,i} = \\prod_{j \\in A(i)} \\left[ \\begin{array}{cc} \\exp(\\theta_{h,j}) & \\phi_{h,j} \\\\ 0 & 1 \\end{array} \\right], \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 549, + 117, + 890, + 157 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\ne_{h,i} = T_{p}(x, \\theta_{h,i}, \\phi_{h,i}) = \\widetilde{H}(G_{h,i}^{-1} \\cdot H(x)),\n$$\n", + "text_format": "latex", + "bbox": [ + 550, + 161, + 839, + 181 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $A(i)$ denotes the ordered set of ancestors of the $i_{th}$ joint. We use the Rodrigues formula $\exp (\cdot)$ to convert $\theta_{h,i}$ into the form of a rotation matrix. By traversing the hand kinematic chain, we obtain the global transformation $G_{h,i}\in \mathbb{R}^{4\times 4}$ for the $i_{th}$ joint. Then, we take the inverse of $G_{h,i}$ to transform $x$ into the $i_{th}$ hand joint canonical coordinates. $H(\cdot)$ transforms $x$ into homogeneous coordinates while $\widetilde{H} (\cdot)$ transforms homogeneous coordinates back to Euclidean coordinates. Given local kinematic features $e_{h,i}$, the hand kinematic feature $e_h\in \mathbb{R}^{51}$ is defined as:", + "bbox": [ + 498, + 191, + 893, + 345 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\ne_{h} = \\left[ x, e_{h,1}, \\dots, e_{h,16} \\right]. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 609, + 358, + 890, + 377 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Object kinematic feature. To obtain a geometry-aware SDF for object reconstruction, we propose the object kinematic feature $e_{o} \in \mathbb{R}^{72}$. Following [11], we use the estimated object center $\psi_{o}$ to transform $x$ into the object canonical coordinate frame by the translation transformation $x_{oc} = T_t(x, \psi_o) = x - \psi_o$. As the grasping hand pose also gives hints about the shape of the manipulated object, similar to [63] we incorporate the knowledge of hand poses into object reconstruction. To this end, for each joint $i$ and its estimated 3D location $\psi_{h,i}$, we transform $x$ by translation as", + "bbox": [ + 498, + 388, + 893, + 541 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\ne_{o,i} = T_{t}(x, \\psi_{h,i}) = x - \\psi_{h,i}. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 593, + 553, + 890, + 570 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Given the importance of the wrist motion for object grasping, we also transform $x$ into the canonical coordinate system of the hand wrist $x_{ow} = T_p(x,\theta_{h,1},\phi_{h,1}) = \widetilde{H}(G_{h,1}^{-1}\cdot H(x))$, which normalizes the orientation of the grasp and further simplifies the task for the SDF object decoder.
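To make the forward kinematics behind Eqs. (2)-(3) concrete, the following sketch composes per-joint rigid transforms along a kinematic chain and expresses a query point in every joint's canonical frame. The Rodrigues conversion is standard, but the toy parent topology, tensor layout and function names are illustrative assumptions; the real chain and limb lengths come from MANO [46]. The object kinematic feature defined next follows the same idea using plain translations.

```python
import torch


def rodrigues(axis_angle):
    """Convert axis-angle vectors (J, 3) into rotation matrices (J, 3, 3)."""
    theta = axis_angle.norm(dim=-1, keepdim=True).clamp(min=1e-8)  # (J, 1)
    k = axis_angle / theta
    K = torch.zeros(axis_angle.shape[0], 3, 3)
    K[:, 0, 1], K[:, 0, 2] = -k[:, 2], k[:, 1]
    K[:, 1, 0], K[:, 1, 2] = k[:, 2], -k[:, 0]
    K[:, 2, 0], K[:, 2, 1] = -k[:, 1], k[:, 0]
    sin, cos = torch.sin(theta)[..., None], torch.cos(theta)[..., None]
    return torch.eye(3) + sin * K + (1.0 - cos) * (K @ K)


def hand_kinematic_feature(x, theta_h, phi_h, parents):
    """Eqs. (2)-(3): transform a query point x (3,) into the canonical frame of
    each of the 16 hand joints and concatenate the results into e_h (51-D).
    theta_h, phi_h: (16, 3) per-joint rotations/translations w.r.t. the parent;
    parents: parent index per joint, topologically ordered with parents[0] = -1."""
    G_local = torch.eye(4).repeat(16, 1, 1)
    G_local[:, :3, :3] = rodrigues(theta_h)
    G_local[:, :3, 3] = phi_h

    # Compose transforms along the chain: G_i is the product over ancestors A(i).
    G = [None] * 16
    for i in range(16):
        G[i] = G_local[i] if parents[i] < 0 else G[parents[i]] @ G_local[i]

    x_hom = torch.cat([x, x.new_ones(1)])                      # H(x)
    e_local = [(torch.inverse(G_i) @ x_hom)[:3] for G_i in G]  # ~H(G_i^-1 H(x))
    return torch.cat([x] + e_local)                            # e_h in R^51


# Illustrative chain where every joint hangs off the wrist (not MANO's topology).
parents = [-1] + [0] * 15
e_h = hand_kinematic_feature(torch.tensor([0.1, 0.0, 0.2]),
                             0.1 * torch.ones(16, 3), torch.zeros(16, 3), parents)
print(e_h.shape)  # torch.Size([51])
```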
The object kinematic feature is then defined by $e_o\\in \\mathbb{R}^{72}$ as", + "bbox": [ + 496, + 580, + 893, + 672 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\ne _ {o} = \\left[ x, x _ {o c}, e _ {o, 1}, \\dots , e _ {o, 2 1}, x _ {o w} \\right]. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 578, + 686, + 890, + 703 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Figure 3(bottom,right) illustrates the proposed geometry transformation for the object kinematic feature.", + "bbox": [ + 498, + 715, + 890, + 746 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Visual Feature Encoding", + "text_level": 1, + "bbox": [ + 500, + 756, + 725, + 772 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Geometry-aligned visual feature. Previous works [11, 26] typically predict signed distances from global image features that lack spatial resolution. Motivated by [47], we aim to generate geometry-aligned local image features for each input point $x$ . Assume $v_{t}^{r} \\in \\mathbb{R}^{16 \\times 16 \\times d}$ is the feature map generated from the SDF feature encoder, e.g. a ResNet model [22], where $16 \\times 16$ is the spatial feature resolution and $d$ is the feature dimension. We project the 3D input point $x$ to $\\hat{x}$", + "bbox": [ + 496, + 779, + 893, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "12893", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/e508e991fdf28c4839c4ac860e62390834ab9612d013c2de1f2f1294c9129aea.jpg", + "image_caption": [ + "(a) Single backbone." + ], + "image_footnote": [], + "bbox": [ + 81, + 90, + 464, + 166 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/5f164d4994fa36e30dfd0fb25c76d4c17e360706b8c42017358c9dbf0291e986.jpg", + "image_caption": [ + "(b) Symmetric backbone." + ], + "image_footnote": [], + "bbox": [ + 81, + 186, + 464, + 263 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/205075424b7e82596f85b701c053aeddd710494c31f70fcc7951701294bc47ab.jpg", + "image_caption": [ + "(c) Asymmetric backbone.", + "Figure 4. Illustrations of three image backbone sharing strategies." + ], + "image_footnote": [], + "bbox": [ + 81, + 281, + 465, + 358 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "on the image plane with the camera projection matrix and use bilinear sampling to obtain a local feature $e_v$ from the location on the feature map corresponding to $\\hat{x}$ .", + "bbox": [ + 75, + 426, + 468, + 472 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Temporally-enhanced visual feature. To improve the robustness of visual features in a single frame $I_{t}$ from occlusion or motion blur, we propose to exploit temporal information from videos to refine $v_{t}^{r}$ . Note that due to non-rigid hand motions, we do not assume video frames to contain different views of the same rigid scene. We make use of the spatial-temporal transformer architecture [1,4] to efficiently propagate image features across frames. Assume $v_{t-1}^{r}, \\dots, v_{t+1}^{r}$ are the feature maps from neighboring frames of $I_{t}$ in a video. We flatten all the feature maps as a sequence in the spatial-temporal dimension leading to $3 \\times 16 \\times 16$ tokens fed into the transformer model. We reshape the output features of the transformer into a feature map again for $I_{t}$ , denoted as $v_{t} \\in \\mathbb{R}^{16 \\times 16 \\times d}$ . 
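The geometry-aligned sampling described above can be sketched as a perspective projection of each query point followed by bilinear interpolation on the encoder feature map, in the spirit of pixel-aligned features [47]. The intrinsics, image size and feature dimensions below are illustrative assumptions; points are assumed to be expressed in camera coordinates.

```python
import torch
import torch.nn.functional as F


def pixel_aligned_features(points, feat_map, K, image_size=256):
    """Project 3D query points into the image and bilinearly sample the SDF
    encoder feature map at the projected locations.

    points:   (N, 3) query points in camera coordinates (Z > 0)
    feat_map: (1, d, 16, 16) feature map v_t from the SDF feature encoder
    K:        (3, 3) camera intrinsics
    returns   (N, d) geometry-aligned local visual features e_v."""
    # Perspective projection: x_hat = K [X/Z, Y/Z, 1]^T; keep the pixel (u, v).
    uv = (K @ (points / points[:, 2:3]).T).T[:, :2]

    # Normalize pixel coordinates to [-1, 1], the convention of grid_sample.
    grid = (uv / (image_size - 1)) * 2.0 - 1.0
    grid = grid.view(1, 1, -1, 2)                              # (1, 1, N, 2)

    sampled = F.grid_sample(feat_map, grid, mode="bilinear",
                            align_corners=True)                # (1, d, 1, N)
    return sampled[0, :, 0].T                                  # (N, d)


# Illustrative usage with dummy intrinsics and a random 256-channel feature map.
K = torch.tensor([[480.0, 0.0, 128.0], [0.0, 480.0, 128.0], [0.0, 0.0, 1.0]])
points = torch.randn(1000, 3) * 0.05 + torch.tensor([0.0, 0.0, 0.5])
e_v = pixel_aligned_features(points, torch.randn(1, 256, 16, 16), K)
print(e_v.shape)  # torch.Size([1000, 256])
```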
By aggregating spatial and temporal information from multiple frames, $v_{t}$ becomes more robust to the noise and can potentially produce more stable reconstruction results compared to $v_{t}^{r}$ . Our full gSDF model relies on the feature map $v_{t}$ to compute the local visual feature $e_{v}$ for the given input point $x$ .", + "bbox": [ + 75, + 474, + 468, + 762 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. Image Backbone Sharing Strategy", + "text_level": 1, + "bbox": [ + 76, + 771, + 375, + 787 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As shown in Figure 2, our model contains three branches for hand and object pose estimations as well as for SDF feature encoding. These different branches may share image backbones which might be beneficial with the multi-task learning. In this section, we describe three alternative strategies for sharing image backbones in our model.", + "bbox": [ + 75, + 794, + 468, + 883 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Single image backbone (Figure 4a). We only employ one", + "bbox": [ + 76, + 885, + 468, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "single image backbone for both pose and shape predictions. This is the strategy used in AlignSDF [11].", + "bbox": [ + 498, + 90, + 890, + 119 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Symmetric image backbone (Figure 4b). To disentangle pose and shape learning, we share the image backbone for hand and object pose estimation, but use a different backbone to extract visual features for SDFs learning.", + "bbox": [ + 496, + 121, + 890, + 181 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Asymmetric image backbone (Figure 4c). Since hand pose estimation plays a critical role in the task, we use a separate backbone to predict the hand pose, while share the image backbone for object pose predictor and SDF feature encoder.", + "bbox": [ + 496, + 181, + 893, + 242 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.5. Training", + "text_level": 1, + "bbox": [ + 500, + 251, + 602, + 267 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We apply a two-stage training strategy. In the first stage, we train the hand pose predictor to predict hand joint coordinates $\\psi_h$ with $\\ell 2$ loss $\\mathcal{L}_{hp}$ and an ordinal loss [43] $\\mathcal{L}_{ord}$ to penalize the case if the predicted depth order between the $i_{th}$ joint and the $j_{th}$ joint is misaligned with the ground-truth relation $\\mathbb{1}_{i,j}^{ord}$ , which are:", + "bbox": [ + 496, + 273, + 893, + 366 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {h p} = \\frac {1}{2 1} \\sum_ {i = 1} ^ {2 1} \\left\\| \\psi_ {h, i} - \\hat {\\psi} _ {h, i} \\right\\| _ {2} ^ {2}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 591, + 377, + 890, + 417 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {o r d} = \\sum_ {j = 2} ^ {2 1} \\sum_ {i = 1} ^ {j - 1} \\mathbb {1} _ {i, j} ^ {o r d} \\times \\left| \\left(\\psi_ {h, i} - \\psi_ {h, j}\\right) \\cdot \\vec {n} \\right|, \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 550, + 430, + 890, + 472 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\vec{n} \\in \\mathbb{R}^3$ denotes the viewpoint direction. We randomly sample twenty virtual views to optimize $\\mathcal{L}_{ord}$ . 
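As a concrete reading of the first-stage supervision in Eqs. (6)-(7), the sketch below combines a per-joint $\ell_2$ term with an ordinal term that penalizes pairs of joints whose predicted depth order along a viewing direction contradicts the ground truth. How the indicator $\mathbb{1}_{i,j}^{ord}$ is realized and the use of a single random view here are simplifying assumptions; the paper samples twenty virtual views.

```python
import torch
import torch.nn.functional as F


def joint_l2_loss(pred_joints, gt_joints):
    """Eq. (6): mean over the 21 joints of the squared l2 error."""
    return ((pred_joints - gt_joints) ** 2).sum(dim=-1).mean()


def ordinal_depth_loss(pred_joints, gt_joints, view_dir):
    """Eq. (7): for every joint pair (i, j), add |(psi_i - psi_j) . n| whenever
    the predicted depth order along n disagrees with the ground-truth order."""
    pred_d, gt_d = pred_joints @ view_dir, gt_joints @ view_dir
    loss = pred_joints.new_zeros(())
    for j in range(1, pred_joints.shape[0]):
        for i in range(j):
            wrong_order = (pred_d[i] - pred_d[j]) * (gt_d[i] - gt_d[j]) < 0
            if wrong_order:  # plays the role of the indicator 1_{i,j}^{ord}
                loss = loss + (pred_d[i] - pred_d[j]).abs()
    return loss


# Illustrative usage with one random virtual viewpoint.
pred = torch.randn(21, 3, requires_grad=True)
gt = torch.randn(21, 3)
view = F.normalize(torch.randn(3), dim=0)
(joint_l2_loss(pred, gt) + ordinal_depth_loss(pred, gt, view)).backward()
```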
Since the proposed kinematic features are based on the predicted hand joints $\psi_h$, we empirically find that pretraining the hand joint predictor in the first stage and then freezing its weights can achieve better performance.", + "bbox": [ + 496, + 479, + 890, + 569 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In the second training stage, we learn all the modules except the hand joint predictor in an end-to-end manner. We use the $\ell_2$ loss $\mathcal{L}_{op}$ to predict the object pose $\psi_{o}$ as follows:", + "bbox": [ + 496, + 570, + 890, + 616 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{op} = \\left\\| \\psi_{o} - \\hat{\\psi}_{o} \\right\\|_{2}^{2}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 627, + 626, + 890, + 655 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\hat{\psi}_o$ denotes the ground-truth location for the object center. To train the SDFs, we sample many 3D points around the hand-object surface and calculate their ground-truth signed distances to the hand mesh and the object mesh. We use an $\ell_1$ loss to optimize the SDF decoders:", + "bbox": [ + 496, + 667, + 893, + 742 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{hsdf} = \\left\\| \\mathrm{SDF}_{hand} - \\hat{\\mathrm{SDF}}_{hand} \\right\\|_{1}, \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 573, + 753, + 890, + 787 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{osdf} = \\left\\| \\mathrm{SDF}_{obj} - \\hat{\\mathrm{SDF}}_{obj} \\right\\|_{1},\n$$\n", + "text_format": "latex", + "bbox": [ + 578, + 785, + 790, + 813 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\hat{\mathrm{SDF}}_{hand}$ and $\hat{\mathrm{SDF}}_{obj}$ denote ground-truth signed distances to the hand and the object, respectively. The overall training objective $\mathcal{L}_{shape}$ in the second training stage is:", + "bbox": [ + 496, + 824, + 890, + 869 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{shape} = \\mathcal{L}_{op} + 0.5 \\times \\mathcal{L}_{hsdf} + 0.5 \\times \\mathcal{L}_{osdf}. \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 532, + 883, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "12894", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 89, + 209, + 107 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We conduct extensive experiments on two 3D hand-object reconstruction benchmarks to evaluate the effectiveness of our proposed gSDF model.", + "bbox": [ + 75, + 114, + 470, + 161 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Datasets", + "text_level": 1, + "bbox": [ + 76, + 172, + 179, + 186 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ObMan [21] is a large-scale synthetic dataset that contains diverse hand grasping poses on a wide range of objects imported from ShapeNet [8]. We follow previous methods [11,26,42,63] to generate data for SDF training. First, we remove meshes that contain too many double-sided triangles, which results in 87,190 hand-object meshes. Then, we fit the hand-object mesh into a unit cube and sample 40,000 points inside the cube.
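Returning briefly to the second training stage of Section 3.5, Eqs. (8)-(10) combine the object-pose term with the two SDF terms under 0.5 weights. The sketch below is a minimal reading of that objective; the reduction over sampled points (mean rather than sum) and the placeholder tensors are assumptions not fixed by the text above.

```python
import torch
import torch.nn.functional as F


def shape_loss(pred_obj_center, gt_obj_center,
               pred_sdf_hand, gt_sdf_hand, pred_sdf_obj, gt_sdf_obj):
    """Second-stage objective: L_shape = L_op + 0.5 * L_hsdf + 0.5 * L_osdf."""
    l_op = F.mse_loss(pred_obj_center, gt_obj_center, reduction="sum")  # Eq. (8)
    l_hsdf = F.l1_loss(pred_sdf_hand, gt_sdf_hand)                      # Eq. (9)
    l_osdf = F.l1_loss(pred_sdf_obj, gt_sdf_obj)
    return l_op + 0.5 * l_hsdf + 0.5 * l_osdf                           # Eq. (10)


# Illustrative call on dummy predictions for 1000 sampled query points.
loss = shape_loss(torch.zeros(3), torch.tensor([0.01, -0.02, 0.40]),
                  torch.randn(1000), torch.randn(1000),
                  torch.randn(1000), torch.randn(1000))
print(float(loss))
```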
For each sampled point, we compute its signed distance to the ground-truth hand mesh and object mesh, respectively. At test time, we report the performance on the whole ObMan test set of 6,285 testing samples.", + "bbox": [ + 75, + 196, + 468, + 362 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "DexYCB [9] is currently the largest real dataset that captures hand and object interactions in videos. Following [11,60], we focus on right-hand samples and use the official s0 split. We follow the same steps as in ObMan to obtain SDF training samples. To reduce the temporal redundancy, we downsample the video data to 6 frames per second, which results in 29,656 training samples and 5,928 testing samples.", + "bbox": [ + 75, + 363, + 470, + 470 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Evaluation metrics", + "text_level": 1, + "bbox": [ + 76, + 479, + 259, + 494 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We follow prior works to comprehensively evaluate the 3D reconstructions with multiple metrics as below.", + "bbox": [ + 76, + 503, + 468, + 532 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Hand Chamfer Distance $(\\mathrm{CD_h})$ . We evaluate Chamfer distance $(\\mathrm{cm}^2)$ between our reconstructed hand mesh and the ground-truth hand mesh. We follow previous works [11, 26] to optimize the scale and translation to align the reconstructed mesh with the ground truth and sample 30,000 points on both meshes to compute Chamfer distance. We report the median Chamfer distance on the test set to reflect the quality of our reconstructed hand mesh.", + "bbox": [ + 75, + 535, + 470, + 655 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Hand F-score $(\\mathbf{FS_h})$ . Since Chamfer distance is vulnerable to outliers [52, 63], we also report the F-score to evaluate the predicted hand mesh. After aligning the hand mesh with its ground truth, we report F-score at $1\\mathrm{mm}$ $(\\mathrm{FS_h}@\\mathrm{l})$ and $5\\mathrm{mm}$ $(\\mathrm{FS_h}@\\mathrm{5})$ thresholds.", + "bbox": [ + 75, + 656, + 468, + 731 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Object Chamfer Distance $(\\mathrm{CD_o})$ . Following [11, 26], we first use the optimized hand scale and translation to transform the reconstructed object mesh. Then, we follow the same process as $\\mathrm{CD_h}$ to compute $\\mathrm{CD_o}$ $(\\mathrm{cm}^2)$ and evaluate the quality of our reconstructed object mesh.", + "bbox": [ + 75, + 733, + 468, + 808 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Object F-score $(\\mathrm{FS_o})$ .We follow the previous work [63] to evaluate the reconstructed object mesh using F-score at 5 mm $\\mathrm{(FS_o@5)}$ and $10\\mathrm{mm}$ $\\mathrm{(FS_o@10)}$ thresholds.", + "bbox": [ + 75, + 809, + 468, + 854 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Hand Joint Error $(\\mathbf{E_h})$ . To measure the hand pose estimation accuracy, we compute the mean joint error (cm) relative to the hand wrist over all 21 joints in the form of $\\ell 2$ distance.", + "bbox": [ + 75, + 854, + 470, + 900 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/d026c5cc1a0a07fbceaba1d3783b8bf3b416d878437b376cd7d998074ba3da53.jpg", + "table_caption": [ + "Table 1. Hand reconstruction performance with different hand kinematic features $\\mathbf{K}_{*}^{h}$ and visual feature $\\mathrm{V}_1$ on DexYCB dataset." + ], + "table_footnote": [], + "table_body": "
Wrist onlyAll jointsCDh↓FSh@1↑FSh@5↑
K1h××0.3640.1540.764
K2h×0.3440.1670.776
K3h×0.3170.1710.788
", + "bbox": [ + 504, + 117, + 883, + 196 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/5e66986513695f2dc36c5edd15af610976b3ac4295da4516561605ecb0758bf9.jpg", + "table_caption": [ + "Table 2. Object reconstruction performance with different object kinematic features ${\\mathrm{K}}_{ * }^{o}$ and visual feature ${\\mathrm{V}}_{1}$ on DexYCB dataset." + ], + "table_footnote": [], + "table_body": "
Obj poseHand poseCDo↓FSo@5↑FSo@10↑
K1o××2.060.3920.660
K2o×1.930.3960.668
K3o1.710.4180.689
", + "bbox": [ + 501, + 237, + 883, + 314 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Object Center Error $(\\mathbf{E_o})$ . To evaluate the accuracy of our predicted object translation, we report the $\\ell 2$ distance (cm) between the prediction and its ground truth.", + "bbox": [ + 496, + 321, + 893, + 368 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Additionally, we report Contact ratio $(\\mathrm{C}_r)$ , Penetration depth $(\\mathrm{P}_d)$ and Intersection volume $(\\mathrm{I}_v)$ [11,21,26,60,62] to present more details about the interaction between the hand mesh and the object mesh. Please see supplementary material for more details.", + "bbox": [ + 496, + 369, + 893, + 443 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Implementation details", + "text_level": 1, + "bbox": [ + 500, + 452, + 714, + 468 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Model architecture. We use ResNet-18 [22] as our image backbone. For hand and object pose estimation, we adopt volumetric heatmaps of spatial resolution $64 \\times 64 \\times 64$ to localize hand joints and the object center in 3D space. For the spatial-temporal transformer, we use 16 transformer layers with 4 attention heads. We present more details about our model architecture in supplementary material.", + "bbox": [ + 496, + 474, + 890, + 580 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Training details. We take the image crop of the hand-object region according to their bounding boxes for DexYCB benchmark. Then, we modify camera intrinsic and extrinsic parameters [35,64] accordingly and take the cropped image as the input to our model. The spatial size of input images is $256 \\times 256$ for all our models. We perform data augmentation including rotation $\\left[\\left[-45^{\\circ}, 45^{\\circ}\\right]\\right)$ and color jittering. During SDF training, we randomly sample 1000 points (500 points inside the mesh and 500 points outside the mesh) for the hand and the object, respectively. We train our model with a batch size of 256 for 1600 epochs on both ObMan and DexYCB using the Adam optimizer [27] with 4 NVIDIA RTX 3090 GPUs. We use an initial learning rate of $1 \\times 10^{-4}$ and decay it by half every 600 epochs. It takes 22 hours for training on DexYCB and 60 hours on ObMan dataset.", + "bbox": [ + 496, + 582, + 893, + 808 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4. Ablation studies", + "text_level": 1, + "bbox": [ + 500, + 816, + 661, + 830 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We carry out ablations on the DexYCB dataset to validate different components in our gSDF model. We evaluate different settings of hand kinematic features $(\\mathbf{K}_*^h$ in Table 1), object kinematic features $(\\mathbf{K}_*^o$ in Table 2), and visual features", + "bbox": [ + 496, + 839, + 893, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "12895", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/3ae6d1026b212bb8a46a0bed9b316485f520e5f020e4197e666decd843b4555a.jpg", + "table_caption": [ + "Table 3. Hand-object reconstruction performance with different visual features on DexYCB dataset. The visual features are combined with the best kinematic features ${\\mathrm{K}}_{3}^{h}$ (Table 1) and ${\\mathrm{K}}_{3}^{o}$ (Table 2) to reconstruct hand and object respectively." + ], + "table_footnote": [], + "table_body": "
GlobalLocalTransformerCDh↓FSh@1↑FSh@5↑CDo↓FSo@5↑FSo@10↑Eh↓Eo↓
SpatialTemp.
V1×××0.3170.1710.7881.710.4180.6891.441.91
V2×××0.3100.1720.7951.710.4260.6941.441.98
V3××0.3040.1740.7971.600.4340.7031.441.94
V4×0.3020.1770.8011.550.4370.7091.441.96
", + "bbox": [ + 122, + 118, + 846, + 224 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/f5eba0e0f92278d1b358264d703c8164ec987251663f232535497be4f23948af.jpg", + "table_caption": [ + "Table 4. Hand-object reconstruction performance using different image backbone sharing strategies on DexYCB dataset. The ablation is carried out with visual features ${\\mathrm{V}}_{1}$ and kinematic features ${\\mathrm{K}}_{3}^{h}$ and ${\\mathrm{K}}_{3}^{o}$ ." + ], + "table_footnote": [], + "table_body": "
BackboneCDh↓FSh@1↑FSh@5↑CDo↓FSo@5↑FSo@10↑Eh↓Eo↓
Single0.4110.1480.7411.880.4020.6741.721.83
Symmetric0.3240.1680.7791.840.4050.6721.461.93
Asymmetric0.3170.1710.7881.710.4180.6891.441.91
", + "bbox": [ + 150, + 267, + 815, + 343 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "$(\\mathrm{V}_{*}$ in Table 3). We use the asymmetric image backbone if not otherwise mentioned.", + "bbox": [ + 75, + 354, + 468, + 383 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Hand kinematic feature. In Table 1, we evaluate the contribution of the proposed hand kinematic features for 3D hand reconstruction. The model in $\\mathrm{K}_1^h$ does not use any pose priors to transform the 3D point. The model in $\\mathrm{K}_2^h$ only uses the hand wrist pose to transform the 3D point as AlignSDF [11]. Our model in $\\mathrm{K}_3^h$ computes the transformations to all the hand joints, which achieves the best performance on all the evaluation metrics. Compared to $\\mathrm{K}_1^h$ without any pose priors, our model achieves more than $12\\%$ and $9\\%$ improvement on $\\mathrm{CD_h}$ and $\\mathrm{FS_h}@\\mathbb{1}$ , respectively. Compared to $\\mathrm{K}_2^h$ with only hand wrist, our model greatly reduces the hand Chamfer distance from $0.344~\\mathrm{cm}^2$ to $0.317~\\mathrm{cm}^2$ , leading to $7.8\\%$ relative gains. These results demonstrate the significance of pose priors and the advantage of gSDF for 3D hand reconstruction.", + "bbox": [ + 75, + 386, + 468, + 598 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Object kinematic feature. In Table 2, we validate the effectiveness of our proposed object kinematic feature. The model in $\\mathrm{K}_1^o$ does not contain any pose priors, while the model in $\\mathrm{K}_2^o$ aligns query points to the object center as in [11]. Our model in $\\mathrm{K}_3^o$ further employs the hand pose to produce the object kinematic feature, which significantly boosts the performance for the object reconstruction on different metrics. Compared to $\\mathrm{K}_2^o$ , our proposed object kinematic feature achieves more than $11\\%$ and $5.5\\%$ improvement on $\\mathrm{CD_o}$ and $\\mathrm{FS_o}@\\mathsf{5}$ , respectively.", + "bbox": [ + 75, + 598, + 468, + 750 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Visual features. We compare different visual features for SDF prediction in Table 3. $\\mathrm{V}_{1}$ uses the global visual feature e.g. the average pooling of ResNet feature map as in previous works [11,26]. Our local visual features $\\mathrm{V}_{2}$ derived from the geometry alignment with the query point reduces the hand Chamfer distance from $0.317~\\mathrm{cm}^2$ to $0.310~\\mathrm{cm}^2$ . However, it shows less improvement on the object shape accuracy. In $\\mathrm{V}_{3}$ and $\\mathrm{V}_{4}$ , we use the transformer model to refine the feature maps. 
To ablate the improvement from the transformer architecture and from the temporal information", + "bbox": [ + 75, + 750, + 470, + 901 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a2fc2087c8926407024aceddb57fdb907fca43dbfaaaaa85e1f9f3ce0af2e779.jpg", + "image_caption": [ + "Input Images" + ], + "image_footnote": [], + "bbox": [ + 549, + 366, + 643, + 440 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/406f8b2bb7be3f36f49ae72e85ff726200b4de097e075921745b6128b33911ee.jpg", + "image_caption": [ + "Our single-frame model" + ], + "image_footnote": [], + "bbox": [ + 645, + 366, + 741, + 440 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/26e3356a3edb06d32680c7a5458a24dd7cf4a5f55113205c55775f19b3067664.jpg", + "image_caption": [ + "Our video model" + ], + "image_footnote": [], + "bbox": [ + 741, + 366, + 838, + 440 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/28775e7b0390b3fd2da60253534f1ed3dae6e3f88f9c8fbcaa8a1330a7df6733.jpg", + "image_caption": [ + "Figure 5. The qualitative comparison between our single-frame model built with the transformer and our video model." + ], + "image_footnote": [], + "bbox": [ + 549, + 441, + 643, + 515 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e5dd54af45fb9666196be863cfdc724c7150dc7069e43683a2f8a6f6d723b5a9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 645, + 441, + 741, + 515 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/41160320a085061c1f3ae14b3c5bfc76c6c84c9fd77cc608cffb6e5fb116b782.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 741, + 441, + 838, + 515 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "in videos, we only use transformer for each single frame in $\\mathrm{V}_3$ while use it for multiple frames in $\\mathrm{V}_4$ . We can see that the transformer architecture alone is beneficial for the reconstruction. Enhancing the visual features with temporal contexts further improves the performance in terms of all the evaluation metrics especially for the objects. In Figure 5, compared with our single-frame model built with the transformer, our video model can make more robust predictions under some hard cases (e.g., motion blur). Although the reconstruction of the can is not accurate in the first example, our model tends to produce more regular shapes.", + "bbox": [ + 496, + 551, + 893, + 718 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Image backbone sharing strategy. Results of using different strategies for image backbone sharing are presented in Table 4. We train all the three models using the two-stage strategy described in Section 3.5. The model with one single backbone achieves the worst performance under most of the evaluation metrics. This is because the pose learning and shape learning compete with each other during training. The symmetric strategy to separate backbones for pose and SDFs performs better than the single backbone model. Our asymmetric strategy with a separate backbone for hand pose estimation and a shared backbone for object pose and SDF feature encoder achieves the best performance. We also em", + "bbox": [ + 496, + 719, + 893, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "12896", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/f193afc997567272abcba654f7fc1b6faa2c911f4a1bddffe5edddc857a7683a.jpg", + "table_caption": [ + "Table 5. 
Comparison with state-of-the-art methods on the image ObMan dataset." + ], + "table_footnote": [], + "table_body": "
MethodsCDh↓FSh@1↑FSh@5↑CDo↓FSo@5↑FSo@10↑Eh↓Eo↓
Hasson et al. [21]0.4150.1380.7513.600.3590.5901.13-
Karunratanakul et al. [26]0.261--6.80----
Ye et al. [63]----0.4200.630--
Chen et al. [11]0.1360.3020.9133.380.4040.6361.273.29
gSDF (Ours)0.1120.3320.9353.140.4380.6600.933.43
", + "bbox": [ + 130, + 103, + 834, + 210 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/c95ba4c9a3a84dd2134cac935eb17f6ba39829e7a15017704849f0aa861e80cc.jpg", + "table_caption": [ + "Table 6. Comparison with state-of-the-art methods on the video Dex YCB dataset." + ], + "table_footnote": [], + "table_body": "
MethodsCDh↓FSh@1↑FSh@5↑CDo↓FSo@5↑FSo@10↑Eh↓Eo↓
Hasson et al. [21]0.5370.1150.6471.940.3830.6421.67-
Karunratanakul et al. [26]0.3640.1540.7642.060.3920.660--
Chen et al. [11]0.3580.1620.7671.830.4100.6791.581.78
Chen et al. [11] 1†0.3440.1670.7761.810.4130.6871.571.93
gSDF (Ours)0.3020.1770.8011.550.4370.7091.441.96
", + "bbox": [ + 130, + 237, + 834, + 345 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/39ed9750feff3fbe2547e84316f8642767f8b8ec8ed882144508820c4b5274f1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 107, + 353, + 184, + 411 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/205b452a5802100b9a01591043a110f94a42796fa140c94ead9ad5f905272765.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 107, + 412, + 184, + 470 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/49fd956fdc8cf147437a8ea4bb3365039671495734ec7a8acc49948ce00567d7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 107, + 472, + 184, + 530 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/4356e5359f399a9406dafdcca70a9c95aeb4cfecc956d0725fc62934972df905.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 107, + 532, + 183, + 590 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1eb8bb9f7720efd45bf01a89fe7988b0bb76d992220e316a637f66312945b4a4.jpg", + "image_caption": [ + "Figure 6. Qualitative results of our model on test images from the ObMan and DexYCB benchmarks. Our model produces convincing results for different grasping poses and diverse objects." + ], + "image_footnote": [], + "bbox": [ + 107, + 590, + 184, + 648 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2960a5e8e974a67f4ef83809c2bcc825b37473220cb57827905ed333df3de6c5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 187, + 354, + 261, + 406 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/13903a0ca9925e1c8d065ca5669db7edc88b0a5c6a8633ea2861e24b978089c6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 191, + 415, + 261, + 464 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/fb079a5f525650ce642ab4943ca68a1f9da21fb77bbeea60f190c6504edb56fb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 191, + 470, + 261, + 525 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1bc77af7e87f4b7f2fc219f484870514e2eca4acdfd27cfaba7935f56455eb5c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 189, + 532, + 261, + 579 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/cd8232bcd997e1e35719325604ebd9ef49b1e440174c1123c8eababd70c9ede1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 191, + 592, + 266, + 643 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/455aa6d4cb069cbda3bdfd07ae3af3788347a47a97e7b6bf3b1c722d96e09edf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 281, + 354, + 357, + 410 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/55549b002e014a326591bd0db01eecf830dcbc552d7b6e759e22d9acb42f3a10.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 281, + 411, + 357, + 470 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e71cbc42877831d036ebc5f2191c6f3f0ee3353ef108860ec09872e723f9edd3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 282, + 472, + 357, + 527 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1477bc6049d11ebb049d1f9cb5e0b3fbb848177ddeb296f18a0cec1d8ea3faf2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 282, + 530, + 357, + 588 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": 
"images/8aa7c02088f82d10f587d942b76aaec39801e04d3d08362d7948bb43920f281c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 282, + 590, + 357, + 647 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7e0c697f296ef13edc1a09cd92d627cf1d4c37d2d96708553ee12c6fabfe7cfc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 364, + 354, + 419, + 406 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/340174b095ecda11c76d010375a3400d3fd57a086421c8a6e4c409f6b4cfa3f4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 367, + 412, + 419, + 465 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6fa8633c18178a6fecbe50d2643461c1fd579a32b1b6f18b46979035e5974956.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 364, + 474, + 423, + 527 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/9bac27d76484944de1854e78079c3b4a5d0904b61ba8481043de1194ff3bb557.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 366, + 536, + 433, + 580 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/db353de4603cc3652877d9be443d09e004a74e6bb8c66d3ed4fbbd08d6baec8f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 364, + 590, + 431, + 643 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "pirically find that learning the object pose and SDFs together improves both the pose accuracy and the shape accuracy. The possible reason is that estimating object pose also helps our model to focus more on hand-object regions and boosts the 3D reconstruction accuracy.", + "bbox": [ + 75, + 700, + 470, + 777 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5. Comparison with state of the art", + "text_level": 1, + "bbox": [ + 76, + 782, + 362, + 799 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We compare our gSDF model with state-of-the-art methods on ObMan and DexYCB benchmarks. In Figure 6, we qualitatively demonstrate that our approach can produce convincing 3D hand-object reconstruction results.", + "bbox": [ + 75, + 805, + 472, + 866 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ObMan. Table 5 shows the comparison of hand and object reconstruction results on the synthetic ObMan dataset. Since ObMan does not contain video data, we do not use the spatial-temporal transformer in this model. The proposed gSDF outperforms previous methods by a significant margin. Compared with the recent method [63] that only reconstructs hand-held objects, our joint method produces more accurate object meshes. gSDF achieves a $17.6\\%$ improvement on $\\mathrm{CD_h}$ and a $7.1\\%$ improvement on $\\mathrm{CD_o}$ over the state-of-the-art accuracy, which indicates that our model can better reconstruct both hand meshes and diverse object meshes.", + "bbox": [ + 496, + 354, + 893, + 521 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "DexYCB. Table 6 presents results on the DexYCB benchmark. We also show the performance of AlignSDF [11] with two backbones ([11]-2BB). Our model demonstrates a large improvement over recent methods. In particular, it advances the state-of-the-art accuracy on $\\mathrm{CD_h}$ and $\\mathrm{CD_o}$ by $12.2\\%$ and $14.4\\%$ , respectively. The high accuracy of gSDF on DexYCB demonstrates that it generalizes well to real images.", + "bbox": [ + 496, + 525, + 893, + 631 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. 
Conclusion", + "text_level": 1, + "bbox": [ + 500, + 647, + 617, + 662 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this work, we propose a geometry-driven SDF (gSDF) approach for 3D hand and object reconstruction. We explicitly model the underlying 3D geometry to guide the SDF learning. We first estimate poses of hands and objects according to kinematic chains of pose transformations, and then derive kinematic features and local visual features using the geometry information for signed distance prediction. Extensive experiments on ObMan and DexYCB datasets demonstrate the effectiveness of our proposed method.", + "bbox": [ + 496, + 669, + 893, + 805 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements. This work was granted access to the HPC resources of IDRIS under the allocation AD011013147 made by GENCI. This work was funded in part by the French government under management of Agence Nationale de la Recherche as part of the \"Investissements d'avenir\" program, reference ANR19-P3IA-0001 (PRAIRIE 3IA Institute) and by Louis Vuitton ENS Chair on Artificial Intelligence. We thank Yana Hasson for helpful discussions.", + "bbox": [ + 496, + 811, + 893, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_footnote", + "text": "$^{1\\dagger}$ To make more fair comparison with Chen et al. [11], we adapt their model to the same asymmetric backbone structure as used in our method.", + "bbox": [ + 76, + 875, + 468, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "12897", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lucic, and Cordelia Schmid. ViViT: A video vision transformer. In ICCV, 2021. 5", + "[2] Seungryul Baek, Kwang In Kim, and Tae-Kyun Kim. Pushing the envelope for RGB-based dense 3D hand pose estimation via neural rendering. In CVPR, 2019. 2", + "[3] Luca Ballan, Aparna Taneja, Jürgen Gall, Luc Van Gool, and Marc Pollefeys. Motion capture of hands in action using discriminative salient points. In ECCV, 2012. 2", + "[4] Gedas Bertasius, Heng Wang, and Lorenzo Torresani. Is space-time attention all you need for video understanding? In ICML, 2021. 5", + "[5] Adnane Boukhayma, Rodrigo de Bem, and Philip HS Torr. 3D hand shape and pose from images in the wild. In CVPR, 2019. 2", + "[6] Romain Brégier. Deep regression on manifolds: a 3D rotation case study. In 3DV, 2021. 3, 4", + "[7] Zhe Cao, Ilija Radosavovic, Angjoo Kanazawa, and Jitendra Malik. Reconstructing hand-object interactions in the wild. In ICCV, 2021. 1, 2", + "[8] Angel X Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, et al. ShapeNet: An information-rich 3D model repository. arXiv preprint arXiv:1512.03012, 2015. 6", + "[9] Yu-Wei Chao, Wei Yang, Yu Xiang, Pavlo Molchanov, Ankur Handa, Jonathan Tremblay, Yashraj S Narang, Karl Van Wyk, Umar Iqbal, Stan Birchfield, et al. DexYCB: A benchmark for capturing hand grasping of objects. In CVPR, 2021. 1, 2, 6", + "[10] Xingyu Chen, Yufeng Liu, Chongyang Ma, Jianlong Chang, Huayan Wang, Tian Chen, Xiaoyan Guo, Pengfei Wan, and Wen Zheng. Camera-space hand mesh recovery via semantic aggregation and adaptive 2D-1D registration. In CVPR, 2021. 
2", + "[11] Zerui Chen, Yana Hasson, Cordelia Schmid, and Ivan Laptev. AlignSDF: Pose-Aligned signed distance fields for handobject reconstruction. In ECCV, 2022. 1, 2, 3, 4, 5, 6, 7, 8", + "[12] Zhiqin Chen and Hao Zhang. Learning implicit fields for generative shape modeling. In CVPR, 2019. 2", + "[13] Enric Corona, Tomas Hodan, Minh Vo, Francesc Moreno-Noguer, Chris Sweeney, Richard Newcombe, and Lingni Ma. LISA: Learning implicit shape and appearance of hands. In CVPR, 2022. 2", + "[14] Thibault Groueix, Matthew Fisher, Vladimir G Kim, Bryan C Russell, and Mathieu Aubry. A papier-mâché approach to learning 3D surface generation. In CVPR, 2018. 2", + "[15] Henning Hamer, Juergen Gall, Thibaut Weise, and Luc Van Gool. An object-dependent hand pose prior from sparse training data. In CVPR, 2010. 2", + "[16] Henning Hamer, Konrad Schindler, Esther Koller-Meier, and Luc Van Gool. Tracking a hand manipulating an object. In ICCV, 2009. 2" + ], + "bbox": [ + 78, + 116, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[17] Shreyas Hampali, Mahdi Rad, Markus Oberweger, and Vincent Lepetit. HOnnotate: A method for 3D annotation of hand and object poses. In CVPR, 2020. 1, 2", + "[18] Shreyas Hampali, Sayan Deb Sarkar, Mahdi Rad, and Vincent Lepetit. Keypoint Transformer: Solving joint identification in challenging hands and object interactions for accurate 3D pose estimation. In CVPR, 2022. 1, 2", + "[19] Yana Hasson, Bugra Tekin, Federica Bogo, Ivan Laptev, Marc Pollefeys, and Cordelia Schmid. Leveraging photometric consistency over time for sparsely supervised hand-object reconstruction. In CVPR, 2020. 1, 2, 4", + "[20] Yana Hasson, Gül Varol, Cordelia Schmid, and Ivan Laptev. Towards unconstrained joint hand-object reconstruction from RGB videos. In 3DV, 2021. 2", + "[21] Yana Hasson, Gul Varol, Dimitrios Tzionas, Igor Kalevatykh, Michael J Black, Ivan Laptev, and Cordelia Schmid. Learning joint reconstruction of hands and manipulated objects. In CVPR, 2019. 1, 2, 4, 6, 8", + "[22] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 4, 6", + "[23] Tony Heap and David Hogg. Towards 3D hand tracking using a deformable model. In FG, 1996. 2", + "[24] Umar Iqbal, Pavlo Molchanov, Thomas Breuel Juergen Gall, and Jan Kautz. Hand pose estimation via latent 2.5D heatmap regression. In ECCV, 2018. 1, 2", + "[25] Korrawe Karunratanakul, Adrian Spurr, Zicong Fan, Otmar Hilliges, and Siyu Tang. A skeleton-driven neural occupancy representation for articulated hands. In 3DV, 2021. 2", + "[26] Korrawe Karunratanakul, Jinlong Yang, Yan Zhang, Michael J Black, Krikamol Muandet, and Siyu Tang. Grasping Field: Learning implicit representations for human grasps. In 3DV, 2020. 1, 2, 3, 4, 6, 7, 8", + "[27] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 6", + "[28] Nikos Kolotouros, Georgios Pavlakos, Michael J Black, and Kostas Daniilidis. Learning to reconstruct 3D human pose and shape via model-fitting in the loop. In ICCV, 2019. 3, 4", + "[29] Dominik Kulon, Riza Alp Güler, I. Kokkinos, M. Bronstein, and S. Zafeiriou. Weakly-supervised mesh-convolutional hand reconstruction in the wild. In CVPR, 2020. 2", + "[30] Dominik Kulon, Haoyang Wang, Riza Alp Güler, Michael M. Bronstein, and Stefanos Zafeiriou. Single image 3D hand reconstruction with mesh convolutions. In BMVC, 2019. 2", + "[31] Vincent Lepetit. 
Recent advances in 3D object and hand pose estimation. arXiv preprint arXiv:2006.05927, 2020. 2", + "[32] Mengcheng Li, Liang An, Hongwen Zhang, Lianpeng Wu, Feng Chen, Tao Yu, and Yebin Liu. Interacting attention graph for single image two-hand reconstruction. In CVPR, 2022. 2", + "[33] William E Lorensen and Harvey E Cline. Marching Cubes: A high resolution 3D surface construction algorithm. TOG, 1987. 3", + "[34] Jun Lv, Wenqiang Xu, Lixin Yang, Sucheng Qian, Chongzhao Mao, and Cewu Lu. HandTailor: Towards high-precision monocular 3D hand recovery. In BMVC, 2021. 2" + ], + "bbox": [ + 501, + 92, + 893, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "12898", + "bbox": [ + 480, + 945, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[35] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3D human pose estimation in the wild using improved CNN supervision. In 3DV, 2017. 6", + "[36] Hao Meng, Sheng Jin, Wentao Liu, Chen Qian, Mengxiang Lin, Wanli Ouyang, and Ping Luo. 3D interacting hand pose estimation by hand de-occlusion and removal. In ECCV, 2022. 2", + "[37] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy Networks: Learning 3D reconstruction in function space. In CVPR, 2019. 2", + "[38] Gyeongsik Moon, Ju Yong Chang, and Kyoung Mu Lee. V2V- PoseNet: Voxel-to-voxel prediction network for accurate 3D hand and human pose estimation from a single depth map. In CVPR, 2018. 1, 2, 4", + "[39] Franziska Mueller, Florian Bernard, Oleksandr Sotnychenko, Dushyant Mehta, Srinath Sridhar, Dan Casas, and Christian Theobalt. Ganerated hands for real-time 3D hand tracking from monocular RGB. In CVPR, 2018. 2", + "[40] Franziska Mueller, Micah Davis, Florian Bernard, Oleksandr Sotnychenko, Micekal Verschooor, Miguel A Otaduy, Dan Casas, and Christian Theobalt. Real-time pose and shape reconstruction of two interacting hands with a single depth camera. TOG, 2019. 2", + "[41] Iason Oikonomidis, Nikolaos Kyriazis, and Antonis A Argyros. Full DOF tracking of a hand interacting with an object by modeling occlusions and physical constraints. In ICCV, 2011. 2", + "[42] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning continuous signed distance functions for shape representation. In CVPR, 2019. 2, 6", + "[43] Georgios Pavlakos, Xiaowei Zhou, and Kostas Daniilidis. Ordinal depth supervision for 3D human pose estimation. In CVPR, 2018. 5", + "[44] Georgios Pavlakos, Xiaowei Zhou, Konstantinos G Derpanis, and Kostas Daniilidis. Coarse-to-fine volumetric prediction for single-image 3D human pose. In CVPR, 2017. 4", + "[45] James M Rehg and Takeo Kanade. Visual tracking of high DOF articulated structures: an application to human hand tracking. In ECCV, 1994. 2", + "[46] Javier Romero, Dimitrios Tzionas, and Michael J. Black. Embodied Hands: Modeling and capturing hands and bodies together. TOG, 2017. 1, 2", + "[47] Shunsuke Saito, Zeng Huang, Ryota Natsume, Shigeo Morishima, Angjoo Kanazawa, and Hao Li. PiFu: Pixel-aligned implicit function for high-resolution clothed human digitization. In ICCV, 2019. 3, 4", + "[48] Adrian Spurr, Aneesh Dahiya, Xi Wang, Xuong Zhang, and Otmar Hilliges. Self-supervised 3D hand pose estimation from monocular RGB via contrastive learning. In ICCV, 2021. 
2", + "[49] Srinath Sridhar, Franziska Mueller, Michael Zollhöfer, Dan Casas, Antti Oulasvirta, and Christian Theobalt. Real-time joint tracking of a hand manipulating an object from RGB-D input. In ECCV, 2016. 2" + ], + "bbox": [ + 78, + 90, + 470, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[50] Xiao Sun, Bin Xiao, Fangyin Wei, Shuang Liang, and Yichen Wei. Integral human pose regression. In ECCV, 2018. 2, 4", + "[51] Danhang Tang, Hyung Jin Chang, Alykhan Tejani, and Tae Kyun Kim. Latent regression forest: Structured estimation of 3D articulated hand posture. In CVPR, 2014. 1, 2", + "[52] Maxim Tatarchenko, Stephan R Richter, René Ranftl, Zhuwen Li, Vladlen Koltun, and Thomas Brox. What do single-view 3D reconstruction networks learn? In CVPR, 2019. 6", + "[53] Bugra Tekin, Federica Bogo, and Marc Pollefeys. $\\mathrm{H} + \\mathrm{O}$ : Unified egocentric recognition of 3D hand-object poses and interactions. In CVPR, 2019. 1, 2", + "[54] Tze Ho Elden Tse, Kwang In Kim, Ales Leonardis, and Hyung Jin Chang. Collaborative learning for hand and object reconstruction with attention-guided graph convolution. In CVPR, 2022. 1, 2", + "[55] Aggeliki Tsoli and Antonis A Argyros. Joint 3D tracking of a deformable object in interaction with a hand. In ECCV, 2018. 2", + "[56] Dimitrios Tzionas and Juergen Gall. 3D object reconstruction from hand-object interactions. In ICCV, 2015. 2", + "[57] Jiayi Wang, Franziska Mueller, Florian Bernard, Suzanne Sorli, Oleksandr Sotnychenko, Neng Qian, Miguel A Otaduy, Dan Casas, and Christian Theobalt. RGB2Hands: Real-time tracking of 3D hand interactions from monocular RGB video. TOG, 2020. 2", + "[58] Yangang Wang, Jianyuan Min, Jianjie Zhang, Yebin Liu, Feng Xu, Qionghai Dai, and Jinxiang Chai. Video-based hand manipulation capture through composite motion control. TOG, 2013. 2", + "[59] Fu Xiong, Boshen Zhang, Yang Xiao, Zhiguo Cao, Taidong Yu, Joey Tianyi Zhou, and Junsong Yuan. A2J: Anchor-to-joint regression network for 3D articulated pose estimation from a single depth image. In ICCV, 2019. 2", + "[60] Lixin Yang, Kailin Li, Xinyu Zhan, Jun Lv, Wenqiang Xu, Jiefeng Li, and Cewu Lu. ArtiBoost: Boosting articulated 3D hand-object pose estimation via online exploration and synthesis. In CVPR, 2022. 2, 6", + "[61] Lixin Yang, Kailin Li, Xinyu Zhan, Fei Wu, Anran Xu, Liu Liu, and Cewu Lu. OakInk: A large-scale knowledge repository for understanding hand-object interaction. In CVPR, 2022. 1, 2", + "[62] Lixin Yang, Xinyu Zhan, Kailin Li, Wenqiang Xu, Jiefeng Li, and Cewu Lu. CPF: Learning a contact potential field to model the hand-object interaction. In ICCV, 2021. 1, 2, 6", + "[63] Yufei Ye, Abhinav Gupta, and Shubham Tulsiani. What's in your hands? 3D reconstruction of generic objects in hands. In CVPR, 2022. 2, 3, 4, 6, 8", + "[64] Frank Yu, Mathieu Salzmann, Pascal Fua, and Helge Rhodin. PCLs: Geometry-aware neural reconstruction of 3D pose with perspective crop layers. In CVPR, 2021. 6", + "[65] Shanxin Yuan, Guillermo Garcia-Hernando, Björn Stenger, Gyeongsik Moon, Ju Yong Chang, Kyoung Mu Lee, Pavlo Molchanov, Jan Kautz, Sina Honari, Liuhao Ge, Junsong Yuan, Xinghao Chen, Guijin Wang, Fan Yang, Kai Akiyama, Yang Wu, Qingfu Wan, Meysam Madadi, Sergio Escalera, Shile Li, Dongheui Lee, Iason Oikonomidis, Antonis Argyros, and Tae-Kyun Kim. 
Depth-based 3D hand pose estimation:" + ], + "bbox": [ + 501, + 92, + 893, + 898 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "12899", + "bbox": [ + 480, + 945, + 519, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "From current achievements to future goals. In CVPR, June 2018. 2", + "[66] Yi Zhou, Connelly Barnes, Jingwan Lu, Jimei Yang, and Hao Li. On the continuity of rotation representations in neural networks. In CVPR, 2019. 3, 4", + "[67] Christian Zimmermann and Thomas Brox. Learning to estimate 3D hand pose from single RGB images. In ICCV, 2017. 1, 2" + ], + "bbox": [ + 78, + 90, + 468, + 204 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "12900", + "bbox": [ + 480, + 945, + 517, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2023/gSDF_ Geometry-Driven Signed Distance Functions for 3D Hand-Object Reconstruction/ab15db8d-1011-4beb-9528-c9481ccf45d6_model.json b/2023/gSDF_ Geometry-Driven Signed Distance Functions for 3D Hand-Object Reconstruction/ab15db8d-1011-4beb-9528-c9481ccf45d6_model.json new file mode 100644 index 0000000000000000000000000000000000000000..1bde44288411186ff4b20e6e7835aaee26d4f2a1 --- /dev/null +++ b/2023/gSDF_ Geometry-Driven Signed Distance Functions for 3D Hand-Object Reconstruction/ab15db8d-1011-4beb-9528-c9481ccf45d6_model.json @@ -0,0 +1,2983 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.224, + 0.131, + 0.748, + 0.175 + ], + "angle": 0, + "content": "gSDF: Geometry-Driven Signed Distance Functions for 3D Hand-Object Reconstruction" + }, + { + "type": "text", + "bbox": [ + 0.21, + 0.205, + 0.304, + 0.221 + ], + "angle": 0, + "content": "Zerui Chen" + }, + { + "type": "text", + "bbox": [ + 0.344, + 0.205, + 0.446, + 0.22 + ], + "angle": 0, + "content": "Shizhe Chen" + }, + { + "type": "text", + "bbox": [ + 0.487, + 0.205, + 0.622, + 0.22 + ], + "angle": 0, + "content": "Cordelia Schmid" + }, + { + "type": "text", + "bbox": [ + 0.663, + 0.205, + 0.759, + 0.222 + ], + "angle": 0, + "content": "Ivan Laptev" + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.223, + 0.807, + 0.24 + ], + "angle": 0, + "content": "Inria, École normale supérieure, CNRS, PSL Research Univ., 75005 Paris, France" + }, + { + "type": "text", + "bbox": [ + 0.362, + 0.244, + 0.603, + 0.256 + ], + "angle": 0, + "content": "firstname.lastname@inria.fr" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.293, + 0.313, + 0.309 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.326, + 0.474, + 0.599 + ], + "angle": 0, + "content": "Signed distance functions (SDFs) is an attractive framework that has recently shown promising results for 3D shape reconstruction from images. SDFs seamlessly generalize to different shape resolutions and topologies but lack explicit modelling of the underlying 3D geometry. In this work, we exploit the hand structure and use it as guidance for SDF-based shape reconstruction. 
In particular, we address reconstruction of hands and manipulated objects from monocular RGB images. To this end, we estimate poses of hands and objects and use them to guide 3D reconstruction. More specifically, we predict kinematic chains of pose transformations and align SDFs with highly-articulated hand poses. We improve the visual features of 3D points with geometry alignment and further leverage temporal information to enhance the robustness to occlusion and motion blurs. We conduct extensive experiments on the challenging ObMan and DexYCB benchmarks and demonstrate significant improvements of the proposed method over the state of the art." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.631, + 0.21, + 0.647 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.658, + 0.471, + 0.779 + ], + "angle": 0, + "content": "Understanding how hands interact with objects is becoming increasingly important for widespread applications, including virtual reality, robotic manipulation and human-computer interaction. Compared to 3D estimation of sparse hand joints [24,38,51,53,67], joint reconstruction of hands and object meshes [11, 18, 21, 26, 62] provides rich information about hand-object interactions and has received increased attention in recent years." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.472, + 0.902 + ], + "angle": 0, + "content": "To reconstruct high-quality meshes, some recent works [9, 17, 61] explore multi-view image inputs. Multi-view images, however, are less common both for training and testing scenarios. In this work, we focus on a more practical and user-friendly setting where we aim to reconstruct hand and object meshes from monocular RGB images. Given the ill-posed nature of the task, many existing methods [7, 19, 21, 54, 62] employ parametric mesh models (e.g., MANO [46]) to im" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.296, + 0.637, + 0.371 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.371, + 0.587, + 0.387 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.523, + 0.393, + 0.554, + 0.404 + ], + "angle": 0, + "content": "gSDF" + }, + { + "type": "image", + "bbox": [ + 0.535, + 0.402, + 0.629, + 0.475 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.533, + 0.475, + 0.585, + 0.491 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.493, + 0.572, + 0.566 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.577, + 0.505, + 0.631, + 0.556 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.638, + 0.298, + 0.764, + 0.371 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.687, + 0.371, + 0.718, + 0.388 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.657, + 0.394, + 0.687, + 0.405 + ], + "angle": 0, + "content": "gSDF" + }, + { + "type": "image", + "bbox": [ + 0.677, + 0.404, + 0.759, + 0.474 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.677, + 0.474, + 0.718, + 0.491 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.645, + 0.492, + 0.703, + 0.565 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.765, + 0.298, + 0.891, + 0.371 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.785, + 0.371, + 0.843, + 
0.388 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.784, + 0.395, + 0.812, + 0.405 + ], + "angle": 0, + "content": "gSDF" + }, + { + "type": "image", + "bbox": [ + 0.795, + 0.415, + 0.868, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.475, + 0.844, + 0.491 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.765, + 0.492, + 0.83, + 0.566 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.835, + 0.508, + 0.884, + 0.551 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.584, + 0.895, + 0.697 + ], + "angle": 0, + "content": "Figure 1. We aim to reconstruct 3D hand and object meshes from monocular images (top). Our method gSDF (middle) first predicts 3D hand joints (blue) and object locations (red) from input images. We use estimated hand poses and object locations to incorporate strong geometric priors into SDF by generating hand- and object-aware kinematic features for each SDF query point. Our resulting gSDF model generates accurate results for real images with various objects and grasping hand poses (bottom)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.717, + 0.894, + 0.778 + ], + "angle": 0, + "content": "pose prior knowledge and reduce ambiguities in 3D hand reconstruction. MANO hand meshes, however, have relatively limited resolution and can be suboptimal for the precise capture of hand-object interactions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.895, + 0.902 + ], + "angle": 0, + "content": "To reconstruct detailed hand and object meshes, another line of efforts [11, 26] employ signed distance functions (SDFs). Grasping Field [26] makes the first attempt to model hand and object surfaces using SDFs. However, it does not explicitly associate 3D geometry with image cues and has no prior knowledge incorporated in SDFs, leading to unrealistic meshes. AlignSDF [11] proposes to align SDFs with respect to global poses (i.e., the hand wrist transformation and the" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "12890" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.152 + ], + "angle": 0, + "content": "object translation) and produces improved results. However, it is still challenging to capture geometric details for more complex hand motions and manipulations of diverse objects, which involve the articulation of multiple fingers." + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.153, + 0.47, + 0.378 + ], + "angle": 0, + "content": "To address limitations of prior works, we propose a geometry-driven SDF (gSDF) method that encodes strong pose priors and improves reconstruction by disentangling pose and shape estimation (see Figure 1). To this end, we first predict sparse 3D hand joints from images and derive full kinematic chains of local pose transformations from joint locations using inverse kinematics. Instead of only using the global pose as in [11], we optimize SDFs with respect to poses of all the hand joints, which leads to a more fine-grained alignment between the 3D shape and articulated hand poses. In addition, we project 3D points onto the image plane to extract geometry-aligned visual features for signed distance prediction. 
The visual features are further refined with spatio-temporal contexts using a transformer model to enhance the robustness to occlusions and motion blurs." + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.379, + 0.47, + 0.59 + ], + "angle": 0, + "content": "We conduct extensive ablation experiments to show the effectiveness of different components in our approach. The proposed gSDF model greatly advances state-of-the-art accuracy on the challenging ObMan and DexYCB benchmarks. Our contributions can be summarized in three-fold: (i) To embed strong pose priors into SDFs, we propose to align the SDF shape with its underlying kinematic chains of pose transformations, which reduces ambiguities in 3D reconstruction. (ii) To further reduce the misalignment induced by inaccurate pose estimations, we propose to extract geometry-aligned local visual features and enhance the robustness with spatio-temporal contexts. (iii) We conduct comprehensive experiments to show that our approach outperforms state-of-the-art results by a significant margin." + }, + { + "type": "title", + "bbox": [ + 0.08, + 0.604, + 0.218, + 0.62 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.629, + 0.47, + 0.704 + ], + "angle": 0, + "content": "This paper focuses on jointly reconstructing hands and hand-held objects from RGB images. In this section, we first review previous works on the 3D hand pose and shape estimation. We then discuss relevant works on the joint reconstruction of hands and objects." + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.705, + 0.47, + 0.9 + ], + "angle": 0, + "content": "3D hand pose and shape estimation. The topic of 3D hand pose estimation has received widespread attention since the 90s [23, 45] and has seen significant progress in recent years [31, 65]. Methods which take RGB images as input [24, 36, 38, 39, 48, 50, 51, 53, 59, 67] often estimate sparse 3D hand joint locations from visual data using well-designed deep neural networks. Though these methods can achieve high estimation accuracy, their outputs of 3D sparse joints provide limited information about the 3D hand surface, which is critical in AR/VR applications. Following the introduction of the anthropomorphic parametric hand mesh model MANO [46], several works [2, 5, 10, 18, 29, 30, 32, 34, 40, 57] estimate the MANO hand shape and pose parameters to" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.092, + 0.894, + 0.242 + ], + "angle": 0, + "content": "recover the full hand surface. However, MANO has a limited mesh resolution and cannot produce fine surface details. Neural implicit functions [13,25] have the potential to reconstruct more realistic high resolution hand surfaces [12,37,42]. In this work, we combine the advantages of sparse, parametric and implicit modelling. We predict sparse 3D joints accurately from images and estimate the MANO parameters using inverse kinematics. We then optimize neural implicit functions with respect to underlying kinematic structures and reconstruct realistic meshes." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.251, + 0.894, + 0.901 + ], + "angle": 0, + "content": "3D hand and object reconstruction. Joint reconstruction of hand and object meshes provides a more comprehensive view about how hands interact with manipulated objects in the 3D space and has received more attention in the past few years. 
Previous works often rely on multiview correspondence [3,9,17,41,58,61] or additional depth information [15, 16, 49, 55, 56] to approach this task. In this work, we focus on a more challenging setting and perform a joint reconstruction from monocular RGB images. Given the ill-posed nature of this problem, many works [7, 18-21, 54, 60, 62] deploy MANO, which encodes hand prior knowledge learned from hand scans, to reconstruct hand meshes. To further simplify the object reconstruction task, several works [18, 60, 62] make a strong assumption that the ground-truth object model is available at test time. Our work and some previous efforts [11, 21, 26] relax this assumption and assume unknown object models. Hasson et al. [21] employ a differentiable MANO layer to estimate the hand shape and AtlasNet [14] to reconstruct the manipulated object. However, both MANO and AtlasNet can only produce meshes of limited resolution, which prevents the modelling of detailed contacts between hands and objects. To generate more detailed surfaces, Karunratanakul et al. [26] introduce grasping fields and propose to use SDFs to reconstruct both hand and object meshes. However, such a model-free approach does not capture any prior knowledge about hands or objects, which can lead to predicting unrealistic 3D geometry. To mitigate this, Ye et al. [63] propose to use hand poses estimated from an off-the-shelf model to help reconstruct the hand-held object mesh. The main difference with our work is that we jointly reconstruct hand meshes and object meshes using our proposed model, which is more challenging. Also, in addition to using hand poses to help capture the object shapes, we predict object poses and show their benefits for SDF-based object reconstruction. Another work AlignSDF [11] optimizes SDFs with respect to estimated hand-object global poses and encodes pose priors into SDFs. In addition to using global poses as a guide for SDFs, we propose to learn SDFs from the full kinematic chains of local pose transformations, and achieve a more precise alignment between the 3D shape and the underlying poses. To further handle hard cases induced by occlusion or motion blur where pose estimations are inaccurate, we leverage" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.518, + 0.957 + ], + "angle": 0, + "content": "12891" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.092, + 0.895, + 0.306 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.318, + 0.895, + 0.348 + ], + "angle": 0, + "content": "Figure 2. The overview of our proposed single-frame model. Our method reconstructs realistic hand and object meshes from a single RGB image. Marching Cubes algorithm [33] is used at test time to extract meshes." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.369, + 0.471, + 0.401 + ], + "angle": 0, + "content": "a transformer to accumulate corresponding image features from multiple frames and benefit the geometry recovery." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.415, + 0.17, + 0.431 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.441, + 0.473, + 0.579 + ], + "angle": 0, + "content": "This section presents our geometry-driven SDF (gSDF) method for 3D hand and object reconstruction from monocular RGB images. We aim to learn two signed distance functions \\(\\mathrm{SDF}_{hand}\\) and \\(\\mathrm{SDF}_{obj}\\) to implicitly represent 3D shapes for the hand and the object. 
The \\(\\mathrm{SDF}_{hand}\\) and \\(\\mathrm{SDF}_{obj}\\) map a query 3D point \\(x\\in \\mathbb{R}^3\\) to a signed distance from the hand surface and object surface, respectively. The Marching Cubes algorithm [33] can thus be employed to reconstruct the hand and the object from \\(\\mathrm{SDF}_{hand}\\) and \\(\\mathrm{SDF}_{obj}\\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.589, + 0.254, + 0.605 + ], + "angle": 0, + "content": "3.1. Overview of gSDF" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.613, + 0.471, + 0.673 + ], + "angle": 0, + "content": "Figure 2 illustrates the overview of our gSDF reconstruction approach. Given an image \\(I_{t}\\), we extract two types of features to predict the signed distance for each query point \\(x\\), namely kinematic features and visual features." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.673, + 0.471, + 0.749 + ], + "angle": 0, + "content": "The kinematic feature encodes the position of \\( x \\) under the coordinate system of the hand or the object, which can provide strong pose priors to assist SDF learning. Since the feature is based on canonical hand and object poses, it helps to disentangle shape learning from pose learning." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.473, + 0.902 + ], + "angle": 0, + "content": "The existing work [63] proposes to use hand poses for reconstructing object meshes but does not consider using pose priors to reconstruct hand meshes. Another work [11] only deploys coarse geometry in terms of the hand wrist and object locations, which fails to capture fine-grained details. In this work, we aim to strengthen the kinematic feature with geometry transformation of \\( x \\) to poses of all the hand joints (see Figure 3) for both the hand and the object reconstruction. However, it is challenging to directly predict hand pose parameters [6,28,66]. To improve the hand pose estimation," + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.369, + 0.895, + 0.446 + ], + "angle": 0, + "content": "we propose to first predict sparse 3D joint locations \\( j_{h} \\) from the image and then use inverse kinematics to derive pose transformations \\( \\theta_{h} \\) from the predicted joints. In this way, we are able to obtain kinematic features \\( e_{h} \\) and \\( e_{o} \\) for the hand and the object respectively." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.451, + 0.897, + 0.678 + ], + "angle": 0, + "content": "The visual feature encodes the visual appearance for the point \\( x \\) to provide more shape details. Prior works [11, 26] use the same global visual feature for all the points, e.g., averaging the feature map of an SDF feature encoder on the spatial dimension. Such global visual features suffer from imprecise geometry alignment between a point and its visual appearance. To alleviate the limitation, inspired by [47], we apply the geometry transformation to extract aligned local visual features. Moreover, to address hard cases with occlusions and motion blur in a single image \\( I_{t} \\), we propose to enhance the local visual feature with its temporal contexts from videos using a spatio-temporal transformer. We denote the local visual feature of a point as \\( e_v \\). 
Finally, we concatenate the kinematic feature and local visual feature to predict the signed distance for \\( x \\):" + }, + { + "type": "equation", + "bbox": [ + 0.597, + 0.701, + 0.892, + 0.737 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathrm {S D F} _ {h a n d} (x) = f _ {h} ([ e _ {v}; e _ {h} ]), \\\\ \\mathrm {S D F} _ {o b j} (x) = f _ {o} ([ e _ {v}; e _ {o} ]) \\end{array} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.759, + 0.895, + 0.79 + ], + "angle": 0, + "content": "where \\( f_{h} \\) and \\( f_{o} \\) are the hand SDF decoder and the object SDF decoder respectively." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.897, + 0.9 + ], + "angle": 0, + "content": "In the following, we first present the proposed geometry-driven kinematic feature and visual feature encodings in Section 3.2 and 3.3 respectively. Then, in Section 3.4 we introduce different strategies of sharing image backbones for hand and object pose predictors as well as the SDF feature encoder. Finally, the training strategy of our model is described in Section 3.5." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "12892" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.089, + 0.275, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.289, + 0.089, + 0.465, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.081, + 0.194, + 0.274, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.28, + 0.194, + 0.465, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.323, + 0.471, + 0.38 + ], + "angle": 0, + "content": "Figure 3. We define hand and object features by transforming queries \\( x \\) into hand- and object-centered coordinate systems. Compared to AlignSDF [11] (left), each hand joint in our method defines its own coordinate frame." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.413, + 0.336, + 0.43 + ], + "angle": 0, + "content": "3.2. Kinematic Feature Encoding" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.44, + 0.473, + 0.728 + ], + "angle": 0, + "content": "Hand and object pose estimation. Directly regressing hand pose parameters of MANO from image features [11, 19, 21] has proved to be difficult [6, 28, 66]. In contrast, predicting sparse 3D joint locations is easier and can achieve higher accuracy. Therefore, we first train a 3D hand joint prediction model which produces volumetric heatmaps [38, 44] for 21 hand joints. We use a differentiable soft-argmax operator [50] to extract 3D coordinates \\(\\psi_h \\in \\mathbb{R}^{21 \\times 3}\\) of hand joints from the heatmaps. We then obtain an analytic solution for hand poses \\(\\theta_h \\in \\mathbb{R}^{16 \\times 3}, \\phi_h \\in \\mathbb{R}^{16 \\times 3}\\) from estimated 3D joints \\(\\psi_h\\) using inverse kinematics, where each \\(\\theta_{h,i} \\in \\mathbb{R}^3\\) and \\(\\phi_{h,i} \\in \\mathbb{R}^3\\) denote the relative pose of \\(i_{th}\\) joint in terms of rotation and translation with respect to its ancestor joint. Here, we only calculate the rotation and use the default limb lengths provided by the MANO model. 
Specifically, we first compute the pose of the hand wrist using the template pose defined in MANO, and then follow the hand kinematic chain to solve the pose of other finger joints recursively. More details are presented in the supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.731, + 0.471, + 0.807 + ], + "angle": 0, + "content": "For the object pose estimation, it is often difficult to accurately estimate the rotation of the object since many objects have a high degree of symmetry and are often occluded by hands. We therefore follow [11] and only estimate the center position of the object \\(\\psi_{o} \\in \\mathbb{R}^{3}\\) relative to the hand wrist." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.473, + 0.903 + ], + "angle": 0, + "content": "Hand kinematic feature. Given the 3D point \\(x\\), we generate the hand kinematic feature \\(e_h \\in \\mathbb{R}^{51}\\) by transforming \\(x\\) into canonical coordinate frames defined by hand joints. Figure 3(top,right) illustrates the proposed geometry transformation for the hand. For the \\(i_{th}\\) hand joint pose \\(\\theta_{h,i}, \\phi_{h,i}\\), the pose transformation \\(T_p(x, \\theta_{h,i}, \\phi_{h,i})\\) to obtain the local" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.091, + 0.808, + 0.107 + ], + "angle": 0, + "content": "hand kinematic feature \\(e_{h,i}\\in \\mathbb{R}^3\\) is defined as" + }, + { + "type": "equation", + "bbox": [ + 0.55, + 0.118, + 0.892, + 0.159 + ], + "angle": 0, + "content": "\\[\nG _ {h, i} = \\prod_ {j \\in A (i)} \\left[ \\begin{array}{c c} \\exp (\\theta_ {h, j}) & \\phi_ {h, j} \\\\ \\hline 0 & 1 \\end{array} \\right], \\tag {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.551, + 0.162, + 0.841, + 0.182 + ], + "angle": 0, + "content": "\\[\ne _ {h, i} = T _ {p} (x, \\theta_ {h, i}, \\phi_ {h, i}) = \\widetilde {H} (G _ {h, i} ^ {- 1} \\cdot H (x)),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.193, + 0.895, + 0.346 + ], + "angle": 0, + "content": "where \\(A(i)\\) denotes the ordered set of ancestors of the \\(i_{th}\\) joint. We use Rodrigues formula \\(\\exp (\\cdot)\\) to convert \\(\\theta_{h,i}\\) into the form of a rotation matrix. By traversing the hand kinematic chain, we obtain the global transformation \\(G_{h,i}\\in \\mathbb{R}^{4\\times 4}\\) for the \\(i_{th}\\) joint. Then, we take the inverse of \\(G_{h,i}\\) to transform \\(x\\) into the \\(i_{th}\\) hand joint canonical coordinates. \\(H(\\cdot)\\) transforms \\(x\\) into homogeneous coordinates while \\(\\widetilde{H} (\\cdot)\\) transforms homogeneous coordinates back to Euclidean coordinates. Given local kinematic features \\(e_{h,i}\\), the hand kinematic feature \\(e_h\\in \\mathbb{R}^{51}\\) is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.61, + 0.359, + 0.892, + 0.378 + ], + "angle": 0, + "content": "\\[\ne _ {h} = \\left[ x, e _ {h, 1}, \\dots , e _ {h, 1 6} \\right]. \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.39, + 0.895, + 0.542 + ], + "angle": 0, + "content": "Object kinematic feature. To obtain geometry-aware SDF for object reconstruction, we propose object kinematic feature \\( e_{o} \\in \\mathbb{R}^{72} \\). Following [11], we use estimated object center \\( \\psi_{o} \\) to transform \\( x \\) into the object canonical coordinate frame by the translation transformation \\( x_{oc} = T_t(x, \\psi_o) = x - \\psi_o \\). 
As the grasping hand pose also gives hints about the shape of the manipulated object, similar to [63] we incorporate the knowledge of hand poses into object reconstruction. To this end, for each joint \\( i \\) and its estimated 3D location \\( \\psi_{h,i} \\), we transform \\( x \\) by translation as" + }, + { + "type": "equation", + "bbox": [ + 0.594, + 0.554, + 0.892, + 0.571 + ], + "angle": 0, + "content": "\\[\ne _ {o, i} = T _ {t} \\left(x, \\psi_ {h, i}\\right) = x - \\psi_ {j, i}. \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.582, + 0.894, + 0.673 + ], + "angle": 0, + "content": "Given the importance of the wrist motion for object grasping, we also transform \\( x \\) into the canonical coordinate system of the hand wrist \\( x_{ow} = T_p(x,\\theta_{h,1},\\phi_{h,1}) = \\widetilde{H}(G_{h,1}^{-1}\\cdot H(x)) \\), which normalizes the orientation of the grasping and further simplifies the task for the SDF object decoder. The object kinematic feature is then defined by \\( e_o\\in \\mathbb{R}^{72} \\) as" + }, + { + "type": "equation", + "bbox": [ + 0.58, + 0.687, + 0.892, + 0.704 + ], + "angle": 0, + "content": "\\[\ne _ {o} = \\left[ x, x _ {o c}, e _ {o, 1}, \\dots , e _ {o, 2 1}, x _ {o w} \\right]. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.717, + 0.892, + 0.747 + ], + "angle": 0, + "content": "Figure 3(bottom,right) illustrates the proposed geometry transformation for the object kinematic feature." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.757, + 0.727, + 0.773 + ], + "angle": 0, + "content": "3.3. Visual Feature Encoding" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.895, + 0.902 + ], + "angle": 0, + "content": "Geometry-aligned visual feature. Previous works [11, 26] typically predict signed distances from global image features that lack spatial resolution. Motivated by [47], we aim to generate geometry-aligned local image features for each input point \\( x \\). Assume \\( v_{t}^{r} \\in \\mathbb{R}^{16 \\times 16 \\times d} \\) is the feature map generated from the SDF feature encoder, e.g. a ResNet model [22], where \\( 16 \\times 16 \\) is the spatial feature resolution and \\( d \\) is the feature dimension. We project the 3D input point \\( x \\) to \\( \\hat{x} \\)" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12893" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.091, + 0.465, + 0.167 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.218, + 0.172, + 0.329, + 0.184 + ], + "angle": 0, + "content": "(a) Single backbone." + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.187, + 0.465, + 0.264 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.207, + 0.268, + 0.34, + 0.279 + ], + "angle": 0, + "content": "(b) Symmetric backbone." + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.282, + 0.466, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.204, + 0.364, + 0.343, + 0.375 + ], + "angle": 0, + "content": "(c) Asymmetric backbone." + }, + { + "type": "image_caption", + "bbox": [ + 0.078, + 0.388, + 0.468, + 0.403 + ], + "angle": 0, + "content": "Figure 4. Illustrations of three image backbone sharing strategies." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.428, + 0.469, + 0.473 + ], + "angle": 0, + "content": "on the image plane with the camera projection matrix and use bilinear sampling to obtain a local feature \\(e_v\\) from the location on the feature map corresponding to \\(\\hat{x}\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.476, + 0.47, + 0.763 + ], + "angle": 0, + "content": "Temporally-enhanced visual feature. To improve the robustness of visual features in a single frame \\( I_{t} \\) from occlusion or motion blur, we propose to exploit temporal information from videos to refine \\( v_{t}^{r} \\). Note that due to non-rigid hand motions, we do not assume video frames to contain different views of the same rigid scene. We make use of the spatial-temporal transformer architecture [1,4] to efficiently propagate image features across frames. Assume \\( v_{t-1}^{r}, \\dots, v_{t+1}^{r} \\) are the feature maps from neighboring frames of \\( I_{t} \\) in a video. We flatten all the feature maps as a sequence in the spatial-temporal dimension leading to \\( 3 \\times 16 \\times 16 \\) tokens fed into the transformer model. We reshape the output features of the transformer into a feature map again for \\( I_{t} \\), denoted as \\( v_{t} \\in \\mathbb{R}^{16 \\times 16 \\times d} \\). By aggregating spatial and temporal information from multiple frames, \\( v_{t} \\) becomes more robust to the noise and can potentially produce more stable reconstruction results compared to \\( v_{t}^{r} \\). Our full gSDF model relies on the feature map \\( v_{t} \\) to compute the local visual feature \\( e_{v} \\) for the given input point \\( x \\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.772, + 0.376, + 0.789 + ], + "angle": 0, + "content": "3.4. Image Backbone Sharing Strategy" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.795, + 0.47, + 0.885 + ], + "angle": 0, + "content": "As shown in Figure 2, our model contains three branches for hand and object pose estimations as well as for SDF feature encoding. These different branches may share image backbones which might be beneficial with the multi-task learning. In this section, we describe three alternative strategies for sharing image backbones in our model." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.886, + 0.469, + 0.902 + ], + "angle": 0, + "content": "Single image backbone (Figure 4a). We only employ one" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.892, + 0.121 + ], + "angle": 0, + "content": "single image backbone for both pose and shape predictions. This is the strategy used in AlignSDF [11]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.122, + 0.892, + 0.182 + ], + "angle": 0, + "content": "Symmetric image backbone (Figure 4b). To disentangle pose and shape learning, we share the image backbone for hand and object pose estimation, but use a different backbone to extract visual features for SDFs learning." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.183, + 0.894, + 0.243 + ], + "angle": 0, + "content": "Asymmetric image backbone (Figure 4c). Since hand pose estimation plays a critical role in the task, we use a separate backbone to predict the hand pose, while share the image backbone for object pose predictor and SDF feature encoder." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.252, + 0.604, + 0.268 + ], + "angle": 0, + "content": "3.5. 
Training" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.275, + 0.894, + 0.367 + ], + "angle": 0, + "content": "We apply a two-stage training strategy. In the first stage, we train the hand pose predictor to predict hand joint coordinates \\(\\psi_h\\) with \\(\\ell 2\\) loss \\(\\mathcal{L}_{hp}\\) and an ordinal loss [43] \\(\\mathcal{L}_{ord}\\) to penalize the case if the predicted depth order between the \\(i_{th}\\) joint and the \\(j_{th}\\) joint is misaligned with the ground-truth relation \\(\\mathbb{1}_{i,j}^{ord}\\), which are:" + }, + { + "type": "equation", + "bbox": [ + 0.592, + 0.378, + 0.892, + 0.419 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {h p} = \\frac {1}{2 1} \\sum_ {i = 1} ^ {2 1} \\left\\| \\psi_ {h, i} - \\hat {\\psi} _ {h, i} \\right\\| _ {2} ^ {2}, \\tag {6}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.552, + 0.431, + 0.892, + 0.473 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {o r d} = \\sum_ {j = 2} ^ {2 1} \\sum_ {i = 1} ^ {j - 1} \\mathbb {1} _ {i, j} ^ {o r d} \\times \\left| \\left(\\psi_ {h, i} - \\psi_ {h, j}\\right) \\cdot \\vec {n} \\right|, \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.48, + 0.892, + 0.57 + ], + "angle": 0, + "content": "where \\(\\vec{n} \\in \\mathbb{R}^3\\) denotes the viewpoint direction. We randomly sample twenty virtual views to optimize \\(\\mathcal{L}_{ord}\\). Since the proposed kinematic features are based on the predicted hand joints \\(\\psi_h\\), we empirically find that pretraining the hand joint predictor in the first stage and then freezing its weights can achieve better performance." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.571, + 0.892, + 0.617 + ], + "angle": 0, + "content": "In the second training stage, we learn all the modules except the hand joint predictor in an end-to-end manner. We use the \\(\\ell 2\\) loss \\(\\mathcal{L}_{op}\\) to predict the object pose \\(\\psi_{o}\\) as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.629, + 0.627, + 0.892, + 0.656 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {o p} = \\left\\| \\psi_ {o} - \\hat {\\psi} _ {o} \\right\\| _ {2} ^ {2} \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.668, + 0.894, + 0.743 + ], + "angle": 0, + "content": "where \\(\\hat{\\psi}_o\\) denote the ground-truth location for the object center. To train the SDFs, we sample many 3D points around the hand-object surface and calculate their ground-truth signed distances to the hand mesh and the object mesh. We use \\(\\ell 1\\) loss to optimize the SDF decoders:" + }, + { + "type": "equation", + "bbox": [ + 0.575, + 0.754, + 0.892, + 0.789 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {h s d f} = \\left\\| \\mathrm {S D F} _ {h a n d} - \\mathrm {S D F} _ {h a n d} \\right\\| _ {1} ^ {1}, \\tag {9}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.579, + 0.786, + 0.792, + 0.814 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {o s d f} = \\left\\| \\mathrm {S D F} _ {o b j} - \\hat {\\mathrm {S D F}} _ {o b j} \\right\\| _ {1} ^ {1},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.825, + 0.892, + 0.871 + ], + "angle": 0, + "content": "where \\(\\hat{\\mathrm{SDF}}_{hand}\\) and \\(\\hat{\\mathrm{SDF}}_{obj}\\) denote ground-truth signed distances to the hand and the object, respectively. 
The overall training objective \\(\\mathcal{L}_{shape}\\) in the second training stage is:" + }, + { + "type": "equation", + "bbox": [ + 0.534, + 0.884, + 0.892, + 0.901 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {s h a p e}} = \\mathcal {L} _ {\\text {o p}} + 0. 5 \\times \\mathcal {L} _ {\\text {h s d f}} + 0. 5 \\times \\mathcal {L} _ {\\text {o s d f}}. \\tag {10}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12894" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.077, + 0.09, + 0.21, + 0.108 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.116, + 0.471, + 0.162 + ], + "angle": 0, + "content": "We conduct extensive experiments on two 3D hand-object reconstruction benchmarks to evaluate the effectiveness of our proposed gSDF model." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.173, + 0.18, + 0.187 + ], + "angle": 0, + "content": "4.1. Datasets" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.197, + 0.47, + 0.363 + ], + "angle": 0, + "content": "ObMan [21] is a large-scale synthetic dataset that contains diverse hand grasping poses on a wide range of objects imported from ShapeNet [8]. We follow previous methods [11,26,42,63] to generate data for SDFs training. First, we remove meshes that contain too many double-sided triangles, which results in 87,190 hand-object meshes. Then, we fit the hand-object mesh into a unit cube and sample 40,000 points inside the cube. For each sampled point, we compute its signed distance to the ground-truth hand mesh and object mesh, respectively. At test time, we report the performance on the whole ObMan test set of 6,285 testing samples." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.364, + 0.471, + 0.471 + ], + "angle": 0, + "content": "DexYCB [9] is currently the largest real dataset that captures hand and object interactions in videos. Following [11,60], we focus on right-hand samples and use the official s0 split. We follow the same steps as in ObMan to obtain SDF training samples. To reduce the temporal redundancy, we downsample the video data to 6 frames per second, which results in 29,656 training samples and 5,928 testing samples." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.481, + 0.26, + 0.496 + ], + "angle": 0, + "content": "4.2. Evaluation metrics" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.505, + 0.469, + 0.534 + ], + "angle": 0, + "content": "We follow prior works to comprehensively evaluate the 3D reconstructions with multiple metrics as below." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.536, + 0.471, + 0.656 + ], + "angle": 0, + "content": "Hand Chamfer Distance \\((\\mathrm{CD_h})\\). We evaluate Chamfer distance \\((\\mathrm{cm}^2)\\) between our reconstructed hand mesh and the ground-truth hand mesh. We follow previous works [11, 26] to optimize the scale and translation to align the reconstructed mesh with the ground truth and sample 30,000 points on both meshes to compute Chamfer distance. We report the median Chamfer distance on the test set to reflect the quality of our reconstructed hand mesh." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.657, + 0.469, + 0.732 + ], + "angle": 0, + "content": "Hand F-score \\((\\mathbf{FS_h})\\). Since Chamfer distance is vulnerable to outliers [52, 63], we also report the F-score to evaluate the predicted hand mesh. 
After aligning the hand mesh with its ground truth, we report F-score at \\(1\\mathrm{mm}\\) \\((\\mathrm{FS_h}@\\mathrm{l})\\) and \\(5\\mathrm{mm}\\) \\((\\mathrm{FS_h}@\\mathrm{5})\\) thresholds." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.734, + 0.469, + 0.809 + ], + "angle": 0, + "content": "Object Chamfer Distance \\((\\mathrm{CD_o})\\). Following [11, 26], we first use the optimized hand scale and translation to transform the reconstructed object mesh. Then, we follow the same process as \\(\\mathrm{CD_h}\\) to compute \\(\\mathrm{CD_o}\\) \\((\\mathrm{cm}^2)\\) and evaluate the quality of our reconstructed object mesh." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.469, + 0.855 + ], + "angle": 0, + "content": "Object F-score \\((\\mathrm{FS_o})\\) .We follow the previous work [63] to evaluate the reconstructed object mesh using F-score at 5 mm \\(\\mathrm{(FS_o@5)}\\) and \\(10\\mathrm{mm}\\) \\(\\mathrm{(FS_o@10)}\\) thresholds." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Hand Joint Error \\((\\mathbf{E_h})\\). To measure the hand pose estimation accuracy, we compute the mean joint error (cm) relative to the hand wrist over all 21 joints in the form of \\(\\ell 2\\) distance." + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.089, + 0.894, + 0.117 + ], + "angle": 0, + "content": "Table 1. Hand reconstruction performance with different hand kinematic features \\(\\mathbf{K}_{*}^{h}\\) and visual feature \\(\\mathrm{V}_1\\) on DexYCB dataset." + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.118, + 0.885, + 0.197 + ], + "angle": 0, + "content": "
| | Wrist only | All joints | CDh↓ | FSh@1↑ | FSh@5↑ |
| K1h | × | × | 0.364 | 0.154 | 0.764 |
| K2h | ✓ | × | 0.344 | 0.167 | 0.776 |
| K3h | × | ✓ | 0.317 | 0.171 | 0.788 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.208, + 0.892, + 0.236 + ], + "angle": 0, + "content": "Table 2. Object reconstruction performance with different object kinematic features \\( {\\mathrm{K}}_{ * }^{o} \\) and visual feature \\( {\\mathrm{V}}_{1} \\) on DexYCB dataset." + }, + { + "type": "table", + "bbox": [ + 0.503, + 0.238, + 0.884, + 0.315 + ], + "angle": 0, + "content": "
| | Obj pose | Hand pose | CDo↓ | FSo@5↑ | FSo@10↑ |
| K1o | × | × | 2.06 | 0.392 | 0.660 |
| K2o | ✓ | × | 1.93 | 0.396 | 0.668 |
| K3o | ✓ | ✓ | 1.71 | 0.418 | 0.689 |
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.323, + 0.894, + 0.369 + ], + "angle": 0, + "content": "Object Center Error \\((\\mathbf{E_o})\\). To evaluate the accuracy of our predicted object translation, we report the \\(\\ell 2\\) distance (cm) between the prediction and its ground truth." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.37, + 0.894, + 0.444 + ], + "angle": 0, + "content": "Additionally, we report Contact ratio \\((\\mathrm{C}_r)\\), Penetration depth \\((\\mathrm{P}_d)\\) and Intersection volume \\((\\mathrm{I}_v)\\) [11,21,26,60,62] to present more details about the interaction between the hand mesh and the object mesh. Please see supplementary material for more details." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.453, + 0.715, + 0.469 + ], + "angle": 0, + "content": "4.3. Implementation details" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.476, + 0.892, + 0.582 + ], + "angle": 0, + "content": "Model architecture. We use ResNet-18 [22] as our image backbone. For hand and object pose estimation, we adopt volumetric heatmaps of spatial resolution \\(64 \\times 64 \\times 64\\) to localize hand joints and the object center in 3D space. For the spatial-temporal transformer, we use 16 transformer layers with 4 attention heads. We present more details about our model architecture in supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.583, + 0.894, + 0.809 + ], + "angle": 0, + "content": "Training details. We take the image crop of the hand-object region according to their bounding boxes for DexYCB benchmark. Then, we modify camera intrinsic and extrinsic parameters [35,64] accordingly and take the cropped image as the input to our model. The spatial size of input images is \\(256 \\times 256\\) for all our models. We perform data augmentation including rotation \\(\\left[\\left[-45^{\\circ}, 45^{\\circ}\\right]\\right)\\) and color jittering. During SDF training, we randomly sample 1000 points (500 points inside the mesh and 500 points outside the mesh) for the hand and the object, respectively. We train our model with a batch size of 256 for 1600 epochs on both ObMan and DexYCB using the Adam optimizer [27] with 4 NVIDIA RTX 3090 GPUs. We use an initial learning rate of \\(1 \\times 10^{-4}\\) and decay it by half every 600 epochs. It takes 22 hours for training on DexYCB and 60 hours on ObMan dataset." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.818, + 0.662, + 0.832 + ], + "angle": 0, + "content": "4.4. Ablation studies" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.894, + 0.901 + ], + "angle": 0, + "content": "We carry out ablations on the DexYCB dataset to validate different components in our gSDF model. We evaluate different settings of hand kinematic features \\((\\mathbf{K}_*^h\\) in Table 1), object kinematic features \\((\\mathbf{K}_*^o\\) in Table 2), and visual features" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12895" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.089, + 0.892, + 0.118 + ], + "angle": 0, + "content": "Table 3. Hand-object reconstruction performance with different visual features on DexYCB dataset. The visual features are combined with the best kinematic features \\( {\\mathrm{K}}_{3}^{h} \\) (Table 1) and \\( {\\mathrm{K}}_{3}^{o} \\) (Table 2) to reconstruct hand and object respectively." 
+ }, + { + "type": "table", + "bbox": [ + 0.124, + 0.119, + 0.847, + 0.226 + ], + "angle": 0, + "content": "
GlobalLocalTransformerCDh↓FSh@1↑FSh@5↑CDo↓FSo@5↑FSo@10↑Eh↓Eo↓
SpatialTemp.
V1×××0.3170.1710.7881.710.4180.6891.441.91
V2×××0.3100.1720.7951.710.4260.6941.441.98
V3××0.3040.1740.7971.600.4340.7031.441.94
V4×0.3020.1770.8011.550.4370.7091.441.96
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.239, + 0.89, + 0.267 + ], + "angle": 0, + "content": "Table 4. Hand-object reconstruction performance using different image backbone sharing strategies on DexYCB dataset. The ablation is carried out with visual features \\( {\\mathrm{V}}_{1} \\) and kinematic features \\( {\\mathrm{K}}_{3}^{h} \\) and \\( {\\mathrm{K}}_{3}^{o} \\) ." + }, + { + "type": "table", + "bbox": [ + 0.151, + 0.268, + 0.816, + 0.344 + ], + "angle": 0, + "content": "
BackboneCDh↓FSh@1↑FSh@5↑CDo↓FSo@5↑FSo@10↑Eh↓Eo↓
Single0.4110.1480.7411.880.4020.6741.721.83
Symmetric0.3240.1680.7791.840.4050.6721.461.93
Asymmetric0.3170.1710.7881.710.4180.6891.441.91
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.356, + 0.47, + 0.385 + ], + "angle": 0, + "content": "\\((\\mathrm{V}_{*}\\) in Table 3). We use the asymmetric image backbone if not otherwise mentioned." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.387, + 0.47, + 0.599 + ], + "angle": 0, + "content": "Hand kinematic feature. In Table 1, we evaluate the contribution of the proposed hand kinematic features for 3D hand reconstruction. The model in \\(\\mathrm{K}_1^h\\) does not use any pose priors to transform the 3D point. The model in \\(\\mathrm{K}_2^h\\) only uses the hand wrist pose to transform the 3D point as AlignSDF [11]. Our model in \\(\\mathrm{K}_3^h\\) computes the transformations to all the hand joints, which achieves the best performance on all the evaluation metrics. Compared to \\(\\mathrm{K}_1^h\\) without any pose priors, our model achieves more than \\(12\\%\\) and \\(9\\%\\) improvement on \\(\\mathrm{CD_h}\\) and \\(\\mathrm{FS_h}@\\mathbb{1}\\), respectively. Compared to \\(\\mathrm{K}_2^h\\) with only hand wrist, our model greatly reduces the hand Chamfer distance from \\(0.344~\\mathrm{cm}^2\\) to \\(0.317~\\mathrm{cm}^2\\), leading to \\(7.8\\%\\) relative gains. These results demonstrate the significance of pose priors and the advantage of gSDF for 3D hand reconstruction." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.599, + 0.47, + 0.75 + ], + "angle": 0, + "content": "Object kinematic feature. In Table 2, we validate the effectiveness of our proposed object kinematic feature. The model in \\(\\mathrm{K}_1^o\\) does not contain any pose priors, while the model in \\(\\mathrm{K}_2^o\\) aligns query points to the object center as in [11]. Our model in \\(\\mathrm{K}_3^o\\) further employs the hand pose to produce the object kinematic feature, which significantly boosts the performance for the object reconstruction on different metrics. Compared to \\(\\mathrm{K}_2^o\\), our proposed object kinematic feature achieves more than \\(11\\%\\) and \\(5.5\\%\\) improvement on \\(\\mathrm{CD_o}\\) and \\(\\mathrm{FS_o}@\\mathsf{5}\\), respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Visual features. We compare different visual features for SDF prediction in Table 3. \\( \\mathrm{V}_{1} \\) uses the global visual feature e.g. the average pooling of ResNet feature map as in previous works [11,26]. Our local visual features \\( \\mathrm{V}_{2} \\) derived from the geometry alignment with the query point reduces the hand Chamfer distance from \\( 0.317~\\mathrm{cm}^2 \\) to \\( 0.310~\\mathrm{cm}^2 \\). However, it shows less improvement on the object shape accuracy. In \\( \\mathrm{V}_{3} \\) and \\( \\mathrm{V}_{4} \\), we use the transformer model to refine the feature maps. 
To ablate the improvement from the transformer architecture and from the temporal information" + }, + { + "type": "image_caption", + "bbox": [ + 0.548, + 0.356, + 0.625, + 0.365 + ], + "angle": 0, + "content": "Input Images" + }, + { + "type": "image_caption", + "bbox": [ + 0.643, + 0.356, + 0.743, + 0.365 + ], + "angle": 0, + "content": "Our single-frame model" + }, + { + "type": "image_caption", + "bbox": [ + 0.755, + 0.356, + 0.826, + 0.365 + ], + "angle": 0, + "content": "Our video model" + }, + { + "type": "image", + "bbox": [ + 0.55, + 0.367, + 0.645, + 0.441 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.646, + 0.367, + 0.742, + 0.441 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.743, + 0.367, + 0.839, + 0.441 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.55, + 0.443, + 0.645, + 0.516 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.646, + 0.443, + 0.742, + 0.516 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.743, + 0.443, + 0.839, + 0.516 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.519, + 0.89, + 0.546 + ], + "angle": 0, + "content": "Figure 5. The qualitative comparison between our single-frame model built with the transformer and our video model." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.552, + 0.894, + 0.719 + ], + "angle": 0, + "content": "in videos, we only use transformer for each single frame in \\( \\mathrm{V}_3 \\) while use it for multiple frames in \\( \\mathrm{V}_4 \\). We can see that the transformer architecture alone is beneficial for the reconstruction. Enhancing the visual features with temporal contexts further improves the performance in terms of all the evaluation metrics especially for the objects. In Figure 5, compared with our single-frame model built with the transformer, our video model can make more robust predictions under some hard cases (e.g., motion blur). Although the reconstruction of the can is not accurate in the first example, our model tends to produce more regular shapes." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.895, + 0.901 + ], + "angle": 0, + "content": "Image backbone sharing strategy. Results of using different strategies for image backbone sharing are presented in Table 4. We train all the three models using the two-stage strategy described in Section 3.5. The model with one single backbone achieves the worst performance under most of the evaluation metrics. This is because the pose learning and shape learning compete with each other during training. The symmetric strategy to separate backbones for pose and SDFs performs better than the single backbone model. Our asymmetric strategy with a separate backbone for hand pose estimation and a shared backbone for object pose and SDF feature encoder achieves the best performance. We also em" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12896" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.246, + 0.09, + 0.722, + 0.103 + ], + "angle": 0, + "content": "Table 5. Comparison with state-of-the-art methods on the image ObMan dataset." + }, + { + "type": "table", + "bbox": [ + 0.131, + 0.104, + 0.836, + 0.212 + ], + "angle": 0, + "content": "
MethodsCDh↓FSh@1↑FSh@5↑CDo↓FSo@5↑FSo@10↑Eh↓Eo↓
Hasson et al. [21]0.4150.1380.7513.600.3590.5901.13-
Karunratanakul et al. [26]0.261--6.80----
Ye et al. [63]----0.4200.630--
Chen et al. [11]0.1360.3020.9133.380.4040.6361.273.29
gSDF (Ours)0.1120.3320.9353.140.4380.6600.933.43
" + }, + { + "type": "table_caption", + "bbox": [ + 0.243, + 0.224, + 0.725, + 0.237 + ], + "angle": 0, + "content": "Table 6. Comparison with state-of-the-art methods on the video Dex YCB dataset." + }, + { + "type": "table", + "bbox": [ + 0.131, + 0.238, + 0.836, + 0.346 + ], + "angle": 0, + "content": "
MethodsCDh↓FSh@1↑FSh@5↑CDo↓FSo@5↑FSo@10↑Eh↓Eo↓
Hasson et al. [21]0.5370.1150.6471.940.3830.6421.67-
Karunratanakul et al. [26]0.3640.1540.7642.060.3920.660--
Chen et al. [11]0.3580.1620.7671.830.4100.6791.581.78
Chen et al. [11] 1†0.3440.1670.7761.810.4130.6871.571.93
gSDF (Ours)0.3020.1770.8011.550.4370.7091.441.96
" + }, + { + "type": "image", + "bbox": [ + 0.108, + 0.354, + 0.186, + 0.412 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.109, + 0.413, + 0.185, + 0.471 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.109, + 0.473, + 0.185, + 0.531 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.109, + 0.533, + 0.184, + 0.591 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.109, + 0.592, + 0.185, + 0.649 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.189, + 0.356, + 0.263, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.192, + 0.416, + 0.262, + 0.465 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.192, + 0.472, + 0.262, + 0.526 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.191, + 0.533, + 0.262, + 0.58 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.192, + 0.593, + 0.267, + 0.645 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.282, + 0.355, + 0.359, + 0.411 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.282, + 0.412, + 0.359, + 0.471 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.283, + 0.473, + 0.358, + 0.529 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.283, + 0.531, + 0.358, + 0.589 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.283, + 0.591, + 0.358, + 0.648 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.366, + 0.356, + 0.421, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.368, + 0.413, + 0.42, + 0.466 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.366, + 0.476, + 0.424, + 0.528 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.367, + 0.537, + 0.434, + 0.582 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.366, + 0.592, + 0.432, + 0.645 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.65, + 0.47, + 0.692 + ], + "angle": 0, + "content": "Figure 6. Qualitative results of our model on test images from the ObMan and DexYCB benchmarks. Our model produces convincing results for different grasping poses and diverse objects." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.702, + 0.471, + 0.778 + ], + "angle": 0, + "content": "pirically find that learning the object pose and SDFs together improves both the pose accuracy and the shape accuracy. The possible reason is that estimating object pose also helps our model to focus more on hand-object regions and boosts the 3D reconstruction accuracy." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.783, + 0.364, + 0.8 + ], + "angle": 0, + "content": "4.5. Comparison with state of the art" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.806, + 0.473, + 0.867 + ], + "angle": 0, + "content": "We compare our gSDF model with state-of-the-art methods on ObMan and DexYCB benchmarks. In Figure 6, we qualitatively demonstrate that our approach can produce convincing 3D hand-object reconstruction results." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.078, + 0.875, + 0.47, + 0.9 + ], + "angle": 0, + "content": "\\(^{1\\dagger}\\) To make more fair comparison with Chen et al. [11], we adapt their model to the same asymmetric backbone structure as used in our method." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.355, + 0.895, + 0.522 + ], + "angle": 0, + "content": "ObMan. Table 5 shows the comparison of hand and object reconstruction results on the synthetic ObMan dataset. Since ObMan does not contain video data, we do not use the spatial-temporal transformer in this model. The proposed gSDF outperforms previous methods by a significant margin. Compared with the recent method [63] that only reconstructs hand-held objects, our joint method produces more accurate object meshes. gSDF achieves a \\(17.6\\%\\) improvement on \\(\\mathrm{CD_h}\\) and a \\(7.1\\%\\) improvement on \\(\\mathrm{CD_o}\\) over the state-of-the-art accuracy, which indicates that our model can better reconstruct both hand meshes and diverse object meshes." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.526, + 0.895, + 0.632 + ], + "angle": 0, + "content": "DexYCB. Table 6 presents results on the DexYCB benchmark. We also show the performance of AlignSDF [11] with two backbones ([11]-2BB). Our model demonstrates a large improvement over recent methods. In particular, it advances the state-of-the-art accuracy on \\(\\mathrm{CD_h}\\) and \\(\\mathrm{CD_o}\\) by \\(12.2\\%\\) and \\(14.4\\%\\), respectively. The high accuracy of gSDF on DexYCB demonstrates that it generalizes well to real images." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.648, + 0.619, + 0.663 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.67, + 0.895, + 0.806 + ], + "angle": 0, + "content": "In this work, we propose a geometry-driven SDF (gSDF) approach for 3D hand and object reconstruction. We explicitly model the underlying 3D geometry to guide the SDF learning. We first estimate poses of hands and objects according to kinematic chains of pose transformations, and then derive kinematic features and local visual features using the geometry information for signed distance prediction. Extensive experiments on ObMan and DexYCB datasets demonstrate the effectiveness of our proposed method." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.813, + 0.894, + 0.9 + ], + "angle": 0, + "content": "Acknowledgements. This work was granted access to the HPC resources of IDRIS under the allocation AD011013147 made by GENCI. This work was funded in part by the French government under management of Agence Nationale de la Recherche as part of the \"Investissements d'avenir\" program, reference ANR19-P3IA-0001 (PRAIRIE 3IA Institute) and by Louis Vuitton ENS Chair on Artificial Intelligence. We thank Yana Hasson for helpful discussions." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12897" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.175, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.117, + 0.47, + 0.157 + ], + "angle": 0, + "content": "[1] Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lucic, and Cordelia Schmid. ViViT: A video vision transformer. In ICCV, 2021. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.16, + 0.47, + 0.202 + ], + "angle": 0, + "content": "[2] Seungryul Baek, Kwang In Kim, and Tae-Kyun Kim. 
Pushing the envelope for RGB-based dense 3D hand pose estimation via neural rendering. In CVPR, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.204, + 0.471, + 0.247 + ], + "angle": 0, + "content": "[3] Luca Ballan, Aparna Taneja, Jürgen Gall, Luc Van Gool, and Marc Pollefeys. Motion capture of hands in action using discriminative salient points. In ECCV, 2012. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.249, + 0.47, + 0.289 + ], + "angle": 0, + "content": "[4] Gedas Bertasius, Heng Wang, and Lorenzo Torresani. Is space-time attention all you need for video understanding? In ICML, 2021. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.293, + 0.471, + 0.332 + ], + "angle": 0, + "content": "[5] Adnane Boukhayma, Rodrigo de Bem, and Philip HS Torr. 3D hand shape and pose from images in the wild. In CVPR, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.336, + 0.47, + 0.364 + ], + "angle": 0, + "content": "[6] Romain Brégier. Deep regression on manifolds: a 3D rotation case study. In 3DV, 2021. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.367, + 0.471, + 0.407 + ], + "angle": 0, + "content": "[7] Zhe Cao, Ilija Radosavovic, Angjoo Kanazawa, and Jitendra Malik. Reconstructing hand-object interactions in the wild. In ICCV, 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.41, + 0.471, + 0.478 + ], + "angle": 0, + "content": "[8] Angel X Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, et al. ShapeNet: An information-rich 3D model repository. arXiv preprint arXiv:1512.03012, 2015. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.482, + 0.471, + 0.55 + ], + "angle": 0, + "content": "[9] Yu-Wei Chao, Wei Yang, Yu Xiang, Pavlo Molchanov, Ankur Handa, Jonathan Tremblay, Yashraj S Narang, Karl Van Wyk, Umar Iqbal, Stan Birchfield, et al. DexYCB: A benchmark for capturing hand grasping of objects. In CVPR, 2021. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.553, + 0.47, + 0.622 + ], + "angle": 0, + "content": "[10] Xingyu Chen, Yufeng Liu, Chongyang Ma, Jianlong Chang, Huayan Wang, Tian Chen, Xiaoyan Guo, Pengfei Wan, and Wen Zheng. Camera-space hand mesh recovery via semantic aggregation and adaptive 2D-1D registration. In CVPR, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.626, + 0.47, + 0.68 + ], + "angle": 0, + "content": "[11] Zerui Chen, Yana Hasson, Cordelia Schmid, and Ivan Laptev. AlignSDF: Pose-Aligned signed distance fields for handobject reconstruction. In ECCV, 2022. 1, 2, 3, 4, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.683, + 0.47, + 0.711 + ], + "angle": 0, + "content": "[12] Zhiqin Chen and Hao Zhang. Learning implicit fields for generative shape modeling. In CVPR, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.714, + 0.471, + 0.768 + ], + "angle": 0, + "content": "[13] Enric Corona, Tomas Hodan, Minh Vo, Francesc Moreno-Noguer, Chris Sweeney, Richard Newcombe, and Lingni Ma. LISA: Learning implicit shape and appearance of hands. In CVPR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.771, + 0.47, + 0.813 + ], + "angle": 0, + "content": "[14] Thibault Groueix, Matthew Fisher, Vladimir G Kim, Bryan C Russell, and Mathieu Aubry. A papier-mâché approach to learning 3D surface generation. In CVPR, 2018. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.816, + 0.47, + 0.856 + ], + "angle": 0, + "content": "[15] Henning Hamer, Juergen Gall, Thibaut Weise, and Luc Van Gool. An object-dependent hand pose prior from sparse training data. In CVPR, 2010. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.47, + 0.9 + ], + "angle": 0, + "content": "[16] Henning Hamer, Konrad Schindler, Esther Koller-Meier, and Luc Van Gool. Tracking a hand manipulating an object. In ICCV, 2009. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.117, + 0.471, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.134 + ], + "angle": 0, + "content": "[17] Shreyas Hampali, Mahdi Rad, Markus Oberweger, and Vincent Lepetit. HOnnotate: A method for 3D annotation of hand and object poses. In CVPR, 2020. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.894, + 0.19 + ], + "angle": 0, + "content": "[18] Shreyas Hampali, Sayan Deb Sarkar, Mahdi Rad, and Vincent Lepetit. Keypoint Transformer: Solving joint identification in challenging hands and object interactions for accurate 3D pose estimation. In CVPR, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.193, + 0.894, + 0.247 + ], + "angle": 0, + "content": "[19] Yana Hasson, Bugra Tekin, Federica Bogo, Ivan Laptev, Marc Pollefeys, and Cordelia Schmid. Leveraging photometric consistency over time for sparsely supervised hand-object reconstruction. In CVPR, 2020. 1, 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.249, + 0.894, + 0.289 + ], + "angle": 0, + "content": "[20] Yana Hasson, Gül Varol, Cordelia Schmid, and Ivan Laptev. Towards unconstrained joint hand-object reconstruction from RGB videos. In 3DV, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.291, + 0.894, + 0.345 + ], + "angle": 0, + "content": "[21] Yana Hasson, Gul Varol, Dimitrios Tzionas, Igor Kalevatykh, Michael J Black, Ivan Laptev, and Cordelia Schmid. Learning joint reconstruction of hands and manipulated objects. In CVPR, 2019. 1, 2, 4, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.348, + 0.894, + 0.387 + ], + "angle": 0, + "content": "[22] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.39, + 0.893, + 0.417 + ], + "angle": 0, + "content": "[23] Tony Heap and David Hogg. Towards 3D hand tracking using a deformable model. In FG, 1996. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.42, + 0.894, + 0.46 + ], + "angle": 0, + "content": "[24] Umar Iqbal, Pavlo Molchanov, Thomas Breuel Juergen Gall, and Jan Kautz. Hand pose estimation via latent 2.5D heatmap regression. In ECCV, 2018. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.462, + 0.893, + 0.503 + ], + "angle": 0, + "content": "[25] Korrawe Karunratanakul, Adrian Spurr, Zicong Fan, Otmar Hilliges, and Siyu Tang. A skeleton-driven neural occupancy representation for articulated hands. In 3DV, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.504, + 0.894, + 0.559 + ], + "angle": 0, + "content": "[26] Korrawe Karunratanakul, Jinlong Yang, Yan Zhang, Michael J Black, Krikamol Muandet, and Siyu Tang. Grasping Field: Learning implicit representations for human grasps. In 3DV, 2020. 
1, 2, 3, 4, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.561, + 0.894, + 0.601 + ], + "angle": 0, + "content": "[27] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.604, + 0.893, + 0.645 + ], + "angle": 0, + "content": "[28] Nikos Kolotouros, Georgios Pavlakos, Michael J Black, and Kostas Daniilidis. Learning to reconstruct 3D human pose and shape via model-fitting in the loop. In ICCV, 2019. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.647, + 0.893, + 0.687 + ], + "angle": 0, + "content": "[29] Dominik Kulon, Riza Alp Güler, I. Kokkinos, M. Bronstein, and S. Zafeiriou. Weakly-supervised mesh-convolutional hand reconstruction in the wild. In CVPR, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.689, + 0.894, + 0.73 + ], + "angle": 0, + "content": "[30] Dominik Kulon, Haoyang Wang, Riza Alp Güler, Michael M. Bronstein, and Stefanos Zafeiriou. Single image 3D hand reconstruction with mesh convolutions. In BMVC, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.732, + 0.893, + 0.759 + ], + "angle": 0, + "content": "[31] Vincent Lepetit. Recent advances in 3D object and hand pose estimation. arXiv preprint arXiv:2006.05927, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.761, + 0.894, + 0.813 + ], + "angle": 0, + "content": "[32] Mengcheng Li, Liang An, Hongwen Zhang, Lianpeng Wu, Feng Chen, Tao Yu, and Yebin Liu. Interacting attention graph for single image two-hand reconstruction. In CVPR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.816, + 0.894, + 0.856 + ], + "angle": 0, + "content": "[33] William E Lorensen and Harvey E Cline. Marching Cubes: A high resolution 3D surface construction algorithm. TOG, 1987. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.859, + 0.893, + 0.9 + ], + "angle": 0, + "content": "[34] Jun Lv, Wenqiang Xu, Lixin Yang, Sucheng Qian, Chongzhao Mao, and Cewu Lu. HandTailor: Towards high-precision monocular 3D hand recovery. In BMVC, 2021. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "12898" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.147 + ], + "angle": 0, + "content": "[35] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3D human pose estimation in the wild using improved CNN supervision. In 3DV, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.149, + 0.472, + 0.203 + ], + "angle": 0, + "content": "[36] Hao Meng, Sheng Jin, Wentao Liu, Chen Qian, Mengxiang Lin, Wanli Ouyang, and Ping Luo. 3D interacting hand pose estimation by hand de-occlusion and removal. In ECCV, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.206, + 0.472, + 0.26 + ], + "angle": 0, + "content": "[37] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy Networks: Learning 3D reconstruction in function space. In CVPR, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.263, + 0.472, + 0.317 + ], + "angle": 0, + "content": "[38] Gyeongsik Moon, Ju Yong Chang, and Kyoung Mu Lee. 
V2V- PoseNet: Voxel-to-voxel prediction network for accurate 3D hand and human pose estimation from a single depth map. In CVPR, 2018. 1, 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.32, + 0.472, + 0.374 + ], + "angle": 0, + "content": "[39] Franziska Mueller, Florian Bernard, Oleksandr Sotnychenko, Dushyant Mehta, Srinath Sridhar, Dan Casas, and Christian Theobalt. Ganerated hands for real-time 3D hand tracking from monocular RGB. In CVPR, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.376, + 0.472, + 0.445 + ], + "angle": 0, + "content": "[40] Franziska Mueller, Micah Davis, Florian Bernard, Oleksandr Sotnychenko, Micekal Verschooor, Miguel A Otaduy, Dan Casas, and Christian Theobalt. Real-time pose and shape reconstruction of two interacting hands with a single depth camera. TOG, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.447, + 0.472, + 0.5 + ], + "angle": 0, + "content": "[41] Iason Oikonomidis, Nikolaos Kyriazis, and Antonis A Argyros. Full DOF tracking of a hand interacting with an object by modeling occlusions and physical constraints. In ICCV, 2011. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.504, + 0.472, + 0.558 + ], + "angle": 0, + "content": "[42] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning continuous signed distance functions for shape representation. In CVPR, 2019. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.561, + 0.472, + 0.601 + ], + "angle": 0, + "content": "[43] Georgios Pavlakos, Xiaowei Zhou, and Kostas Daniilidis. Ordinal depth supervision for 3D human pose estimation. In CVPR, 2018. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.604, + 0.472, + 0.645 + ], + "angle": 0, + "content": "[44] Georgios Pavlakos, Xiaowei Zhou, Konstantinos G Derpanis, and Kostas Daniilidis. Coarse-to-fine volumetric prediction for single-image 3D human pose. In CVPR, 2017. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.647, + 0.472, + 0.687 + ], + "angle": 0, + "content": "[45] James M Rehg and Takeo Kanade. Visual tracking of high DOF articulated structures: an application to human hand tracking. In ECCV, 1994. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.689, + 0.472, + 0.731 + ], + "angle": 0, + "content": "[46] Javier Romero, Dimitrios Tzionas, and Michael J. Black. Embodied Hands: Modeling and capturing hands and bodies together. TOG, 2017. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.733, + 0.472, + 0.786 + ], + "angle": 0, + "content": "[47] Shunsuke Saito, Zeng Huang, Ryota Natsume, Shigeo Morishima, Angjoo Kanazawa, and Hao Li. PiFu: Pixel-aligned implicit function for high-resolution clothed human digitization. In ICCV, 2019. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.789, + 0.472, + 0.842 + ], + "angle": 0, + "content": "[48] Adrian Spurr, Aneesh Dahiya, Xi Wang, Xuong Zhang, and Otmar Hilliges. Self-supervised 3D hand pose estimation from monocular RGB via contrastive learning. In ICCV, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.472, + 0.9 + ], + "angle": 0, + "content": "[49] Srinath Sridhar, Franziska Mueller, Michael Zollhöfer, Dan Casas, Antti Oulasvirta, and Christian Theobalt. Real-time joint tracking of a hand manipulating an object from RGB-D input. In ECCV, 2016. 
2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "[50] Xiao Sun, Bin Xiao, Fangyin Wei, Shuang Liang, and Yichen Wei. Integral human pose regression. In ECCV, 2018. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.122, + 0.894, + 0.163 + ], + "angle": 0, + "content": "[51] Danhang Tang, Hyung Jin Chang, Alykhan Tejani, and Tae Kyun Kim. Latent regression forest: Structured estimation of 3D articulated hand posture. In CVPR, 2014. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.165, + 0.892, + 0.205 + ], + "angle": 0, + "content": "[52] Maxim Tatarchenko, Stephan R Richter, René Ranftl, Zhuwen Li, Vladlen Koltun, and Thomas Brox. What do single-view 3D reconstruction networks learn? In CVPR, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.208, + 0.892, + 0.248 + ], + "angle": 0, + "content": "[53] Bugra Tekin, Federica Bogo, and Marc Pollefeys. \\(\\mathrm{H} + \\mathrm{O}\\): Unified egocentric recognition of 3D hand-object poses and interactions. In CVPR, 2019. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.251, + 0.892, + 0.304 + ], + "angle": 0, + "content": "[54] Tze Ho Elden Tse, Kwang In Kim, Ales Leonardis, and Hyung Jin Chang. Collaborative learning for hand and object reconstruction with attention-guided graph convolution. In CVPR, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.307, + 0.892, + 0.346 + ], + "angle": 0, + "content": "[55] Aggeliki Tsoli and Antonis A Argyros. Joint 3D tracking of a deformable object in interaction with a hand. In ECCV, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.35, + 0.892, + 0.376 + ], + "angle": 0, + "content": "[56] Dimitrios Tzionas and Juergen Gall. 3D object reconstruction from hand-object interactions. In ICCV, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.379, + 0.892, + 0.446 + ], + "angle": 0, + "content": "[57] Jiayi Wang, Franziska Mueller, Florian Bernard, Suzanne Sorli, Oleksandr Sotnychenko, Neng Qian, Miguel A Otaduy, Dan Casas, and Christian Theobalt. RGB2Hands: Real-time tracking of 3D hand interactions from monocular RGB video. TOG, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.449, + 0.892, + 0.503 + ], + "angle": 0, + "content": "[58] Yangang Wang, Jianyuan Min, Jianjie Zhang, Yebin Liu, Feng Xu, Qionghai Dai, and Jinxiang Chai. Video-based hand manipulation capture through composite motion control. TOG, 2013. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.506, + 0.892, + 0.56 + ], + "angle": 0, + "content": "[59] Fu Xiong, Boshen Zhang, Yang Xiao, Zhiguo Cao, Taidong Yu, Joey Tianyi Zhou, and Junsong Yuan. A2J: Anchor-to-joint regression network for 3D articulated pose estimation from a single depth image. In ICCV, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.563, + 0.892, + 0.617 + ], + "angle": 0, + "content": "[60] Lixin Yang, Kailin Li, Xinyu Zhan, Jun Lv, Wenqiang Xu, Jiefeng Li, and Cewu Lu. ArtiBoost: Boosting articulated 3D hand-object pose estimation via online exploration and synthesis. In CVPR, 2022. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.62, + 0.892, + 0.673 + ], + "angle": 0, + "content": "[61] Lixin Yang, Kailin Li, Xinyu Zhan, Fei Wu, Anran Xu, Liu Liu, and Cewu Lu. OakInk: A large-scale knowledge repository for understanding hand-object interaction. 
In CVPR, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.676, + 0.892, + 0.717 + ], + "angle": 0, + "content": "[62] Lixin Yang, Xinyu Zhan, Kailin Li, Wenqiang Xu, Jiefeng Li, and Cewu Lu. CPF: Learning a contact potential field to model the hand-object interaction. In ICCV, 2021. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.719, + 0.892, + 0.759 + ], + "angle": 0, + "content": "[63] Yufei Ye, Abhinav Gupta, and Shubham Tulsiani. What's in your hands? 3D reconstruction of generic objects in hands. In CVPR, 2022. 2, 3, 4, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.762, + 0.892, + 0.802 + ], + "angle": 0, + "content": "[64] Frank Yu, Mathieu Salzmann, Pascal Fua, and Helge Rhodin. PCLs: Geometry-aware neural reconstruction of 3D pose with perspective crop layers. In CVPR, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.804, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[65] Shanxin Yuan, Guillermo Garcia-Hernando, Björn Stenger, Gyeongsik Moon, Ju Yong Chang, Kyoung Mu Lee, Pavlo Molchanov, Jan Kautz, Sina Honari, Liuhao Ge, Junsong Yuan, Xinghao Chen, Guijin Wang, Fan Yang, Kai Akiyama, Yang Wu, Qingfu Wan, Meysam Madadi, Sergio Escalera, Shile Li, Dongheui Lee, Iason Oikonomidis, Antonis Argyros, and Tae-Kyun Kim. Depth-based 3D hand pose estimation:" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "12899" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.092, + 0.469, + 0.12 + ], + "angle": 0, + "content": "From current achievements to future goals. In CVPR, June 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.469, + 0.163 + ], + "angle": 0, + "content": "[66] Yi Zhou, Connelly Barnes, Jingwan Lu, Jimei Yang, and Hao Li. On the continuity of rotation representations in neural networks. In CVPR, 2019. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.165, + 0.469, + 0.205 + ], + "angle": 0, + "content": "[67] Christian Zimmermann and Thomas Brox. Learning to estimate 3D hand pose from single RGB images. In ICCV, 2017. 
1, 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12900" + } + ] +] \ No newline at end of file diff --git a/2023/gSDF_ Geometry-Driven Signed Distance Functions for 3D Hand-Object Reconstruction/ab15db8d-1011-4beb-9528-c9481ccf45d6_origin.pdf b/2023/gSDF_ Geometry-Driven Signed Distance Functions for 3D Hand-Object Reconstruction/ab15db8d-1011-4beb-9528-c9481ccf45d6_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..803fc91afdc56b85746838c0bb64d56af865cf16 --- /dev/null +++ b/2023/gSDF_ Geometry-Driven Signed Distance Functions for 3D Hand-Object Reconstruction/ab15db8d-1011-4beb-9528-c9481ccf45d6_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fbb3eb5cf222303bfd11767014393b7065968d33d839689e346c229accdd0ede +size 1859142 diff --git a/2023/gSDF_ Geometry-Driven Signed Distance Functions for 3D Hand-Object Reconstruction/full.md b/2023/gSDF_ Geometry-Driven Signed Distance Functions for 3D Hand-Object Reconstruction/full.md new file mode 100644 index 0000000000000000000000000000000000000000..1a4f83855bae4bb91fce9478351bc0c67d3122ae --- /dev/null +++ b/2023/gSDF_ Geometry-Driven Signed Distance Functions for 3D Hand-Object Reconstruction/full.md @@ -0,0 +1,445 @@ +# gSDF: Geometry-Driven Signed Distance Functions for 3D Hand-Object Reconstruction + +Zerui Chen + +Shizhe Chen + +Cordelia Schmid + +Ivan Laptev + +Inria, École normale supérieure, CNRS, PSL Research Univ., 75005 Paris, France + +firstname.lastname@inria.fr + +# Abstract + +Signed distance functions (SDFs) is an attractive framework that has recently shown promising results for 3D shape reconstruction from images. SDFs seamlessly generalize to different shape resolutions and topologies but lack explicit modelling of the underlying 3D geometry. In this work, we exploit the hand structure and use it as guidance for SDF-based shape reconstruction. In particular, we address reconstruction of hands and manipulated objects from monocular RGB images. To this end, we estimate poses of hands and objects and use them to guide 3D reconstruction. More specifically, we predict kinematic chains of pose transformations and align SDFs with highly-articulated hand poses. We improve the visual features of 3D points with geometry alignment and further leverage temporal information to enhance the robustness to occlusion and motion blurs. We conduct extensive experiments on the challenging ObMan and DexYCB benchmarks and demonstrate significant improvements of the proposed method over the state of the art. + +# 1. Introduction + +Understanding how hands interact with objects is becoming increasingly important for widespread applications, including virtual reality, robotic manipulation and human-computer interaction. Compared to 3D estimation of sparse hand joints [24,38,51,53,67], joint reconstruction of hands and object meshes [11, 18, 21, 26, 62] provides rich information about hand-object interactions and has received increased attention in recent years. + +To reconstruct high-quality meshes, some recent works [9, 17, 61] explore multi-view image inputs. Multi-view images, however, are less common both for training and testing scenarios. In this work, we focus on a more practical and user-friendly setting where we aim to reconstruct hand and object meshes from monocular RGB images. 
Given the ill-posed nature of the task, many existing methods [7, 19, 21, 54, 62] employ parametric mesh models (e.g., MANO [46]) to im + +![](images/527adaaf0d94728c6cef36247c014db1a2971619c5891a07228162e574525f61.jpg) + +![](images/0101e1ed915fabefe96f599e74a8be60a2f28721973d0da39113d65b0623b487.jpg) + +![](images/e9a24ed55ed9a034f6e98cdc5905a4854a353a9a30987c24fef26688e57761a0.jpg) +gSDF + +![](images/930162f97d68298d59444a1e5ab0920b6530a1751ebcfac02c1fe9dbc0ea6f13.jpg) + +![](images/1b1769b767cc71b8b5a0b055e8b51352741b96b36c76b761da995b8b93eb34ff.jpg) +Figure 1. We aim to reconstruct 3D hand and object meshes from monocular images (top). Our method gSDF (middle) first predicts 3D hand joints (blue) and object locations (red) from input images. We use estimated hand poses and object locations to incorporate strong geometric priors into SDF by generating hand- and object-aware kinematic features for each SDF query point. Our resulting gSDF model generates accurate results for real images with various objects and grasping hand poses (bottom). + +![](images/3fbc938809057c339e88a7bb8d0ff47ccf23b8af98ac8617d0fd901344b357d9.jpg) + +![](images/4a1a563a15f47748a088359df125a6570a6b1dc58f1080f2d38d0b31e57163e9.jpg) + +![](images/ad192d46e9560e1148a60d5966194b8e3af4f3fc412c3047cab7eb537367d8ed.jpg) + +![](images/b25647d7d93da1380b6c3a96306a849f93a0482f741ad7b0b0e71833b5ed57de.jpg) +gSDF + +![](images/62f855d278fa1773f1ccd345c22d665c228c5025bdf03be93579addd06590db3.jpg) + +![](images/497e1728de1b8fbc5a2151c53cdd64addffc970583c47f9fcdf96fd3962bb525.jpg) + +![](images/32542e1d490c939963f930ab36bafdb27f211b811804d61961326a153108a97d.jpg) + +![](images/08b8aa3975bd581498a60f555dd8d76e8ded955cf04ab2f8770c8ea9c8e38b36.jpg) +gSDF + +![](images/edbdf5237d3dd6f6eb8457cc4d27704ccd1490f337b318a0858c637ff68236a9.jpg) + +![](images/70890cfc46200935f42539a0e94cd1527bee74410b7c6512a569baa97443b19a.jpg) + +![](images/1d92d9abaeb0505145b8c2e27e8d30a41950b3b74e2df184eb0c889c34160c26.jpg) + +![](images/79e91292827ed0312f99c1f1bd6de33f036784d32fdf77c6fa9dbc407bd1262d.jpg) + +pose prior knowledge and reduce ambiguities in 3D hand reconstruction. MANO hand meshes, however, have relatively limited resolution and can be suboptimal for the precise capture of hand-object interactions. + +To reconstruct detailed hand and object meshes, another line of efforts [11, 26] employ signed distance functions (SDFs). Grasping Field [26] makes the first attempt to model hand and object surfaces using SDFs. However, it does not explicitly associate 3D geometry with image cues and has no prior knowledge incorporated in SDFs, leading to unrealistic meshes. AlignSDF [11] proposes to align SDFs with respect to global poses (i.e., the hand wrist transformation and the + +object translation) and produces improved results. However, it is still challenging to capture geometric details for more complex hand motions and manipulations of diverse objects, which involve the articulation of multiple fingers. + +To address limitations of prior works, we propose a geometry-driven SDF (gSDF) method that encodes strong pose priors and improves reconstruction by disentangling pose and shape estimation (see Figure 1). To this end, we first predict sparse 3D hand joints from images and derive full kinematic chains of local pose transformations from joint locations using inverse kinematics. 
Instead of only using the global pose as in [11], we optimize SDFs with respect to poses of all the hand joints, which leads to a more fine-grained alignment between the 3D shape and articulated hand poses. In addition, we project 3D points onto the image plane to extract geometry-aligned visual features for signed distance prediction. The visual features are further refined with spatio-temporal contexts using a transformer model to enhance the robustness to occlusions and motion blurs. + +We conduct extensive ablation experiments to show the effectiveness of different components in our approach. The proposed gSDF model greatly advances state-of-the-art accuracy on the challenging ObMan and DexYCB benchmarks. Our contributions can be summarized in three-fold: (i) To embed strong pose priors into SDFs, we propose to align the SDF shape with its underlying kinematic chains of pose transformations, which reduces ambiguities in 3D reconstruction. (ii) To further reduce the misalignment induced by inaccurate pose estimations, we propose to extract geometry-aligned local visual features and enhance the robustness with spatio-temporal contexts. (iii) We conduct comprehensive experiments to show that our approach outperforms state-of-the-art results by a significant margin. + +# 2. Related Work + +This paper focuses on jointly reconstructing hands and hand-held objects from RGB images. In this section, we first review previous works on the 3D hand pose and shape estimation. We then discuss relevant works on the joint reconstruction of hands and objects. + +3D hand pose and shape estimation. The topic of 3D hand pose estimation has received widespread attention since the 90s [23, 45] and has seen significant progress in recent years [31, 65]. Methods which take RGB images as input [24, 36, 38, 39, 48, 50, 51, 53, 59, 67] often estimate sparse 3D hand joint locations from visual data using well-designed deep neural networks. Though these methods can achieve high estimation accuracy, their outputs of 3D sparse joints provide limited information about the 3D hand surface, which is critical in AR/VR applications. Following the introduction of the anthropomorphic parametric hand mesh model MANO [46], several works [2, 5, 10, 18, 29, 30, 32, 34, 40, 57] estimate the MANO hand shape and pose parameters to + +recover the full hand surface. However, MANO has a limited mesh resolution and cannot produce fine surface details. Neural implicit functions [13,25] have the potential to reconstruct more realistic high resolution hand surfaces [12,37,42]. In this work, we combine the advantages of sparse, parametric and implicit modelling. We predict sparse 3D joints accurately from images and estimate the MANO parameters using inverse kinematics. We then optimize neural implicit functions with respect to underlying kinematic structures and reconstruct realistic meshes. + +3D hand and object reconstruction. Joint reconstruction of hand and object meshes provides a more comprehensive view about how hands interact with manipulated objects in the 3D space and has received more attention in the past few years. Previous works often rely on multiview correspondence [3,9,17,41,58,61] or additional depth information [15, 16, 49, 55, 56] to approach this task. In this work, we focus on a more challenging setting and perform a joint reconstruction from monocular RGB images. 
Given the ill-posed nature of this problem, many works [7, 18-21, 54, 60, 62] deploy MANO, which encodes hand prior knowledge learned from hand scans, to reconstruct hand meshes. To further simplify the object reconstruction task, several works [18, 60, 62] make a strong assumption that the ground-truth object model is available at test time. Our work and some previous efforts [11, 21, 26] relax this assumption and assume unknown object models. Hasson et al. [21] employ a differentiable MANO layer to estimate the hand shape and AtlasNet [14] to reconstruct the manipulated object. However, both MANO and AtlasNet can only produce meshes of limited resolution, which prevents the modelling of detailed contacts between hands and objects. To generate more detailed surfaces, Karunratanakul et al. [26] introduce grasping fields and propose to use SDFs to reconstruct both hand and object meshes. However, such a model-free approach does not capture any prior knowledge about hands or objects, which can lead to predicting unrealistic 3D geometry. To mitigate this, Ye et al. [63] propose to use hand poses estimated from an off-the-shelf model to help reconstruct the hand-held object mesh. The main difference with our work is that we jointly reconstruct hand meshes and object meshes using our proposed model, which is more challenging. Also, in addition to using hand poses to help capture the object shapes, we predict object poses and show their benefits for SDF-based object reconstruction. Another work AlignSDF [11] optimizes SDFs with respect to estimated hand-object global poses and encodes pose priors into SDFs. In addition to using global poses as a guide for SDFs, we propose to learn SDFs from the full kinematic chains of local pose transformations, and achieve a more precise alignment between the 3D shape and the underlying poses. To further handle hard cases induced by occlusion or motion blur where pose estimations are inaccurate, we leverage + +![](images/9b8c982f7483297e13d7b608462e4d57d786c39f3cc17a94fefbd4d06d0d9e33.jpg) +Figure 2. The overview of our proposed single-frame model. Our method reconstructs realistic hand and object meshes from a single RGB image. Marching Cubes algorithm [33] is used at test time to extract meshes. + +a transformer to accumulate corresponding image features from multiple frames and benefit the geometry recovery. + +# 3. Method + +This section presents our geometry-driven SDF (gSDF) method for 3D hand and object reconstruction from monocular RGB images. We aim to learn two signed distance functions $\mathrm{SDF}_{hand}$ and $\mathrm{SDF}_{obj}$ to implicitly represent 3D shapes for the hand and the object. The $\mathrm{SDF}_{hand}$ and $\mathrm{SDF}_{obj}$ map a query 3D point $x\in \mathbb{R}^3$ to a signed distance from the hand surface and object surface, respectively. The Marching Cubes algorithm [33] can thus be employed to reconstruct the hand and the object from $\mathrm{SDF}_{hand}$ and $\mathrm{SDF}_{obj}$ . + +# 3.1. Overview of gSDF + +Figure 2 illustrates the overview of our gSDF reconstruction approach. Given an image $I_{t}$ , we extract two types of features to predict the signed distance for each query point $x$ , namely kinematic features and visual features. + +The kinematic feature encodes the position of $x$ under the coordinate system of the hand or the object, which can provide strong pose priors to assist SDF learning. Since the feature is based on canonical hand and object poses, it helps to disentangles shape learning from pose learning. 
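To make the role of the kinematic feature more concrete, the short sketch below is ours and not the authors' released code: it shows how a query point could be re-expressed in per-joint canonical frames once global joint transforms are available from the pose branch. All names are illustrative, and the transforms are assumed to come from the inverse-kinematics step described in Section 3.2.

```python
# Illustrative sketch only (not the authors' implementation): re-express a query
# point in per-joint canonical frames, in the spirit of the hand kinematic feature.
import numpy as np

def hand_kinematic_feature(x, joint_transforms):
    """x: (3,) query point; joint_transforms: list of (4, 4) global joint
    transforms G_i, assumed to be recovered by inverse kinematics."""
    x_h = np.append(x, 1.0)               # homogeneous coordinates H(x)
    feats = [x]                           # keep the point in the original frame
    for G in joint_transforms:            # one canonical frame per hand joint
        x_local = np.linalg.inv(G) @ x_h  # G_i^{-1} . H(x)
        feats.append(x_local[:3])         # back to Euclidean coordinates
    return np.concatenate(feats)          # 3 * (1 + #joints) values, e.g. 51 for 16 joints

# Toy usage with identity transforms for 16 joints (placeholder values).
e_h = hand_kinematic_feature(np.array([0.1, -0.2, 0.05]), [np.eye(4)] * 16)
print(e_h.shape)  # (51,)
```

An object-centered variant can be built analogously by translating $x$ with the estimated object center and hand joint locations, as detailed in Section 3.2.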
The existing work [63] proposes to use hand poses for reconstructing object meshes but does not consider using pose priors to reconstruct hand meshes. Another work [11] only deploys coarse geometry in terms of the hand wrist and object locations, which fails to capture fine-grained details. In this work, we aim to strengthen the kinematic feature with geometry transformations of $x$ to the poses of all the hand joints (see Figure 3) for both hand and object reconstruction. However, it is challenging to directly predict hand pose parameters [6,28,66]. To improve the hand pose estimation, we propose to first predict sparse 3D joint locations $j_{h}$ from the image and then use inverse kinematics to derive pose transformations $\theta_{h}$ from the predicted joints. In this way, we are able to obtain kinematic features $e_{h}$ and $e_{o}$ for the hand and the object, respectively.

The visual feature encodes the visual appearance of the point $x$ to provide more shape details. Prior works [11, 26] use the same global visual feature for all the points, e.g., averaging the feature map of an SDF feature encoder over the spatial dimension. Such global visual features suffer from imprecise geometry alignment between a point and its visual appearance. To alleviate this limitation, inspired by [47], we apply the geometry transformation to extract aligned local visual features. Moreover, to address hard cases with occlusions and motion blur in a single image $I_{t}$, we propose to enhance the local visual feature with its temporal contexts from videos using a spatio-temporal transformer. We denote the local visual feature of a point as $e_v$. Finally, we concatenate the kinematic feature and the local visual feature to predict the signed distance for $x$:

$$
\begin{array}{l}
\mathrm{SDF}_{hand}(x) = f_{h}([e_{v}; e_{h}]), \\
\mathrm{SDF}_{obj}(x) = f_{o}([e_{v}; e_{o}]),
\end{array} \tag{1}
$$

where $f_{h}$ and $f_{o}$ are the hand SDF decoder and the object SDF decoder, respectively.

In the following, we first present the proposed geometry-driven kinematic feature and visual feature encodings in Sections 3.2 and 3.3, respectively. Then, in Section 3.4 we introduce different strategies for sharing image backbones between the hand and object pose predictors as well as the SDF feature encoder. Finally, the training strategy of our model is described in Section 3.5.

![](images/f34350c6deed27ad5477d84b749b1f608ddac3aa1cd56b24bf55e... "")

![](images/d3016ffe59ac56b52df47e56a812684a22855a7924bb78ac556142fa44c5473a.jpg)

![](images/551a9977fb27793d12b2e9380a58e895ad757e56a21c4cf6256218d284242093.jpg)
Figure 3. We define hand and object features by transforming queries $x$ into hand- and object-centered coordinate systems. Compared to AlignSDF [11] (left), each hand joint in our method defines its own coordinate frame.

![](images/2351cd87d20dfea867192f8de15c2fd463fbaa5e9e442690d712867b537726fb.jpg)

# 3.2. Kinematic Feature Encoding

Hand and object pose estimation. Directly regressing hand pose parameters of MANO from image features [11, 19, 21] has proved to be difficult [6, 28, 66]. In contrast, predicting sparse 3D joint locations is easier and can achieve higher accuracy. Therefore, we first train a 3D hand joint prediction model which produces volumetric heatmaps [38, 44] for 21 hand joints.
We use a differentiable soft-argmax operator [50] to extract 3D coordinates $\psi_h \in \mathbb{R}^{21 \times 3}$ of hand joints from the heatmaps. We then obtain an analytic solution for hand poses $\theta_h \in \mathbb{R}^{16 \times 3}, \phi_h \in \mathbb{R}^{16 \times 3}$ from the estimated 3D joints $\psi_h$ using inverse kinematics, where $\theta_{h,i} \in \mathbb{R}^{3}$ and $\phi_{h,i} \in \mathbb{R}^{3}$ denote the relative pose of the $i_{th}$ joint in terms of rotation and translation with respect to its ancestor joint. Here, we only calculate the rotation and use the default limb lengths provided by the MANO model. Specifically, we first compute the pose of the hand wrist using the template pose defined in MANO, and then follow the hand kinematic chain to solve the poses of the other finger joints recursively. More details are presented in the supplementary material.

For the object pose estimation, it is often difficult to accurately estimate the rotation of the object since many objects have a high degree of symmetry and are often occluded by hands. We therefore follow [11] and only estimate the center position of the object $\psi_{o} \in \mathbb{R}^{3}$ relative to the hand wrist.

Hand kinematic feature. Given the 3D point $x$, we generate the hand kinematic feature $e_h \in \mathbb{R}^{51}$ by transforming $x$ into canonical coordinate frames defined by the hand joints. Figure 3(top,right) illustrates the proposed geometry transformation for the hand. For the $i_{th}$ hand joint pose $\theta_{h,i}, \phi_{h,i}$, the pose transformation $T_p(x, \theta_{h,i}, \phi_{h,i})$ to obtain the local hand kinematic feature $e_{h,i}\in \mathbb{R}^3$ is defined as

$$
G_{h,i} = \prod_{j \in A(i)} \left[ \begin{array}{cc} \exp(\theta_{h,j}) & \phi_{h,j} \\ 0 & 1 \end{array} \right], \tag{2}
$$

$$
e_{h,i} = T_{p}(x, \theta_{h,i}, \phi_{h,i}) = \widetilde{H}(G_{h,i}^{-1} \cdot H(x)),
$$

where $A(i)$ denotes the ordered set of ancestors of the $i_{th}$ joint. We use the Rodrigues formula $\exp(\cdot)$ to convert $\theta_{h,i}$ into the form of a rotation matrix. By traversing the hand kinematic chain, we obtain the global transformation $G_{h,i}\in \mathbb{R}^{4\times 4}$ for the $i_{th}$ joint. Then, we take the inverse of $G_{h,i}$ to transform $x$ into the $i_{th}$ hand joint canonical coordinates. $H(\cdot)$ transforms $x$ into homogeneous coordinates while $\widetilde{H}(\cdot)$ transforms homogeneous coordinates back to Euclidean coordinates. Given the local kinematic features $e_{h,i}$, the hand kinematic feature $e_h\in \mathbb{R}^{51}$ is defined as:

$$
e_{h} = \left[ x, e_{h,1}, \dots, e_{h,16} \right]. \tag{3}
$$

Object kinematic feature. To obtain a geometry-aware SDF for object reconstruction, we propose an object kinematic feature $e_{o} \in \mathbb{R}^{72}$. Following [11], we use the estimated object center $\psi_{o}$ to transform $x$ into the object canonical coordinate frame via the translation transformation $x_{oc} = T_t(x, \psi_o) = x - \psi_o$. As the grasping hand pose also gives hints about the shape of the manipulated object, similar to [63] we incorporate the knowledge of hand poses into object reconstruction. To this end, for each joint $i$ and its estimated 3D location $\psi_{h,i}$, we transform $x$ by translation as

$$
e_{o,i} = T_{t}\left(x, \psi_{h,i}\right) = x - \psi_{h,i}.
\tag {4} +$$ + +Given the importance of the wrist motion for object grasping, we also transform $x$ into the canonical coordinate system of the hand wrist $x_{ow} = T_p(x,\theta_{h,1},\phi_{h,1}) = \widetilde{H}(G_{h,1}^{-1}\cdot H(x))$ , which normalizes the orientation of the grasping and further simplifies the task for the SDF object decoder. The object kinematic feature is then defined by $e_o\in \mathbb{R}^{72}$ as + +$$ +e _ {o} = \left[ x, x _ {o c}, e _ {o, 1}, \dots , e _ {o, 2 1}, x _ {o w} \right]. \tag {5} +$$ + +Figure 3(bottom,right) illustrates the proposed geometry transformation for the object kinematic feature. + +# 3.3. Visual Feature Encoding + +Geometry-aligned visual feature. Previous works [11, 26] typically predict signed distances from global image features that lack spatial resolution. Motivated by [47], we aim to generate geometry-aligned local image features for each input point $x$ . Assume $v_{t}^{r} \in \mathbb{R}^{16 \times 16 \times d}$ is the feature map generated from the SDF feature encoder, e.g. a ResNet model [22], where $16 \times 16$ is the spatial feature resolution and $d$ is the feature dimension. We project the 3D input point $x$ to $\hat{x}$ + +![](images/e508e991fdf28c4839c4ac860e62390834ab9612d013c2de1f2f1294c9129aea.jpg) +(a) Single backbone. + +![](images/5f164d4994fa36e30dfd0fb25c76d4c17e360706b8c42017358c9dbf0291e986.jpg) +(b) Symmetric backbone. + +![](images/205075424b7e82596f85b701c053aeddd710494c31f70fcc7951701294bc47ab.jpg) +(c) Asymmetric backbone. +Figure 4. Illustrations of three image backbone sharing strategies. + +on the image plane with the camera projection matrix and use bilinear sampling to obtain a local feature $e_v$ from the location on the feature map corresponding to $\hat{x}$ . + +Temporally-enhanced visual feature. To improve the robustness of visual features in a single frame $I_{t}$ from occlusion or motion blur, we propose to exploit temporal information from videos to refine $v_{t}^{r}$ . Note that due to non-rigid hand motions, we do not assume video frames to contain different views of the same rigid scene. We make use of the spatial-temporal transformer architecture [1,4] to efficiently propagate image features across frames. Assume $v_{t-1}^{r}, \dots, v_{t+1}^{r}$ are the feature maps from neighboring frames of $I_{t}$ in a video. We flatten all the feature maps as a sequence in the spatial-temporal dimension leading to $3 \times 16 \times 16$ tokens fed into the transformer model. We reshape the output features of the transformer into a feature map again for $I_{t}$ , denoted as $v_{t} \in \mathbb{R}^{16 \times 16 \times d}$ . By aggregating spatial and temporal information from multiple frames, $v_{t}$ becomes more robust to the noise and can potentially produce more stable reconstruction results compared to $v_{t}^{r}$ . Our full gSDF model relies on the feature map $v_{t}$ to compute the local visual feature $e_{v}$ for the given input point $x$ . + +# 3.4. Image Backbone Sharing Strategy + +As shown in Figure 2, our model contains three branches for hand and object pose estimations as well as for SDF feature encoding. These different branches may share image backbones which might be beneficial with the multi-task learning. In this section, we describe three alternative strategies for sharing image backbones in our model. + +Single image backbone (Figure 4a). We only employ one + +single image backbone for both pose and shape predictions. This is the strategy used in AlignSDF [11]. 
Symmetric image backbone (Figure 4b). To disentangle pose and shape learning, we share the image backbone for hand and object pose estimation, but use a different backbone to extract visual features for SDF learning.

Asymmetric image backbone (Figure 4c). Since hand pose estimation plays a critical role in the task, we use a separate backbone to predict the hand pose, while sharing the image backbone between the object pose predictor and the SDF feature encoder.

# 3.5. Training

We apply a two-stage training strategy. In the first stage, we train the hand pose predictor to predict hand joint coordinates $\psi_h$ with an $\ell_2$ loss $\mathcal{L}_{hp}$ and an ordinal loss [43] $\mathcal{L}_{ord}$ that penalizes cases where the predicted depth order between the $i_{th}$ and $j_{th}$ joints disagrees with the ground-truth relation $\mathbb{1}_{i,j}^{ord}$:

$$
\mathcal{L}_{hp} = \frac{1}{21} \sum_{i=1}^{21} \left\| \psi_{h,i} - \hat{\psi}_{h,i} \right\|_2^2, \tag{6}
$$

$$
\mathcal{L}_{ord} = \sum_{j=2}^{21} \sum_{i=1}^{j-1} \mathbb{1}_{i,j}^{ord} \times \left| \left(\psi_{h,i} - \psi_{h,j}\right) \cdot \vec{n} \right|, \tag{7}
$$

where $\vec{n} \in \mathbb{R}^3$ denotes the viewpoint direction. We randomly sample twenty virtual views to optimize $\mathcal{L}_{ord}$. Since the proposed kinematic features are based on the predicted hand joints $\psi_h$, we empirically find that pretraining the hand joint predictor in the first stage and then freezing its weights achieves better performance.

In the second training stage, we learn all the modules except the hand joint predictor in an end-to-end manner. We use the $\ell_2$ loss $\mathcal{L}_{op}$ to predict the object pose $\psi_{o}$:

$$
\mathcal{L}_{op} = \left\| \psi_{o} - \hat{\psi}_{o} \right\|_2^2, \tag{8}
$$

where $\hat{\psi}_o$ denotes the ground-truth location of the object center. To train the SDFs, we sample many 3D points around the hand-object surface and compute their ground-truth signed distances to the hand mesh and the object mesh. We use an $\ell_1$ loss to optimize the SDF decoders:

$$
\mathcal{L}_{hsdf} = \left\| \mathrm{SDF}_{hand} - \hat{\mathrm{SDF}}_{hand} \right\|_1, \tag{9}
$$

$$
\mathcal{L}_{osdf} = \left\| \mathrm{SDF}_{obj} - \hat{\mathrm{SDF}}_{obj} \right\|_1,
$$

where $\hat{\mathrm{SDF}}_{hand}$ and $\hat{\mathrm{SDF}}_{obj}$ denote the ground-truth signed distances to the hand and the object, respectively. The overall training objective $\mathcal{L}_{shape}$ in the second training stage is:

$$
\mathcal{L}_{shape} = \mathcal{L}_{op} + 0.5 \times \mathcal{L}_{hsdf} + 0.5 \times \mathcal{L}_{osdf}. \tag{10}
$$

# 4. Experiments

We conduct extensive experiments on two 3D hand-object reconstruction benchmarks to evaluate the effectiveness of our proposed gSDF model.

# 4.1. Datasets

ObMan [21] is a large-scale synthetic dataset that contains diverse hand grasping poses on a wide range of objects imported from ShapeNet [8]. We follow previous methods [11,26,42,63] to generate data for SDF training. First, we remove meshes that contain too many double-sided triangles, which results in 87,190 hand-object meshes. Then, we fit the hand-object mesh into a unit cube and sample 40,000 points inside the cube.
For each sampled point, we compute its signed distance to the ground-truth hand mesh and object mesh, respectively. At test time, we report the performance on the whole ObMan test set of 6,285 testing samples. + +DexYCB [9] is currently the largest real dataset that captures hand and object interactions in videos. Following [11,60], we focus on right-hand samples and use the official s0 split. We follow the same steps as in ObMan to obtain SDF training samples. To reduce the temporal redundancy, we downsample the video data to 6 frames per second, which results in 29,656 training samples and 5,928 testing samples. + +# 4.2. Evaluation metrics + +We follow prior works to comprehensively evaluate the 3D reconstructions with multiple metrics as below. + +Hand Chamfer Distance $(\mathrm{CD_h})$ . We evaluate Chamfer distance $(\mathrm{cm}^2)$ between our reconstructed hand mesh and the ground-truth hand mesh. We follow previous works [11, 26] to optimize the scale and translation to align the reconstructed mesh with the ground truth and sample 30,000 points on both meshes to compute Chamfer distance. We report the median Chamfer distance on the test set to reflect the quality of our reconstructed hand mesh. + +Hand F-score $(\mathbf{FS_h})$ . Since Chamfer distance is vulnerable to outliers [52, 63], we also report the F-score to evaluate the predicted hand mesh. After aligning the hand mesh with its ground truth, we report F-score at $1\mathrm{mm}$ $(\mathrm{FS_h}@\mathrm{l})$ and $5\mathrm{mm}$ $(\mathrm{FS_h}@\mathrm{5})$ thresholds. + +Object Chamfer Distance $(\mathrm{CD_o})$ . Following [11, 26], we first use the optimized hand scale and translation to transform the reconstructed object mesh. Then, we follow the same process as $\mathrm{CD_h}$ to compute $\mathrm{CD_o}$ $(\mathrm{cm}^2)$ and evaluate the quality of our reconstructed object mesh. + +Object F-score $(\mathrm{FS_o})$ .We follow the previous work [63] to evaluate the reconstructed object mesh using F-score at 5 mm $\mathrm{(FS_o@5)}$ and $10\mathrm{mm}$ $\mathrm{(FS_o@10)}$ thresholds. + +Hand Joint Error $(\mathbf{E_h})$ . To measure the hand pose estimation accuracy, we compute the mean joint error (cm) relative to the hand wrist over all 21 joints in the form of $\ell 2$ distance. + +Table 1. Hand reconstruction performance with different hand kinematic features $\mathbf{K}_{*}^{h}$ and visual feature $\mathrm{V}_1$ on DexYCB dataset. + +
| | Wrist only | All joints | $\mathrm{CD_h}\downarrow$ | $\mathrm{FS_h}@1\uparrow$ | $\mathrm{FS_h}@5\uparrow$ |
| :--- | :---: | :---: | :---: | :---: | :---: |
| $\mathrm{K}_1^h$ | × | × | 0.364 | 0.154 | 0.764 |
| $\mathrm{K}_2^h$ | ✓ | × | 0.344 | 0.167 | 0.776 |
| $\mathrm{K}_3^h$ | × | ✓ | 0.317 | 0.171 | 0.788 |
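As a concrete illustration of the hand kinematic feature compared in Table 1, the following is a minimal sketch of Eqs. (2)–(3): local joint rotations and translations are composed along the kinematic chain, and the query point is expressed in every joint's canonical frame. The joint ordering, parent indices and helper names are illustrative assumptions, not the authors' implementation.

```python
import numpy as np

def rodrigues(axis_angle):
    """Axis-angle vector (3,) -> 3x3 rotation matrix (the exp(.) in Eq. (2))."""
    theta = np.linalg.norm(axis_angle)
    if theta < 1e-8:
        return np.eye(3)
    k = axis_angle / theta
    K = np.array([[0, -k[2], k[1]], [k[2], 0, -k[0]], [-k[1], k[0], 0]])
    return np.eye(3) + np.sin(theta) * K + (1.0 - np.cos(theta)) * (K @ K)

def hand_kinematic_feature(x, theta_h, phi_h, parents):
    """x: query point (3,); theta_h, phi_h: (16, 3) local rotations/translations;
    parents: parent index per joint, with parents[0] = -1 for the wrist and every
    parent listed before its children. Returns e_h with 3 + 16*3 = 51 entries,
    as in Eq. (3)."""
    G = [None] * len(parents)
    for i, p in enumerate(parents):
        T = np.eye(4)
        T[:3, :3] = rodrigues(theta_h[i])
        T[:3, 3] = phi_h[i]
        G[i] = T if p < 0 else G[p] @ T                 # compose along the chain
    x_h = np.append(x, 1.0)                             # homogeneous coordinates H(x)
    local = [(np.linalg.inv(Gi) @ x_h)[:3] for Gi in G] # point in each joint frame
    return np.concatenate([x] + local)
```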
+ +Table 2. Object reconstruction performance with different object kinematic features ${\mathrm{K}}_{ * }^{o}$ and visual feature ${\mathrm{V}}_{1}$ on DexYCB dataset. + +
| | Obj pose | Hand pose | $\mathrm{CD_o}\downarrow$ | $\mathrm{FS_o}@5\uparrow$ | $\mathrm{FS_o}@10\uparrow$ |
| :--- | :---: | :---: | :---: | :---: | :---: |
| $\mathrm{K}_1^o$ | × | × | 2.06 | 0.392 | 0.660 |
| $\mathrm{K}_2^o$ | ✓ | × | 1.93 | 0.396 | 0.668 |
| $\mathrm{K}_3^o$ | ✓ | ✓ | 1.71 | 0.418 | 0.689 |
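Similarly, the object kinematic feature ablated in Table 2 (Eqs. (4)–(5)) can be sketched as follows; the tensor shapes and the wrist transform argument are assumptions for exposition only, not the authors' code.

```python
import numpy as np

def object_kinematic_feature(x, psi_o, psi_h, G_wrist):
    """x: query point (3,); psi_o: object center (3,); psi_h: hand joints (21, 3);
    G_wrist: (4, 4) global wrist transform G_{h,1} from Eq. (2).
    Returns e_o with 3 + 3 + 21*3 + 3 = 72 entries, as in Eq. (5)."""
    x_oc = x - psi_o                                         # object-centric coordinates
    e_joints = (x[None, :] - psi_h).reshape(-1)              # translation to each joint
    x_ow = (np.linalg.inv(G_wrist) @ np.append(x, 1.0))[:3]  # wrist canonical frame
    return np.concatenate([x, x_oc, e_joints, x_ow])
```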
+ +Object Center Error $(\mathbf{E_o})$ . To evaluate the accuracy of our predicted object translation, we report the $\ell 2$ distance (cm) between the prediction and its ground truth. + +Additionally, we report Contact ratio $(\mathrm{C}_r)$ , Penetration depth $(\mathrm{P}_d)$ and Intersection volume $(\mathrm{I}_v)$ [11,21,26,60,62] to present more details about the interaction between the hand mesh and the object mesh. Please see supplementary material for more details. + +# 4.3. Implementation details + +Model architecture. We use ResNet-18 [22] as our image backbone. For hand and object pose estimation, we adopt volumetric heatmaps of spatial resolution $64 \times 64 \times 64$ to localize hand joints and the object center in 3D space. For the spatial-temporal transformer, we use 16 transformer layers with 4 attention heads. We present more details about our model architecture in supplementary material. + +Training details. We take the image crop of the hand-object region according to their bounding boxes for DexYCB benchmark. Then, we modify camera intrinsic and extrinsic parameters [35,64] accordingly and take the cropped image as the input to our model. The spatial size of input images is $256 \times 256$ for all our models. We perform data augmentation including rotation $\left[\left[-45^{\circ}, 45^{\circ}\right]\right)$ and color jittering. During SDF training, we randomly sample 1000 points (500 points inside the mesh and 500 points outside the mesh) for the hand and the object, respectively. We train our model with a batch size of 256 for 1600 epochs on both ObMan and DexYCB using the Adam optimizer [27] with 4 NVIDIA RTX 3090 GPUs. We use an initial learning rate of $1 \times 10^{-4}$ and decay it by half every 600 epochs. It takes 22 hours for training on DexYCB and 60 hours on ObMan dataset. + +# 4.4. Ablation studies + +We carry out ablations on the DexYCB dataset to validate different components in our gSDF model. We evaluate different settings of hand kinematic features $(\mathbf{K}_*^h$ in Table 1), object kinematic features $(\mathbf{K}_*^o$ in Table 2), and visual features + +Table 3. Hand-object reconstruction performance with different visual features on DexYCB dataset. The visual features are combined with the best kinematic features ${\mathrm{K}}_{3}^{h}$ (Table 1) and ${\mathrm{K}}_{3}^{o}$ (Table 2) to reconstruct hand and object respectively. + +
| | Global | Local | Transformer (spatial) | Transformer (temp.) | $\mathrm{CD_h}\downarrow$ | $\mathrm{FS_h}@1\uparrow$ | $\mathrm{FS_h}@5\uparrow$ | $\mathrm{CD_o}\downarrow$ | $\mathrm{FS_o}@5\uparrow$ | $\mathrm{FS_o}@10\uparrow$ | $\mathrm{E_h}\downarrow$ | $\mathrm{E_o}\downarrow$ |
| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| $\mathrm{V}_1$ | ✓ | × | × | × | 0.317 | 0.171 | 0.788 | 1.71 | 0.418 | 0.689 | 1.44 | 1.91 |
| $\mathrm{V}_2$ | × | ✓ | × | × | 0.310 | 0.172 | 0.795 | 1.71 | 0.426 | 0.694 | 1.44 | 1.98 |
| $\mathrm{V}_3$ | × | ✓ | ✓ | × | 0.304 | 0.174 | 0.797 | 1.60 | 0.434 | 0.703 | 1.44 | 1.94 |
| $\mathrm{V}_4$ | × | ✓ | ✓ | ✓ | 0.302 | 0.177 | 0.801 | 1.55 | 0.437 | 0.709 | 1.44 | 1.96 |
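The geometry-aligned local visual feature (the Local setting used by $\mathrm{V}_2$–$\mathrm{V}_4$ in Table 3) projects the 3D query point onto the image plane and bilinearly samples the 16×16 feature map at that location. Below is a minimal PyTorch-style sketch under an assumed pinhole projection; the tensor shapes are illustrative assumptions and not the authors' exact code.

```python
import torch
import torch.nn.functional as F

def sample_aligned_feature(points, feat_map, K, image_size=256):
    """points: (N, 3) query points in camera coordinates; feat_map: (1, d, 16, 16)
    SDF encoder feature map; K: (3, 3) camera intrinsics. Returns (N, d) local
    visual features e_v obtained by bilinear sampling at the projections."""
    uv = (K @ points.T).T                       # perspective projection
    uv = uv[:, :2] / uv[:, 2:3]                 # pixel coordinates (u, v)
    grid = 2.0 * uv / image_size - 1.0          # normalize to [-1, 1] for sampling
    grid = grid.view(1, -1, 1, 2)               # (1, N, 1, 2), (x, y) order
    feats = F.grid_sample(feat_map, grid, mode="bilinear", align_corners=False)
    return feats[0, :, :, 0].T                  # (N, d)
```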
+ +Table 4. Hand-object reconstruction performance using different image backbone sharing strategies on DexYCB dataset. The ablation is carried out with visual features ${\mathrm{V}}_{1}$ and kinematic features ${\mathrm{K}}_{3}^{h}$ and ${\mathrm{K}}_{3}^{o}$ . + +
| Backbone | $\mathrm{CD_h}\downarrow$ | $\mathrm{FS_h}@1\uparrow$ | $\mathrm{FS_h}@5\uparrow$ | $\mathrm{CD_o}\downarrow$ | $\mathrm{FS_o}@5\uparrow$ | $\mathrm{FS_o}@10\uparrow$ | $\mathrm{E_h}\downarrow$ | $\mathrm{E_o}\downarrow$ |
| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| Single | 0.411 | 0.148 | 0.741 | 1.88 | 0.402 | 0.674 | 1.72 | 1.83 |
| Symmetric | 0.324 | 0.168 | 0.779 | 1.84 | 0.405 | 0.672 | 1.46 | 1.93 |
| Asymmetric | 0.317 | 0.171 | 0.788 | 1.71 | 0.418 | 0.689 | 1.44 | 1.91 |
+ +$(\mathrm{V}_{*}$ in Table 3). We use the asymmetric image backbone if not otherwise mentioned. + +Hand kinematic feature. In Table 1, we evaluate the contribution of the proposed hand kinematic features for 3D hand reconstruction. The model in $\mathrm{K}_1^h$ does not use any pose priors to transform the 3D point. The model in $\mathrm{K}_2^h$ only uses the hand wrist pose to transform the 3D point as AlignSDF [11]. Our model in $\mathrm{K}_3^h$ computes the transformations to all the hand joints, which achieves the best performance on all the evaluation metrics. Compared to $\mathrm{K}_1^h$ without any pose priors, our model achieves more than $12\%$ and $9\%$ improvement on $\mathrm{CD_h}$ and $\mathrm{FS_h}@\mathbb{1}$ , respectively. Compared to $\mathrm{K}_2^h$ with only hand wrist, our model greatly reduces the hand Chamfer distance from $0.344~\mathrm{cm}^2$ to $0.317~\mathrm{cm}^2$ , leading to $7.8\%$ relative gains. These results demonstrate the significance of pose priors and the advantage of gSDF for 3D hand reconstruction. + +Object kinematic feature. In Table 2, we validate the effectiveness of our proposed object kinematic feature. The model in $\mathrm{K}_1^o$ does not contain any pose priors, while the model in $\mathrm{K}_2^o$ aligns query points to the object center as in [11]. Our model in $\mathrm{K}_3^o$ further employs the hand pose to produce the object kinematic feature, which significantly boosts the performance for the object reconstruction on different metrics. Compared to $\mathrm{K}_2^o$ , our proposed object kinematic feature achieves more than $11\%$ and $5.5\%$ improvement on $\mathrm{CD_o}$ and $\mathrm{FS_o}@\mathsf{5}$ , respectively. + +Visual features. We compare different visual features for SDF prediction in Table 3. $\mathrm{V}_{1}$ uses the global visual feature e.g. the average pooling of ResNet feature map as in previous works [11,26]. Our local visual features $\mathrm{V}_{2}$ derived from the geometry alignment with the query point reduces the hand Chamfer distance from $0.317~\mathrm{cm}^2$ to $0.310~\mathrm{cm}^2$ . However, it shows less improvement on the object shape accuracy. In $\mathrm{V}_{3}$ and $\mathrm{V}_{4}$ , we use the transformer model to refine the feature maps. To ablate the improvement from the transformer architecture and from the temporal information + +![](images/a2fc2087c8926407024aceddb57fdb907fca43dbfaaaaa85e1f9f3ce0af2e779.jpg) +Input Images + +![](images/406f8b2bb7be3f36f49ae72e85ff726200b4de097e075921745b6128b33911ee.jpg) +Our single-frame model + +![](images/26e3356a3edb06d32680c7a5458a24dd7cf4a5f55113205c55775f19b3067664.jpg) +Our video model + +![](images/28775e7b0390b3fd2da60253534f1ed3dae6e3f88f9c8fbcaa8a1330a7df6733.jpg) +Figure 5. The qualitative comparison between our single-frame model built with the transformer and our video model. + +![](images/e5dd54af45fb9666196be863cfdc724c7150dc7069e43683a2f8a6f6d723b5a9.jpg) + +![](images/41160320a085061c1f3ae14b3c5bfc76c6c84c9fd77cc608cffb6e5fb116b782.jpg) + +in videos, we only use transformer for each single frame in $\mathrm{V}_3$ while use it for multiple frames in $\mathrm{V}_4$ . We can see that the transformer architecture alone is beneficial for the reconstruction. Enhancing the visual features with temporal contexts further improves the performance in terms of all the evaluation metrics especially for the objects. 
In Figure 5, compared with our single-frame model built with the transformer, our video model can make more robust predictions under some hard cases (e.g., motion blur). Although the reconstruction of the can is not accurate in the first example, our model tends to produce more regular shapes. + +Image backbone sharing strategy. Results of using different strategies for image backbone sharing are presented in Table 4. We train all the three models using the two-stage strategy described in Section 3.5. The model with one single backbone achieves the worst performance under most of the evaluation metrics. This is because the pose learning and shape learning compete with each other during training. The symmetric strategy to separate backbones for pose and SDFs performs better than the single backbone model. Our asymmetric strategy with a separate backbone for hand pose estimation and a shared backbone for object pose and SDF feature encoder achieves the best performance. We also em + +Table 5. Comparison with state-of-the-art methods on the image ObMan dataset. + +
| Methods | $\mathrm{CD_h}\downarrow$ | $\mathrm{FS_h}@1\uparrow$ | $\mathrm{FS_h}@5\uparrow$ | $\mathrm{CD_o}\downarrow$ | $\mathrm{FS_o}@5\uparrow$ | $\mathrm{FS_o}@10\uparrow$ | $\mathrm{E_h}\downarrow$ | $\mathrm{E_o}\downarrow$ |
| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| Hasson et al. [21] | 0.415 | 0.138 | 0.751 | 3.60 | 0.359 | 0.590 | 1.13 | - |
| Karunratanakul et al. [26] | 0.261 | - | - | 6.80 | - | - | - | - |
| Ye et al. [63] | - | - | - | - | 0.420 | 0.630 | - | - |
| Chen et al. [11] | 0.136 | 0.302 | 0.913 | 3.38 | 0.404 | 0.636 | 1.27 | 3.29 |
| gSDF (Ours) | 0.112 | 0.332 | 0.935 | 3.14 | 0.438 | 0.660 | 0.93 | 3.43 |
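The Chamfer distance and F-score numbers reported in Tables 5 and 6 follow the standard point-sampling formulation summarized in Section 4.2. A simplified sketch (without the scale/translation alignment step and with a brute-force nearest-neighbour search) is given below for reference; it is illustrative only.

```python
import numpy as np

def chamfer_and_fscore(pred_pts, gt_pts, threshold=0.5):
    """pred_pts: (N, 3), gt_pts: (M, 3) points sampled on the two meshes, in cm.
    Returns the symmetric Chamfer distance (cm^2) and the F-score at `threshold` cm
    (e.g. 0.5 cm for the 5 mm threshold)."""
    d = np.linalg.norm(pred_pts[:, None, :] - gt_pts[None, :, :], axis=-1)
    d_p2g = d.min(axis=1)                        # prediction -> ground truth
    d_g2p = d.min(axis=0)                        # ground truth -> prediction
    chamfer = (d_p2g ** 2).mean() + (d_g2p ** 2).mean()
    precision = (d_p2g < threshold).mean()
    recall = (d_g2p < threshold).mean()
    fscore = 2 * precision * recall / max(precision + recall, 1e-8)
    return chamfer, fscore
```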
+ +Table 6. Comparison with state-of-the-art methods on the video Dex YCB dataset. + +
| Methods | $\mathrm{CD_h}\downarrow$ | $\mathrm{FS_h}@1\uparrow$ | $\mathrm{FS_h}@5\uparrow$ | $\mathrm{CD_o}\downarrow$ | $\mathrm{FS_o}@5\uparrow$ | $\mathrm{FS_o}@10\uparrow$ | $\mathrm{E_h}\downarrow$ | $\mathrm{E_o}\downarrow$ |
| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| Hasson et al. [21] | 0.537 | 0.115 | 0.647 | 1.94 | 0.383 | 0.642 | 1.67 | - |
| Karunratanakul et al. [26] | 0.364 | 0.154 | 0.764 | 2.06 | 0.392 | 0.660 | - | - |
| Chen et al. [11] | 0.358 | 0.162 | 0.767 | 1.83 | 0.410 | 0.679 | 1.58 | 1.78 |
| Chen et al. [11]-2BB | 0.344 | 0.167 | 0.776 | 1.81 | 0.413 | 0.687 | 1.57 | 1.93 |
| gSDF (Ours) | 0.302 | 0.177 | 0.801 | 1.55 | 0.437 | 0.709 | 1.44 | 1.96 |
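On the video DexYCB benchmark, part of the gain comes from the temporally-enhanced visual features of Section 3.3, where the 16×16 feature maps of neighbouring frames are flattened into 3×16×16 tokens and refined by a spatio-temporal transformer. The sketch below illustrates this refinement step; the feature dimension and layer count are assumptions for exposition and differ from the reported architecture.

```python
import torch
import torch.nn as nn

d = 256                                     # assumed feature dimension
encoder = nn.TransformerEncoder(
    nn.TransformerEncoderLayer(d_model=d, nhead=4, batch_first=True),
    num_layers=2)

def refine_features(frame_feats):
    """frame_feats: (B, 3, d, 16, 16) raw feature maps v^r for frames t-1, t, t+1.
    Returns the temporally-enhanced map v_t of shape (B, d, 16, 16)."""
    B = frame_feats.shape[0]
    tokens = frame_feats.flatten(3).permute(0, 1, 3, 2).reshape(B, 3 * 256, d)
    tokens = encoder(tokens)                        # spatio-temporal attention
    centre = tokens.reshape(B, 3, 256, d)[:, 1]     # tokens of the centre frame
    return centre.permute(0, 2, 1).reshape(B, d, 16, 16)
```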
+ +![](images/39ed9750feff3fbe2547e84316f8642767f8b8ec8ed882144508820c4b5274f1.jpg) + +![](images/205b452a5802100b9a01591043a110f94a42796fa140c94ead9ad5f905272765.jpg) + +![](images/49fd956fdc8cf147437a8ea4bb3365039671495734ec7a8acc49948ce00567d7.jpg) + +![](images/4356e5359f399a9406dafdcca70a9c95aeb4cfecc956d0725fc62934972df905.jpg) + +![](images/1eb8bb9f7720efd45bf01a89fe7988b0bb76d992220e316a637f66312945b4a4.jpg) +Figure 6. Qualitative results of our model on test images from the ObMan and DexYCB benchmarks. Our model produces convincing results for different grasping poses and diverse objects. + +![](images/2960a5e8e974a67f4ef83809c2bcc825b37473220cb57827905ed333df3de6c5.jpg) + +![](images/13903a0ca9925e1c8d065ca5669db7edc88b0a5c6a8633ea2861e24b978089c6.jpg) + +![](images/fb079a5f525650ce642ab4943ca68a1f9da21fb77bbeea60f190c6504edb56fb.jpg) + +![](images/1bc77af7e87f4b7f2fc219f484870514e2eca4acdfd27cfaba7935f56455eb5c.jpg) + +![](images/cd8232bcd997e1e35719325604ebd9ef49b1e440174c1123c8eababd70c9ede1.jpg) + +![](images/455aa6d4cb069cbda3bdfd07ae3af3788347a47a97e7b6bf3b1c722d96e09edf.jpg) + +![](images/55549b002e014a326591bd0db01eecf830dcbc552d7b6e759e22d9acb42f3a10.jpg) + +![](images/e71cbc42877831d036ebc5f2191c6f3f0ee3353ef108860ec09872e723f9edd3.jpg) + +![](images/1477bc6049d11ebb049d1f9cb5e0b3fbb848177ddeb296f18a0cec1d8ea3faf2.jpg) + +![](images/8aa7c02088f82d10f587d942b76aaec39801e04d3d08362d7948bb43920f281c.jpg) + +![](images/7e0c697f296ef13edc1a09cd92d627cf1d4c37d2d96708553ee12c6fabfe7cfc.jpg) + +![](images/340174b095ecda11c76d010375a3400d3fd57a086421c8a6e4c409f6b4cfa3f4.jpg) + +![](images/6fa8633c18178a6fecbe50d2643461c1fd579a32b1b6f18b46979035e5974956.jpg) + +![](images/9bac27d76484944de1854e78079c3b4a5d0904b61ba8481043de1194ff3bb557.jpg) + +![](images/db353de4603cc3652877d9be443d09e004a74e6bb8c66d3ed4fbbd08d6baec8f.jpg) + +pirically find that learning the object pose and SDFs together improves both the pose accuracy and the shape accuracy. The possible reason is that estimating object pose also helps our model to focus more on hand-object regions and boosts the 3D reconstruction accuracy. + +# 4.5. Comparison with state of the art + +We compare our gSDF model with state-of-the-art methods on ObMan and DexYCB benchmarks. In Figure 6, we qualitatively demonstrate that our approach can produce convincing 3D hand-object reconstruction results. + +ObMan. Table 5 shows the comparison of hand and object reconstruction results on the synthetic ObMan dataset. Since ObMan does not contain video data, we do not use the spatial-temporal transformer in this model. The proposed gSDF outperforms previous methods by a significant margin. Compared with the recent method [63] that only reconstructs hand-held objects, our joint method produces more accurate object meshes. gSDF achieves a $17.6\%$ improvement on $\mathrm{CD_h}$ and a $7.1\%$ improvement on $\mathrm{CD_o}$ over the state-of-the-art accuracy, which indicates that our model can better reconstruct both hand meshes and diverse object meshes. + +DexYCB. Table 6 presents results on the DexYCB benchmark. We also show the performance of AlignSDF [11] with two backbones ([11]-2BB). Our model demonstrates a large improvement over recent methods. In particular, it advances the state-of-the-art accuracy on $\mathrm{CD_h}$ and $\mathrm{CD_o}$ by $12.2\%$ and $14.4\%$ , respectively. The high accuracy of gSDF on DexYCB demonstrates that it generalizes well to real images. + +# 5. 
Conclusion + +In this work, we propose a geometry-driven SDF (gSDF) approach for 3D hand and object reconstruction. We explicitly model the underlying 3D geometry to guide the SDF learning. We first estimate poses of hands and objects according to kinematic chains of pose transformations, and then derive kinematic features and local visual features using the geometry information for signed distance prediction. Extensive experiments on ObMan and DexYCB datasets demonstrate the effectiveness of our proposed method. + +Acknowledgements. This work was granted access to the HPC resources of IDRIS under the allocation AD011013147 made by GENCI. This work was funded in part by the French government under management of Agence Nationale de la Recherche as part of the "Investissements d'avenir" program, reference ANR19-P3IA-0001 (PRAIRIE 3IA Institute) and by Louis Vuitton ENS Chair on Artificial Intelligence. We thank Yana Hasson for helpful discussions. + +# References + +[1] Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lucic, and Cordelia Schmid. ViViT: A video vision transformer. In ICCV, 2021. 5 +[2] Seungryul Baek, Kwang In Kim, and Tae-Kyun Kim. Pushing the envelope for RGB-based dense 3D hand pose estimation via neural rendering. In CVPR, 2019. 2 +[3] Luca Ballan, Aparna Taneja, Jürgen Gall, Luc Van Gool, and Marc Pollefeys. Motion capture of hands in action using discriminative salient points. In ECCV, 2012. 2 +[4] Gedas Bertasius, Heng Wang, and Lorenzo Torresani. Is space-time attention all you need for video understanding? In ICML, 2021. 5 +[5] Adnane Boukhayma, Rodrigo de Bem, and Philip HS Torr. 3D hand shape and pose from images in the wild. In CVPR, 2019. 2 +[6] Romain Brégier. Deep regression on manifolds: a 3D rotation case study. In 3DV, 2021. 3, 4 +[7] Zhe Cao, Ilija Radosavovic, Angjoo Kanazawa, and Jitendra Malik. Reconstructing hand-object interactions in the wild. In ICCV, 2021. 1, 2 +[8] Angel X Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, et al. ShapeNet: An information-rich 3D model repository. arXiv preprint arXiv:1512.03012, 2015. 6 +[9] Yu-Wei Chao, Wei Yang, Yu Xiang, Pavlo Molchanov, Ankur Handa, Jonathan Tremblay, Yashraj S Narang, Karl Van Wyk, Umar Iqbal, Stan Birchfield, et al. DexYCB: A benchmark for capturing hand grasping of objects. In CVPR, 2021. 1, 2, 6 +[10] Xingyu Chen, Yufeng Liu, Chongyang Ma, Jianlong Chang, Huayan Wang, Tian Chen, Xiaoyan Guo, Pengfei Wan, and Wen Zheng. Camera-space hand mesh recovery via semantic aggregation and adaptive 2D-1D registration. In CVPR, 2021. 2 +[11] Zerui Chen, Yana Hasson, Cordelia Schmid, and Ivan Laptev. AlignSDF: Pose-Aligned signed distance fields for handobject reconstruction. In ECCV, 2022. 1, 2, 3, 4, 5, 6, 7, 8 +[12] Zhiqin Chen and Hao Zhang. Learning implicit fields for generative shape modeling. In CVPR, 2019. 2 +[13] Enric Corona, Tomas Hodan, Minh Vo, Francesc Moreno-Noguer, Chris Sweeney, Richard Newcombe, and Lingni Ma. LISA: Learning implicit shape and appearance of hands. In CVPR, 2022. 2 +[14] Thibault Groueix, Matthew Fisher, Vladimir G Kim, Bryan C Russell, and Mathieu Aubry. A papier-mâché approach to learning 3D surface generation. In CVPR, 2018. 2 +[15] Henning Hamer, Juergen Gall, Thibaut Weise, and Luc Van Gool. An object-dependent hand pose prior from sparse training data. In CVPR, 2010. 2 +[16] Henning Hamer, Konrad Schindler, Esther Koller-Meier, and Luc Van Gool. 
Tracking a hand manipulating an object. In ICCV, 2009. 2 + +[17] Shreyas Hampali, Mahdi Rad, Markus Oberweger, and Vincent Lepetit. HOnnotate: A method for 3D annotation of hand and object poses. In CVPR, 2020. 1, 2 +[18] Shreyas Hampali, Sayan Deb Sarkar, Mahdi Rad, and Vincent Lepetit. Keypoint Transformer: Solving joint identification in challenging hands and object interactions for accurate 3D pose estimation. In CVPR, 2022. 1, 2 +[19] Yana Hasson, Bugra Tekin, Federica Bogo, Ivan Laptev, Marc Pollefeys, and Cordelia Schmid. Leveraging photometric consistency over time for sparsely supervised hand-object reconstruction. In CVPR, 2020. 1, 2, 4 +[20] Yana Hasson, Gül Varol, Cordelia Schmid, and Ivan Laptev. Towards unconstrained joint hand-object reconstruction from RGB videos. In 3DV, 2021. 2 +[21] Yana Hasson, Gul Varol, Dimitrios Tzionas, Igor Kalevatykh, Michael J Black, Ivan Laptev, and Cordelia Schmid. Learning joint reconstruction of hands and manipulated objects. In CVPR, 2019. 1, 2, 4, 6, 8 +[22] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 4, 6 +[23] Tony Heap and David Hogg. Towards 3D hand tracking using a deformable model. In FG, 1996. 2 +[24] Umar Iqbal, Pavlo Molchanov, Thomas Breuel Juergen Gall, and Jan Kautz. Hand pose estimation via latent 2.5D heatmap regression. In ECCV, 2018. 1, 2 +[25] Korrawe Karunratanakul, Adrian Spurr, Zicong Fan, Otmar Hilliges, and Siyu Tang. A skeleton-driven neural occupancy representation for articulated hands. In 3DV, 2021. 2 +[26] Korrawe Karunratanakul, Jinlong Yang, Yan Zhang, Michael J Black, Krikamol Muandet, and Siyu Tang. Grasping Field: Learning implicit representations for human grasps. In 3DV, 2020. 1, 2, 3, 4, 6, 7, 8 +[27] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 6 +[28] Nikos Kolotouros, Georgios Pavlakos, Michael J Black, and Kostas Daniilidis. Learning to reconstruct 3D human pose and shape via model-fitting in the loop. In ICCV, 2019. 3, 4 +[29] Dominik Kulon, Riza Alp Güler, I. Kokkinos, M. Bronstein, and S. Zafeiriou. Weakly-supervised mesh-convolutional hand reconstruction in the wild. In CVPR, 2020. 2 +[30] Dominik Kulon, Haoyang Wang, Riza Alp Güler, Michael M. Bronstein, and Stefanos Zafeiriou. Single image 3D hand reconstruction with mesh convolutions. In BMVC, 2019. 2 +[31] Vincent Lepetit. Recent advances in 3D object and hand pose estimation. arXiv preprint arXiv:2006.05927, 2020. 2 +[32] Mengcheng Li, Liang An, Hongwen Zhang, Lianpeng Wu, Feng Chen, Tao Yu, and Yebin Liu. Interacting attention graph for single image two-hand reconstruction. In CVPR, 2022. 2 +[33] William E Lorensen and Harvey E Cline. Marching Cubes: A high resolution 3D surface construction algorithm. TOG, 1987. 3 +[34] Jun Lv, Wenqiang Xu, Lixin Yang, Sucheng Qian, Chongzhao Mao, and Cewu Lu. HandTailor: Towards high-precision monocular 3D hand recovery. In BMVC, 2021. 2 + +[35] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3D human pose estimation in the wild using improved CNN supervision. In 3DV, 2017. 6 +[36] Hao Meng, Sheng Jin, Wentao Liu, Chen Qian, Mengxiang Lin, Wanli Ouyang, and Ping Luo. 3D interacting hand pose estimation by hand de-occlusion and removal. In ECCV, 2022. 2 +[37] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. 
Occupancy Networks: Learning 3D reconstruction in function space. In CVPR, 2019. 2 +[38] Gyeongsik Moon, Ju Yong Chang, and Kyoung Mu Lee. V2V- PoseNet: Voxel-to-voxel prediction network for accurate 3D hand and human pose estimation from a single depth map. In CVPR, 2018. 1, 2, 4 +[39] Franziska Mueller, Florian Bernard, Oleksandr Sotnychenko, Dushyant Mehta, Srinath Sridhar, Dan Casas, and Christian Theobalt. Ganerated hands for real-time 3D hand tracking from monocular RGB. In CVPR, 2018. 2 +[40] Franziska Mueller, Micah Davis, Florian Bernard, Oleksandr Sotnychenko, Micekal Verschooor, Miguel A Otaduy, Dan Casas, and Christian Theobalt. Real-time pose and shape reconstruction of two interacting hands with a single depth camera. TOG, 2019. 2 +[41] Iason Oikonomidis, Nikolaos Kyriazis, and Antonis A Argyros. Full DOF tracking of a hand interacting with an object by modeling occlusions and physical constraints. In ICCV, 2011. 2 +[42] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning continuous signed distance functions for shape representation. In CVPR, 2019. 2, 6 +[43] Georgios Pavlakos, Xiaowei Zhou, and Kostas Daniilidis. Ordinal depth supervision for 3D human pose estimation. In CVPR, 2018. 5 +[44] Georgios Pavlakos, Xiaowei Zhou, Konstantinos G Derpanis, and Kostas Daniilidis. Coarse-to-fine volumetric prediction for single-image 3D human pose. In CVPR, 2017. 4 +[45] James M Rehg and Takeo Kanade. Visual tracking of high DOF articulated structures: an application to human hand tracking. In ECCV, 1994. 2 +[46] Javier Romero, Dimitrios Tzionas, and Michael J. Black. Embodied Hands: Modeling and capturing hands and bodies together. TOG, 2017. 1, 2 +[47] Shunsuke Saito, Zeng Huang, Ryota Natsume, Shigeo Morishima, Angjoo Kanazawa, and Hao Li. PiFu: Pixel-aligned implicit function for high-resolution clothed human digitization. In ICCV, 2019. 3, 4 +[48] Adrian Spurr, Aneesh Dahiya, Xi Wang, Xuong Zhang, and Otmar Hilliges. Self-supervised 3D hand pose estimation from monocular RGB via contrastive learning. In ICCV, 2021. 2 +[49] Srinath Sridhar, Franziska Mueller, Michael Zollhöfer, Dan Casas, Antti Oulasvirta, and Christian Theobalt. Real-time joint tracking of a hand manipulating an object from RGB-D input. In ECCV, 2016. 2 + +[50] Xiao Sun, Bin Xiao, Fangyin Wei, Shuang Liang, and Yichen Wei. Integral human pose regression. In ECCV, 2018. 2, 4 +[51] Danhang Tang, Hyung Jin Chang, Alykhan Tejani, and Tae Kyun Kim. Latent regression forest: Structured estimation of 3D articulated hand posture. In CVPR, 2014. 1, 2 +[52] Maxim Tatarchenko, Stephan R Richter, René Ranftl, Zhuwen Li, Vladlen Koltun, and Thomas Brox. What do single-view 3D reconstruction networks learn? In CVPR, 2019. 6 +[53] Bugra Tekin, Federica Bogo, and Marc Pollefeys. $\mathrm{H} + \mathrm{O}$ : Unified egocentric recognition of 3D hand-object poses and interactions. In CVPR, 2019. 1, 2 +[54] Tze Ho Elden Tse, Kwang In Kim, Ales Leonardis, and Hyung Jin Chang. Collaborative learning for hand and object reconstruction with attention-guided graph convolution. In CVPR, 2022. 1, 2 +[55] Aggeliki Tsoli and Antonis A Argyros. Joint 3D tracking of a deformable object in interaction with a hand. In ECCV, 2018. 2 +[56] Dimitrios Tzionas and Juergen Gall. 3D object reconstruction from hand-object interactions. In ICCV, 2015. 
2 +[57] Jiayi Wang, Franziska Mueller, Florian Bernard, Suzanne Sorli, Oleksandr Sotnychenko, Neng Qian, Miguel A Otaduy, Dan Casas, and Christian Theobalt. RGB2Hands: Real-time tracking of 3D hand interactions from monocular RGB video. TOG, 2020. 2 +[58] Yangang Wang, Jianyuan Min, Jianjie Zhang, Yebin Liu, Feng Xu, Qionghai Dai, and Jinxiang Chai. Video-based hand manipulation capture through composite motion control. TOG, 2013. 2 +[59] Fu Xiong, Boshen Zhang, Yang Xiao, Zhiguo Cao, Taidong Yu, Joey Tianyi Zhou, and Junsong Yuan. A2J: Anchor-to-joint regression network for 3D articulated pose estimation from a single depth image. In ICCV, 2019. 2 +[60] Lixin Yang, Kailin Li, Xinyu Zhan, Jun Lv, Wenqiang Xu, Jiefeng Li, and Cewu Lu. ArtiBoost: Boosting articulated 3D hand-object pose estimation via online exploration and synthesis. In CVPR, 2022. 2, 6 +[61] Lixin Yang, Kailin Li, Xinyu Zhan, Fei Wu, Anran Xu, Liu Liu, and Cewu Lu. OakInk: A large-scale knowledge repository for understanding hand-object interaction. In CVPR, 2022. 1, 2 +[62] Lixin Yang, Xinyu Zhan, Kailin Li, Wenqiang Xu, Jiefeng Li, and Cewu Lu. CPF: Learning a contact potential field to model the hand-object interaction. In ICCV, 2021. 1, 2, 6 +[63] Yufei Ye, Abhinav Gupta, and Shubham Tulsiani. What's in your hands? 3D reconstruction of generic objects in hands. In CVPR, 2022. 2, 3, 4, 6, 8 +[64] Frank Yu, Mathieu Salzmann, Pascal Fua, and Helge Rhodin. PCLs: Geometry-aware neural reconstruction of 3D pose with perspective crop layers. In CVPR, 2021. 6 +[65] Shanxin Yuan, Guillermo Garcia-Hernando, Björn Stenger, Gyeongsik Moon, Ju Yong Chang, Kyoung Mu Lee, Pavlo Molchanov, Jan Kautz, Sina Honari, Liuhao Ge, Junsong Yuan, Xinghao Chen, Guijin Wang, Fan Yang, Kai Akiyama, Yang Wu, Qingfu Wan, Meysam Madadi, Sergio Escalera, Shile Li, Dongheui Lee, Iason Oikonomidis, Antonis Argyros, and Tae-Kyun Kim. Depth-based 3D hand pose estimation: + +From current achievements to future goals. In CVPR, June 2018. 2 +[66] Yi Zhou, Connelly Barnes, Jingwan Lu, Jimei Yang, and Hao Li. On the continuity of rotation representations in neural networks. In CVPR, 2019. 3, 4 +[67] Christian Zimmermann and Thomas Brox. Learning to estimate 3D hand pose from single RGB images. In ICCV, 2017. 
1, 2 \ No newline at end of file diff --git a/2023/gSDF_ Geometry-Driven Signed Distance Functions for 3D Hand-Object Reconstruction/images.zip b/2023/gSDF_ Geometry-Driven Signed Distance Functions for 3D Hand-Object Reconstruction/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..90a346d8ceb587b6e2b209f5fbef640a6e9d2019 --- /dev/null +++ b/2023/gSDF_ Geometry-Driven Signed Distance Functions for 3D Hand-Object Reconstruction/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce23f18c90b83420698f9453cb684a588806aa4f3237c7ffa597206806598c6b +size 566655 diff --git a/2023/gSDF_ Geometry-Driven Signed Distance Functions for 3D Hand-Object Reconstruction/layout.json b/2023/gSDF_ Geometry-Driven Signed Distance Functions for 3D Hand-Object Reconstruction/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..7b0c196521721c98307caceb51bd8efa96d9a1cf --- /dev/null +++ b/2023/gSDF_ Geometry-Driven Signed Distance Functions for 3D Hand-Object Reconstruction/layout.json @@ -0,0 +1,13021 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 137, + 103, + 457, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 103, + 457, + 138 + ], + "spans": [ + { + "bbox": [ + 137, + 103, + 457, + 138 + ], + "type": "text", + "content": "gSDF: Geometry-Driven Signed Distance Functions for 3D Hand-Object Reconstruction" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 128, + 162, + 186, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 162, + 186, + 175 + ], + "spans": [ + { + "bbox": [ + 128, + 162, + 186, + 175 + ], + "type": "text", + "content": "Zerui Chen" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 210, + 162, + 272, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 210, + 162, + 272, + 174 + ], + "spans": [ + { + "bbox": [ + 210, + 162, + 272, + 174 + ], + "type": "text", + "content": "Shizhe Chen" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 298, + 162, + 380, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 162, + 380, + 174 + ], + "spans": [ + { + "bbox": [ + 298, + 162, + 380, + 174 + ], + "type": "text", + "content": "Cordelia Schmid" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 405, + 162, + 464, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 405, + 162, + 464, + 175 + ], + "spans": [ + { + "bbox": [ + 405, + 162, + 464, + 175 + ], + "type": "text", + "content": "Ivan Laptev" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 99, + 176, + 493, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 176, + 493, + 190 + ], + "spans": [ + { + "bbox": [ + 99, + 176, + 493, + 190 + ], + "type": "text", + "content": "Inria, École normale supérieure, CNRS, PSL Research Univ., 75005 Paris, France" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 221, + 193, + 369, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 193, + 369, + 202 + ], + "spans": [ + { + "bbox": [ + 221, + 193, + 369, + 202 + ], + "type": "text", + "content": "firstname.lastname@inria.fr" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 143, + 232, + 191, + 244 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 232, + 191, + 244 + ], + "spans": [ + { + "bbox": [ + 143, + 232, + 191, + 244 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 
46, + 258, + 290, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 258, + 290, + 474 + ], + "spans": [ + { + "bbox": [ + 46, + 258, + 290, + 474 + ], + "type": "text", + "content": "Signed distance functions (SDFs) is an attractive framework that has recently shown promising results for 3D shape reconstruction from images. SDFs seamlessly generalize to different shape resolutions and topologies but lack explicit modelling of the underlying 3D geometry. In this work, we exploit the hand structure and use it as guidance for SDF-based shape reconstruction. In particular, we address reconstruction of hands and manipulated objects from monocular RGB images. To this end, we estimate poses of hands and objects and use them to guide 3D reconstruction. More specifically, we predict kinematic chains of pose transformations and align SDFs with highly-articulated hand poses. We improve the visual features of 3D points with geometry alignment and further leverage temporal information to enhance the robustness to occlusion and motion blurs. We conduct extensive experiments on the challenging ObMan and DexYCB benchmarks and demonstrate significant improvements of the proposed method over the state of the art." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 499, + 128, + 512 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 499, + 128, + 512 + ], + "spans": [ + { + "bbox": [ + 47, + 499, + 128, + 512 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 521, + 288, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 521, + 288, + 616 + ], + "spans": [ + { + "bbox": [ + 46, + 521, + 288, + 616 + ], + "type": "text", + "content": "Understanding how hands interact with objects is becoming increasingly important for widespread applications, including virtual reality, robotic manipulation and human-computer interaction. Compared to 3D estimation of sparse hand joints [24,38,51,53,67], joint reconstruction of hands and object meshes [11, 18, 21, 26, 62] provides rich information about hand-object interactions and has received increased attention in recent years." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 617, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 288, + 714 + ], + "type": "text", + "content": "To reconstruct high-quality meshes, some recent works [9, 17, 61] explore multi-view image inputs. Multi-view images, however, are less common both for training and testing scenarios. In this work, we focus on a more practical and user-friendly setting where we aim to reconstruct hand and object meshes from monocular RGB images. 
Given the ill-posed nature of the task, many existing methods [7, 19, 21, 54, 62] employ parametric mesh models (e.g., MANO [46]) to im" + } + ] + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 310, + 234, + 389, + 293 + ], + "blocks": [ + { + "bbox": [ + 310, + 234, + 389, + 293 + ], + "lines": [ + { + "bbox": [ + 310, + 234, + 389, + 293 + ], + "spans": [ + { + "bbox": [ + 310, + 234, + 389, + 293 + ], + "type": "image", + "image_path": "527adaaf0d94728c6cef36247c014db1a2971619c5891a07228162e574525f61.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 320, + 293, + 359, + 306 + ], + "blocks": [ + { + "bbox": [ + 320, + 293, + 359, + 306 + ], + "lines": [ + { + "bbox": [ + 320, + 293, + 359, + 306 + ], + "spans": [ + { + "bbox": [ + 320, + 293, + 359, + 306 + ], + "type": "image", + "image_path": "0101e1ed915fabefe96f599e74a8be60a2f28721973d0da39113d65b0623b487.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 327, + 318, + 384, + 376 + ], + "blocks": [ + { + "bbox": [ + 320, + 311, + 339, + 319 + ], + "lines": [ + { + "bbox": [ + 320, + 311, + 339, + 319 + ], + "spans": [ + { + "bbox": [ + 320, + 311, + 339, + 319 + ], + "type": "text", + "content": "gSDF" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 327, + 318, + 384, + 376 + ], + "lines": [ + { + "bbox": [ + 327, + 318, + 384, + 376 + ], + "spans": [ + { + "bbox": [ + 327, + 318, + 384, + 376 + ], + "type": "image", + "image_path": "e9a24ed55ed9a034f6e98cdc5905a4854a353a9a30987c24fef26688e57761a0.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 326, + 376, + 358, + 388 + ], + "blocks": [ + { + "bbox": [ + 326, + 376, + 358, + 388 + ], + "lines": [ + { + "bbox": [ + 326, + 376, + 358, + 388 + ], + "spans": [ + { + "bbox": [ + 326, + 376, + 358, + 388 + ], + "type": "image", + "image_path": "930162f97d68298d59444a1e5ab0920b6530a1751ebcfac02c1fe9dbc0ea6f13.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 310, + 390, + 350, + 448 + ], + "blocks": [ + { + "bbox": [ + 310, + 390, + 350, + 448 + ], + "lines": [ + { + "bbox": [ + 310, + 390, + 350, + 448 + ], + "spans": [ + { + "bbox": [ + 310, + 390, + 350, + 448 + ], + "type": "image", + "image_path": "1b1769b767cc71b8b5a0b055e8b51352741b96b36c76b761da995b8b93eb34ff.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 462, + 547, + 552 + ], + "lines": [ + { + "bbox": [ + 305, + 462, + 547, + 552 + ], + "spans": [ + { + "bbox": [ + 305, + 462, + 547, + 552 + ], + "type": "text", + "content": "Figure 1. We aim to reconstruct 3D hand and object meshes from monocular images (top). Our method gSDF (middle) first predicts 3D hand joints (blue) and object locations (red) from input images. We use estimated hand poses and object locations to incorporate strong geometric priors into SDF by generating hand- and object-aware kinematic features for each SDF query point. Our resulting gSDF model generates accurate results for real images with various objects and grasping hand poses (bottom)." 
+ } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 353, + 399, + 386, + 440 + ], + "blocks": [ + { + "bbox": [ + 353, + 399, + 386, + 440 + ], + "lines": [ + { + "bbox": [ + 353, + 399, + 386, + 440 + ], + "spans": [ + { + "bbox": [ + 353, + 399, + 386, + 440 + ], + "type": "image", + "image_path": "3fbc938809057c339e88a7bb8d0ff47ccf23b8af98ac8617d0fd901344b357d9.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 390, + 236, + 467, + 293 + ], + "blocks": [ + { + "bbox": [ + 390, + 236, + 467, + 293 + ], + "lines": [ + { + "bbox": [ + 390, + 236, + 467, + 293 + ], + "spans": [ + { + "bbox": [ + 390, + 236, + 467, + 293 + ], + "type": "image", + "image_path": "4a1a563a15f47748a088359df125a6570a6b1dc58f1080f2d38d0b31e57163e9.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 420, + 293, + 439, + 307 + ], + "blocks": [ + { + "bbox": [ + 420, + 293, + 439, + 307 + ], + "lines": [ + { + "bbox": [ + 420, + 293, + 439, + 307 + ], + "spans": [ + { + "bbox": [ + 420, + 293, + 439, + 307 + ], + "type": "image", + "image_path": "ad192d46e9560e1148a60d5966194b8e3af4f3fc412c3047cab7eb537367d8ed.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 414, + 319, + 464, + 375 + ], + "blocks": [ + { + "bbox": [ + 402, + 312, + 420, + 320 + ], + "lines": [ + { + "bbox": [ + 402, + 312, + 420, + 320 + ], + "spans": [ + { + "bbox": [ + 402, + 312, + 420, + 320 + ], + "type": "text", + "content": "gSDF" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 414, + 319, + 464, + 375 + ], + "lines": [ + { + "bbox": [ + 414, + 319, + 464, + 375 + ], + "spans": [ + { + "bbox": [ + 414, + 319, + 464, + 375 + ], + "type": "image", + "image_path": "b25647d7d93da1380b6c3a96306a849f93a0482f741ad7b0b0e71833b5ed57de.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 414, + 375, + 439, + 388 + ], + "blocks": [ + { + "bbox": [ + 414, + 375, + 439, + 388 + ], + "lines": [ + { + "bbox": [ + 414, + 375, + 439, + 388 + ], + "spans": [ + { + "bbox": [ + 414, + 375, + 439, + 388 + ], + "type": "image", + "image_path": "62f855d278fa1773f1ccd345c22d665c228c5025bdf03be93579addd06590db3.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 394, + 389, + 430, + 447 + ], + "blocks": [ + { + "bbox": [ + 394, + 389, + 430, + 447 + ], + "lines": [ + { + "bbox": [ + 394, + 389, + 430, + 447 + ], + "spans": [ + { + "bbox": [ + 394, + 389, + 430, + 447 + ], + "type": "image", + "image_path": "497e1728de1b8fbc5a2151c53cdd64addffc970583c47f9fcdf96fd3962bb525.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 468, + 236, + 545, + 293 + ], + "blocks": [ + { + "bbox": [ + 468, + 236, + 545, + 293 + ], + "lines": [ + { + "bbox": [ + 468, + 236, + 545, + 293 + ], + "spans": [ + { + "bbox": [ + 468, + 236, + 545, + 293 + ], + "type": "image", + "image_path": "32542e1d490c939963f930ab36bafdb27f211b811804d61961326a153108a97d.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + } + ], + "index": 
27 + }, + { + "type": "image", + "bbox": [ + 480, + 293, + 515, + 307 + ], + "blocks": [ + { + "bbox": [ + 480, + 293, + 515, + 307 + ], + "lines": [ + { + "bbox": [ + 480, + 293, + 515, + 307 + ], + "spans": [ + { + "bbox": [ + 480, + 293, + 515, + 307 + ], + "type": "image", + "image_path": "08b8aa3975bd581498a60f555dd8d76e8ded955cf04ab2f8770c8ea9c8e38b36.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 479, + 312, + 496, + 320 + ], + "lines": [ + { + "bbox": [ + 479, + 312, + 496, + 320 + ], + "spans": [ + { + "bbox": [ + 479, + 312, + 496, + 320 + ], + "type": "text", + "content": "gSDF" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 486, + 328, + 531, + 360 + ], + "blocks": [ + { + "bbox": [ + 486, + 328, + 531, + 360 + ], + "lines": [ + { + "bbox": [ + 486, + 328, + 531, + 360 + ], + "spans": [ + { + "bbox": [ + 486, + 328, + 531, + 360 + ], + "type": "image", + "image_path": "edbdf5237d3dd6f6eb8457cc4d27704ccd1490f337b318a0858c637ff68236a9.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 494, + 376, + 516, + 388 + ], + "blocks": [ + { + "bbox": [ + 494, + 376, + 516, + 388 + ], + "lines": [ + { + "bbox": [ + 494, + 376, + 516, + 388 + ], + "spans": [ + { + "bbox": [ + 494, + 376, + 516, + 388 + ], + "type": "image", + "image_path": "70890cfc46200935f42539a0e94cd1527bee74410b7c6512a569baa97443b19a.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 468, + 389, + 507, + 448 + ], + "blocks": [ + { + "bbox": [ + 468, + 389, + 507, + 448 + ], + "lines": [ + { + "bbox": [ + 468, + 389, + 507, + 448 + ], + "spans": [ + { + "bbox": [ + 468, + 389, + 507, + 448 + ], + "type": "image", + "image_path": "1d92d9abaeb0505145b8c2e27e8d30a41950b3b74e2df184eb0c889c34160c26.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 511, + 402, + 541, + 436 + ], + "blocks": [ + { + "bbox": [ + 511, + 402, + 541, + 436 + ], + "lines": [ + { + "bbox": [ + 511, + 402, + 541, + 436 + ], + "spans": [ + { + "bbox": [ + 511, + 402, + 541, + 436 + ], + "type": "image", + "image_path": "79e91292827ed0312f99c1f1bd6de33f036784d32fdf77c6fa9dbc407bd1262d.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + } + ], + "index": 33 + }, + { + "bbox": [ + 304, + 567, + 547, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 567, + 547, + 616 + ], + "spans": [ + { + "bbox": [ + 304, + 567, + 547, + 616 + ], + "type": "text", + "content": "pose prior knowledge and reduce ambiguities in 3D hand reconstruction. MANO hand meshes, however, have relatively limited resolution and can be suboptimal for the precise capture of hand-object interactions." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "text", + "content": "To reconstruct detailed hand and object meshes, another line of efforts [11, 26] employ signed distance functions (SDFs). Grasping Field [26] makes the first attempt to model hand and object surfaces using SDFs. 
However, it does not explicitly associate 3D geometry with image cues and has no prior knowledge incorporated in SDFs, leading to unrealistic meshes. AlignSDF [11] proposes to align SDFs with respect to global poses (i.e., the hand wrist transformation and the" + } + ] + } + ], + "index": 36 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "12890" + } + ] + } + ], + "index": 37 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 120 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 120 + ], + "type": "text", + "content": "object translation) and produces improved results. However, it is still challenging to capture geometric details for more complex hand motions and manipulations of diverse objects, which involve the articulation of multiple fingers." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 121, + 287, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 121, + 287, + 299 + ], + "spans": [ + { + "bbox": [ + 48, + 121, + 287, + 299 + ], + "type": "text", + "content": "To address limitations of prior works, we propose a geometry-driven SDF (gSDF) method that encodes strong pose priors and improves reconstruction by disentangling pose and shape estimation (see Figure 1). To this end, we first predict sparse 3D hand joints from images and derive full kinematic chains of local pose transformations from joint locations using inverse kinematics. Instead of only using the global pose as in [11], we optimize SDFs with respect to poses of all the hand joints, which leads to a more fine-grained alignment between the 3D shape and articulated hand poses. In addition, we project 3D points onto the image plane to extract geometry-aligned visual features for signed distance prediction. The visual features are further refined with spatio-temporal contexts using a transformer model to enhance the robustness to occlusions and motion blurs." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 300, + 287, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 300, + 287, + 467 + ], + "spans": [ + { + "bbox": [ + 48, + 300, + 287, + 467 + ], + "type": "text", + "content": "We conduct extensive ablation experiments to show the effectiveness of different components in our approach. The proposed gSDF model greatly advances state-of-the-art accuracy on the challenging ObMan and DexYCB benchmarks. 
Our contributions can be summarized in three-fold: (i) To embed strong pose priors into SDFs, we propose to align the SDF shape with its underlying kinematic chains of pose transformations, which reduces ambiguities in 3D reconstruction. (ii) To further reduce the misalignment induced by inaccurate pose estimations, we propose to extract geometry-aligned local visual features and enhance the robustness with spatio-temporal contexts. (iii) We conduct comprehensive experiments to show that our approach outperforms state-of-the-art results by a significant margin." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 478, + 133, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 478, + 133, + 491 + ], + "spans": [ + { + "bbox": [ + 48, + 478, + 133, + 491 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 498, + 287, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 498, + 287, + 557 + ], + "spans": [ + { + "bbox": [ + 48, + 498, + 287, + 557 + ], + "type": "text", + "content": "This paper focuses on jointly reconstructing hands and hand-held objects from RGB images. In this section, we first review previous works on the 3D hand pose and shape estimation. We then discuss relevant works on the joint reconstruction of hands and objects." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 558, + 287, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 558, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 558, + 287, + 712 + ], + "type": "text", + "content": "3D hand pose and shape estimation. The topic of 3D hand pose estimation has received widespread attention since the 90s [23, 45] and has seen significant progress in recent years [31, 65]. Methods which take RGB images as input [24, 36, 38, 39, 48, 50, 51, 53, 59, 67] often estimate sparse 3D hand joint locations from visual data using well-designed deep neural networks. Though these methods can achieve high estimation accuracy, their outputs of 3D sparse joints provide limited information about the 3D hand surface, which is critical in AR/VR applications. Following the introduction of the anthropomorphic parametric hand mesh model MANO [46], several works [2, 5, 10, 18, 29, 30, 32, 34, 40, 57] estimate the MANO hand shape and pose parameters to" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 307, + 72, + 547, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 72, + 547, + 191 + ], + "spans": [ + { + "bbox": [ + 307, + 72, + 547, + 191 + ], + "type": "text", + "content": "recover the full hand surface. However, MANO has a limited mesh resolution and cannot produce fine surface details. Neural implicit functions [13,25] have the potential to reconstruct more realistic high resolution hand surfaces [12,37,42]. In this work, we combine the advantages of sparse, parametric and implicit modelling. We predict sparse 3D joints accurately from images and estimate the MANO parameters using inverse kinematics. We then optimize neural implicit functions with respect to underlying kinematic structures and reconstruct realistic meshes." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 307, + 198, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 198, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 198, + 547, + 713 + ], + "type": "text", + "content": "3D hand and object reconstruction. 
Joint reconstruction of hand and object meshes provides a more comprehensive view about how hands interact with manipulated objects in the 3D space and has received more attention in the past few years. Previous works often rely on multiview correspondence [3,9,17,41,58,61] or additional depth information [15, 16, 49, 55, 56] to approach this task. In this work, we focus on a more challenging setting and perform a joint reconstruction from monocular RGB images. Given the ill-posed nature of this problem, many works [7, 18-21, 54, 60, 62] deploy MANO, which encodes hand prior knowledge learned from hand scans, to reconstruct hand meshes. To further simplify the object reconstruction task, several works [18, 60, 62] make a strong assumption that the ground-truth object model is available at test time. Our work and some previous efforts [11, 21, 26] relax this assumption and assume unknown object models. Hasson et al. [21] employ a differentiable MANO layer to estimate the hand shape and AtlasNet [14] to reconstruct the manipulated object. However, both MANO and AtlasNet can only produce meshes of limited resolution, which prevents the modelling of detailed contacts between hands and objects. To generate more detailed surfaces, Karunratanakul et al. [26] introduce grasping fields and propose to use SDFs to reconstruct both hand and object meshes. However, such a model-free approach does not capture any prior knowledge about hands or objects, which can lead to predicting unrealistic 3D geometry. To mitigate this, Ye et al. [63] propose to use hand poses estimated from an off-the-shelf model to help reconstruct the hand-held object mesh. The main difference with our work is that we jointly reconstruct hand meshes and object meshes using our proposed model, which is more challenging. Also, in addition to using hand poses to help capture the object shapes, we predict object poses and show their benefits for SDF-based object reconstruction. Another work AlignSDF [11] optimizes SDFs with respect to estimated hand-object global poses and encodes pose priors into SDFs. In addition to using global poses as a guide for SDFs, we propose to learn SDFs from the full kinematic chains of local pose transformations, and achieve a more precise alignment between the 3D shape and the underlying poses. To further handle hard cases induced by occlusion or motion blur where pose estimations are inaccurate, we leverage" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "text", + "content": "12891" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 72, + 547, + 242 + ], + "blocks": [ + { + "bbox": [ + 49, + 72, + 547, + 242 + ], + "lines": [ + { + "bbox": [ + 49, + 72, + 547, + 242 + ], + "spans": [ + { + "bbox": [ + 49, + 72, + 547, + 242 + ], + "type": "image", + "image_path": "9b8c982f7483297e13d7b608462e4d57d786c39f3cc17a94fefbd4d06d0d9e33.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 251, + 547, + 275 + ], + "lines": [ + { + "bbox": [ + 46, + 251, + 547, + 275 + ], + "spans": [ + { + "bbox": [ + 46, + 251, + 547, + 275 + ], + "type": "text", + "content": "Figure 2. The overview of our proposed single-frame model. 
Our method reconstructs realistic hand and object meshes from a single RGB image. Marching Cubes algorithm [33] is used at test time to extract meshes." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 292, + 288, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 292, + 288, + 317 + ], + "spans": [ + { + "bbox": [ + 46, + 292, + 288, + 317 + ], + "type": "text", + "content": "a transformer to accumulate corresponding image features from multiple frames and benefit the geometry recovery." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 328, + 104, + 341 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 328, + 104, + 341 + ], + "spans": [ + { + "bbox": [ + 47, + 328, + 104, + 341 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 349, + 289, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 349, + 289, + 458 + ], + "spans": [ + { + "bbox": [ + 46, + 349, + 289, + 458 + ], + "type": "text", + "content": "This section presents our geometry-driven SDF (gSDF) method for 3D hand and object reconstruction from monocular RGB images. We aim to learn two signed distance functions " + }, + { + "bbox": [ + 46, + 349, + 289, + 458 + ], + "type": "inline_equation", + "content": "\\mathrm{SDF}_{hand}" + }, + { + "bbox": [ + 46, + 349, + 289, + 458 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 349, + 289, + 458 + ], + "type": "inline_equation", + "content": "\\mathrm{SDF}_{obj}" + }, + { + "bbox": [ + 46, + 349, + 289, + 458 + ], + "type": "text", + "content": " to implicitly represent 3D shapes for the hand and the object. The " + }, + { + "bbox": [ + 46, + 349, + 289, + 458 + ], + "type": "inline_equation", + "content": "\\mathrm{SDF}_{hand}" + }, + { + "bbox": [ + 46, + 349, + 289, + 458 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 349, + 289, + 458 + ], + "type": "inline_equation", + "content": "\\mathrm{SDF}_{obj}" + }, + { + "bbox": [ + 46, + 349, + 289, + 458 + ], + "type": "text", + "content": " map a query 3D point " + }, + { + "bbox": [ + 46, + 349, + 289, + 458 + ], + "type": "inline_equation", + "content": "x\\in \\mathbb{R}^3" + }, + { + "bbox": [ + 46, + 349, + 289, + 458 + ], + "type": "text", + "content": " to a signed distance from the hand surface and object surface, respectively. The Marching Cubes algorithm [33] can thus be employed to reconstruct the hand and the object from " + }, + { + "bbox": [ + 46, + 349, + 289, + 458 + ], + "type": "inline_equation", + "content": "\\mathrm{SDF}_{hand}" + }, + { + "bbox": [ + 46, + 349, + 289, + 458 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 349, + 289, + 458 + ], + "type": "inline_equation", + "content": "\\mathrm{SDF}_{obj}" + }, + { + "bbox": [ + 46, + 349, + 289, + 458 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 466, + 155, + 479 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 466, + 155, + 479 + ], + "spans": [ + { + "bbox": [ + 47, + 466, + 155, + 479 + ], + "type": "text", + "content": "3.1. 
Overview of gSDF" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 485, + 288, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 485, + 288, + 533 + ], + "spans": [ + { + "bbox": [ + 46, + 485, + 288, + 533 + ], + "type": "text", + "content": "Figure 2 illustrates the overview of our gSDF reconstruction approach. Given an image " + }, + { + "bbox": [ + 46, + 485, + 288, + 533 + ], + "type": "inline_equation", + "content": "I_{t}" + }, + { + "bbox": [ + 46, + 485, + 288, + 533 + ], + "type": "text", + "content": ", we extract two types of features to predict the signed distance for each query point " + }, + { + "bbox": [ + 46, + 485, + 288, + 533 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 485, + 288, + 533 + ], + "type": "text", + "content": ", namely kinematic features and visual features." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 533, + 288, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 533, + 288, + 593 + ], + "spans": [ + { + "bbox": [ + 46, + 533, + 288, + 593 + ], + "type": "text", + "content": "The kinematic feature encodes the position of " + }, + { + "bbox": [ + 46, + 533, + 288, + 593 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 533, + 288, + 593 + ], + "type": "text", + "content": " under the coordinate system of the hand or the object, which can provide strong pose priors to assist SDF learning. Since the feature is based on canonical hand and object poses, it helps to disentangles shape learning from pose learning." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "text", + "content": "The existing work [63] proposes to use hand poses for reconstructing object meshes but does not consider using pose priors to reconstruct hand meshes. Another work [11] only deploys coarse geometry in terms of the hand wrist object locations, which fails to capture fine-grained details. In this work, we aim to strengthen the kinematic feature with geometry transformation of " + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 594, + 289, + 714 + ], + "type": "text", + "content": " to poses of all the hand joints (see Figure 3) for both the hand and the object reconstruction. However, it is challenging to directly predict hand pose parameters [6,28,66]. To improve the hand pose estimation," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 292, + 547, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 292, + 547, + 353 + ], + "spans": [ + { + "bbox": [ + 304, + 292, + 547, + 353 + ], + "type": "text", + "content": "we propose to first predict sparse 3D joint locations " + }, + { + "bbox": [ + 304, + 292, + 547, + 353 + ], + "type": "inline_equation", + "content": "j_{h}" + }, + { + "bbox": [ + 304, + 292, + 547, + 353 + ], + "type": "text", + "content": " from the image and then use inverse kinematics to derive pose transformations " + }, + { + "bbox": [ + 304, + 292, + 547, + 353 + ], + "type": "inline_equation", + "content": "\\theta_{h}" + }, + { + "bbox": [ + 304, + 292, + 547, + 353 + ], + "type": "text", + "content": " from the predicted joints. 
In this way, we are able to obtain kinematic features " + }, + { + "bbox": [ + 304, + 292, + 547, + 353 + ], + "type": "inline_equation", + "content": "e_{h}" + }, + { + "bbox": [ + 304, + 292, + 547, + 353 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 292, + 547, + 353 + ], + "type": "inline_equation", + "content": "e_{o}" + }, + { + "bbox": [ + 304, + 292, + 547, + 353 + ], + "type": "text", + "content": " for the hand and the object respectively." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 357, + 548, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 357, + 548, + 536 + ], + "spans": [ + { + "bbox": [ + 304, + 357, + 548, + 536 + ], + "type": "text", + "content": "The visual feature encodes the visual appearance for the point " + }, + { + "bbox": [ + 304, + 357, + 548, + 536 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 304, + 357, + 548, + 536 + ], + "type": "text", + "content": " to provide more shape details. Prior works [11, 26] use the same global visual feature for all the points, e.g., averaging the feature map of a SDF feature encoder on the spatial dimension. Such global visual features suffers from imprecise geometry alignment between a point and its visual appearance. To alleviate the limitation, inspired by [47], we apply the geometry transformation to extract aligned local visual features. Moreover, to address hard cases with occlusions and motion blur in a single image " + }, + { + "bbox": [ + 304, + 357, + 548, + 536 + ], + "type": "inline_equation", + "content": "I_{t}" + }, + { + "bbox": [ + 304, + 357, + 548, + 536 + ], + "type": "text", + "content": ", we propose to enhance the local visual feature with its temporal contexts from videos using a spatio-temporal transformer. We denote the local visual feature of a point as " + }, + { + "bbox": [ + 304, + 357, + 548, + 536 + ], + "type": "inline_equation", + "content": "e_v" + }, + { + "bbox": [ + 304, + 357, + 548, + 536 + ], + "type": "text", + "content": ". 
Finally, we concatenate the kinematic feature and local visual feature to predict the signed distance for " + }, + { + "bbox": [ + 304, + 357, + 548, + 536 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 304, + 357, + 548, + 536 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 365, + 555, + 545, + 583 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 555, + 545, + 583 + ], + "spans": [ + { + "bbox": [ + 365, + 555, + 545, + 583 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\begin{array}{l} \\mathrm {S D F} _ {\\text {h a n d}} (x) = f _ {h} ([ e _ {v}; e _ {h} ]), \\\\ \\mathrm {S D F} _ {\\text {e x p}} (x) = f _ {h} ([ e _ {v}; e _ {h} ]) \\end{array} \\tag {1} \\\\ \\mathrm {S D F} _ {\\text {o b j e c t}} (x) = f _ {o} ([ e _ {v}; e _ {o} ]), \\\\ \\end{array}", + "image_path": "aeb219115b5b67f985813d99c50299e058ea3f3ab36efb2c3f2203ca1783527d.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 601, + 547, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 601, + 547, + 625 + ], + "spans": [ + { + "bbox": [ + 304, + 601, + 547, + 625 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 601, + 547, + 625 + ], + "type": "inline_equation", + "content": "f_{h}" + }, + { + "bbox": [ + 304, + 601, + 547, + 625 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 601, + 547, + 625 + ], + "type": "inline_equation", + "content": "f_{o}" + }, + { + "bbox": [ + 304, + 601, + 547, + 625 + ], + "type": "text", + "content": " are the hand SDF decoder and the object SDF decoder respectively." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 630, + 548, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 548, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 548, + 712 + ], + "type": "text", + "content": "In the following, we first present the proposed geometry-driven kinematic feature and visual feature encodings in Section 3.2 and 3.3 respectively. Then, in Section 3.4 we introduce different strategies of sharing image backbones for hand and object pose predictors as well as the SDF feature encoder. Finally, the training strategy of our model is described in Section 3.5." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "12892" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 70, + 168, + 152 + ], + "blocks": [ + { + "bbox": [ + 50, + 70, + 168, + 152 + ], + "lines": [ + { + "bbox": [ + 50, + 70, + 168, + 152 + ], + "spans": [ + { + "bbox": [ + 50, + 70, + 168, + 152 + ], + "type": "image", + "image_path": "f34350c6deed27ad5477d84b749b1f608ddac3aa1cd56b24bf45eee708218c83.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 176, + 70, + 284, + 152 + ], + "blocks": [ + { + "bbox": [ + 176, + 70, + 284, + 152 + ], + "lines": [ + { + "bbox": [ + 176, + 70, + 284, + 152 + ], + "spans": [ + { + "bbox": [ + 176, + 70, + 284, + 152 + ], + "type": "image", + "image_path": "d3016ffe59ac56b52df47e56a812684a22855a7924bb78ac556142fa44c5473a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 49, + 153, + 167, + 245 + ], + "blocks": [ + { + "bbox": [ + 49, + 153, + 167, + 245 + ], + "lines": [ + { + "bbox": [ + 49, + 153, + 167, + 245 + ], + "spans": [ + { + "bbox": [ + 49, + 153, + 167, + 245 + ], + "type": "image", + "image_path": "551a9977fb27793d12b2e9380a58e895ad757e56a21c4cf6256218d284242093.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 255, + 288, + 300 + ], + "lines": [ + { + "bbox": [ + 46, + 255, + 288, + 300 + ], + "spans": [ + { + "bbox": [ + 46, + 255, + 288, + 300 + ], + "type": "text", + "content": "Figure 3. We define hand and object features by transforming queries " + }, + { + "bbox": [ + 46, + 255, + 288, + 300 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 255, + 288, + 300 + ], + "type": "text", + "content": " into hand- and object-centered coordinate systems. Compared to AlignSDF [11] (left), each hand joint in our method defines its own coordinate frame." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 171, + 153, + 284, + 245 + ], + "blocks": [ + { + "bbox": [ + 171, + 153, + 284, + 245 + ], + "lines": [ + { + "bbox": [ + 171, + 153, + 284, + 245 + ], + "spans": [ + { + "bbox": [ + 171, + 153, + 284, + 245 + ], + "type": "image", + "image_path": "2351cd87d20dfea867192f8de15c2fd463fbaa5e9e442690d712867b537726fb.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 327, + 205, + 340 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 327, + 205, + 340 + ], + "spans": [ + { + "bbox": [ + 47, + 327, + 205, + 340 + ], + "type": "text", + "content": "3.2. Kinematic Feature Encoding" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 348, + 289, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 348, + 289, + 576 + ], + "spans": [ + { + "bbox": [ + 46, + 348, + 289, + 576 + ], + "type": "text", + "content": "Hand and object pose estimation. 
Directly regressing hand pose parameters of MANO from image features [11, 19, 21] has proved to be difficult [6, 28, 66]. In contrast, predicting sparse 3D joint locations is easier and can achieve higher accuracy. Therefore, we first train a 3D hand joint prediction model which produces volumetric heatmaps [38, 44] for 21 hand joints. We use a differentiable soft-argmax operator [50] to extract 3D coordinates " + }, + { + "bbox": [ + 46, + 348, + 289, + 576 + ], + "type": "inline_equation", + "content": "\\psi_h \\in \\mathbb{R}^{21 \\times 3}" + }, + { + "bbox": [ + 46, + 348, + 289, + 576 + ], + "type": "text", + "content": " of hand joints from the heatmaps. We then obtain an analytic solution for hand poses " + }, + { + "bbox": [ + 46, + 348, + 289, + 576 + ], + "type": "inline_equation", + "content": "\\theta_h \\in \\mathbb{R}^{16 \\times 3}, \\phi_h \\in \\mathbb{R}^{16 \\times 3}" + }, + { + "bbox": [ + 46, + 348, + 289, + 576 + ], + "type": "text", + "content": " from estimated 3D joints " + }, + { + "bbox": [ + 46, + 348, + 289, + 576 + ], + "type": "inline_equation", + "content": "\\psi_h" + }, + { + "bbox": [ + 46, + 348, + 289, + 576 + ], + "type": "text", + "content": " using inverse kinematics, where each " + }, + { + "bbox": [ + 46, + 348, + 289, + 576 + ], + "type": "inline_equation", + "content": "\\theta_{h,i} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 46, + 348, + 289, + 576 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 348, + 289, + 576 + ], + "type": "inline_equation", + "content": "\\theta_{h,i} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 46, + 348, + 289, + 576 + ], + "type": "text", + "content": " denote the relative pose of " + }, + { + "bbox": [ + 46, + 348, + 289, + 576 + ], + "type": "inline_equation", + "content": "i_{th}" + }, + { + "bbox": [ + 46, + 348, + 289, + 576 + ], + "type": "text", + "content": " joint in terms of rotation and translation with respect to its ancestor joint. Here, we only calculate the rotation and use the default limb lengths provided by the MANO model. Specifically, we first compute the pose of the hand wrist using the template pose defined in MANO, and then follow the hand kinematic chain to solve the pose of other finger joints recursively. More details are presented in the supplementary material." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 578, + 288, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 578, + 288, + 639 + ], + "spans": [ + { + "bbox": [ + 46, + 578, + 288, + 639 + ], + "type": "text", + "content": "For the object pose estimation, it is often difficult to accurately estimate the rotation of the object since many objects have a high degree of symmetry and are often occluded by hands. We therefore follow [11] and only estimate the center position of the object " + }, + { + "bbox": [ + 46, + 578, + 288, + 639 + ], + "type": "inline_equation", + "content": "\\psi_{o} \\in \\mathbb{R}^{3}" + }, + { + "bbox": [ + 46, + 578, + 288, + 639 + ], + "type": "text", + "content": " relative to the hand wrist." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 641, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 289, + 715 + ], + "type": "text", + "content": "Hand kinematic feature. 
Given the 3D point " + }, + { + "bbox": [ + 46, + 641, + 289, + 715 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 641, + 289, + 715 + ], + "type": "text", + "content": ", we generate the hand kinematic feature " + }, + { + "bbox": [ + 46, + 641, + 289, + 715 + ], + "type": "inline_equation", + "content": "e_h \\in \\mathbb{R}^{51}" + }, + { + "bbox": [ + 46, + 641, + 289, + 715 + ], + "type": "text", + "content": " by transforming " + }, + { + "bbox": [ + 46, + 641, + 289, + 715 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 641, + 289, + 715 + ], + "type": "text", + "content": " into canonical coordinate frames defined by hand joints. Figure 3(top,right) illustrates the proposed geometry transformation for the hand. For the " + }, + { + "bbox": [ + 46, + 641, + 289, + 715 + ], + "type": "inline_equation", + "content": "i_{th}" + }, + { + "bbox": [ + 46, + 641, + 289, + 715 + ], + "type": "text", + "content": " hand joint pose " + }, + { + "bbox": [ + 46, + 641, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\theta_{h,i}, \\phi_{h,i}" + }, + { + "bbox": [ + 46, + 641, + 289, + 715 + ], + "type": "text", + "content": ", the pose transformation " + }, + { + "bbox": [ + 46, + 641, + 289, + 715 + ], + "type": "inline_equation", + "content": "T_p(x, \\theta_{h,i}, \\phi_{h,i})" + }, + { + "bbox": [ + 46, + 641, + 289, + 715 + ], + "type": "text", + "content": " to obtain the local" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 72, + 494, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 494, + 84 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 494, + 84 + ], + "type": "text", + "content": "hand kinematic feature " + }, + { + "bbox": [ + 305, + 72, + 494, + 84 + ], + "type": "inline_equation", + "content": "e_{h,i}\\in \\mathbb{R}^3" + }, + { + "bbox": [ + 305, + 72, + 494, + 84 + ], + "type": "text", + "content": " is defined as" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 336, + 93, + 545, + 125 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 336, + 93, + 545, + 125 + ], + "spans": [ + { + "bbox": [ + 336, + 93, + 545, + 125 + ], + "type": "interline_equation", + "content": "G _ {h, i} = \\prod_ {j \\in A (i)} \\left[ \\begin{array}{c c} \\exp (\\theta_ {h, j}) & \\phi_ {h, j} \\\\ \\hline 0 & 1 \\end{array} \\right], \\tag {2}", + "image_path": "90c02d215e2925c07734335293c86bee415b80d2020c9d9b43efc308c77376e6.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 337, + 128, + 514, + 144 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 337, + 128, + 514, + 144 + ], + "spans": [ + { + "bbox": [ + 337, + 128, + 514, + 144 + ], + "type": "interline_equation", + "content": "e _ {h, i} = T _ {p} (x, \\theta_ {h, i}, \\phi_ {h, i}) = \\widetilde {H} (G _ {h, i} ^ {- 1} \\cdot H (x)),", + "image_path": "b61b5080fccbabce84f97324439afd1b8473e56727ab4dc2d913f2c7944d5206.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "spans": [ + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "inline_equation", + "content": "A(i)" + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "text", + "content": " denotes the ordered set of ancestors of the " + }, + { + 
"bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "inline_equation", + "content": "i_{th}" + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "text", + "content": " joint. We use Rodrigues formula " + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "inline_equation", + "content": "\\exp (\\cdot)" + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "text", + "content": " to convert " + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "inline_equation", + "content": "\\theta_{h,i}" + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "text", + "content": " into the form of a rotation matrix. By traversing the hand kinematic chain, we obtain the global transformation " + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "inline_equation", + "content": "G_{h,i}\\in \\mathbb{R}^{4\\times 4}" + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "text", + "content": " for the " + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "inline_equation", + "content": "i_{th}" + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "text", + "content": " joint. Then, we take the inverse of " + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "inline_equation", + "content": "G_{h,i}" + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "text", + "content": " to transform " + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "text", + "content": " into the " + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "inline_equation", + "content": "i_{th}" + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "text", + "content": " hand joint canonical coordinates. " + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "inline_equation", + "content": "H(\\cdot)" + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "text", + "content": " transforms " + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "text", + "content": " into homogeneous coordinates while " + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "inline_equation", + "content": "\\widetilde{H} (\\cdot)" + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "text", + "content": " transforms homogeneous coordinates back to Euclidean coordinates. Given local kinematic features " + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "inline_equation", + "content": "e_{h,i}" + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "text", + "content": ", the hand kinematic feature " + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "inline_equation", + "content": "e_h\\in \\mathbb{R}^{51}" + }, + { + "bbox": [ + 305, + 152, + 547, + 274 + ], + "type": "text", + "content": " is defined as:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 373, + 284, + 545, + 299 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 373, + 284, + 545, + 299 + ], + "spans": [ + { + "bbox": [ + 373, + 284, + 545, + 299 + ], + "type": "interline_equation", + "content": "e _ {h} = \\left[ x, e _ {h, 1}, \\dots , e _ {h, 1 6} \\right]. 
\\tag {3}", + "image_path": "9e976a515117f8a335d0913029a4f2c1629a3cb0f535abccf1fa7ebcc746458f.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 308, + 547, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 308, + 547, + 429 + ], + "spans": [ + { + "bbox": [ + 305, + 308, + 547, + 429 + ], + "type": "text", + "content": "Object kinematic feature. To obtain geometry-aware SDF for object reconstruction, we propose object kinematic feature " + }, + { + "bbox": [ + 305, + 308, + 547, + 429 + ], + "type": "inline_equation", + "content": "e_{o} \\in \\mathbb{R}^{72}" + }, + { + "bbox": [ + 305, + 308, + 547, + 429 + ], + "type": "text", + "content": ". Following [11], we use estimated object center " + }, + { + "bbox": [ + 305, + 308, + 547, + 429 + ], + "type": "inline_equation", + "content": "\\psi_{o}" + }, + { + "bbox": [ + 305, + 308, + 547, + 429 + ], + "type": "text", + "content": " to transform " + }, + { + "bbox": [ + 305, + 308, + 547, + 429 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 305, + 308, + 547, + 429 + ], + "type": "text", + "content": " into the object canonical coordinate frame by the translation transformation " + }, + { + "bbox": [ + 305, + 308, + 547, + 429 + ], + "type": "inline_equation", + "content": "x_{oc} = T_t(x, \\psi_o) = x - \\psi_o" + }, + { + "bbox": [ + 305, + 308, + 547, + 429 + ], + "type": "text", + "content": ". As the grasping hand pose also gives hints about the shape of the manipulated object, similar to [63] we incorporate the knowledge of hand poses into object reconstruction. To this end, for each joint " + }, + { + "bbox": [ + 305, + 308, + 547, + 429 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 305, + 308, + 547, + 429 + ], + "type": "text", + "content": " and its estimated 3D location " + }, + { + "bbox": [ + 305, + 308, + 547, + 429 + ], + "type": "inline_equation", + "content": "\\psi_{h,i}" + }, + { + "bbox": [ + 305, + 308, + 547, + 429 + ], + "type": "text", + "content": ", we transform " + }, + { + "bbox": [ + 305, + 308, + 547, + 429 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 305, + 308, + 547, + 429 + ], + "type": "text", + "content": " by translation as" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 363, + 438, + 545, + 452 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 363, + 438, + 545, + 452 + ], + "spans": [ + { + "bbox": [ + 363, + 438, + 545, + 452 + ], + "type": "interline_equation", + "content": "e _ {o, i} = T _ {t} \\left(x, \\psi_ {h, i}\\right) = x - \\psi_ {j, i}. 
\\tag {4}", + "image_path": "c7784e0ae4a7a1b5ca303baa8591651cf2667cffb93fe11a52961d6cbfaa840e.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 460, + 547, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 460, + 547, + 533 + ], + "spans": [ + { + "bbox": [ + 304, + 460, + 547, + 533 + ], + "type": "text", + "content": "Given the importance of the wrist motion for object grasping, we also transform " + }, + { + "bbox": [ + 304, + 460, + 547, + 533 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 304, + 460, + 547, + 533 + ], + "type": "text", + "content": " into the canonical coordinate system of the hand wrist " + }, + { + "bbox": [ + 304, + 460, + 547, + 533 + ], + "type": "inline_equation", + "content": "x_{ow} = T_p(x,\\theta_{h,1},\\phi_{h,1}) = \\widetilde{H}(G_{h,1}^{-1}\\cdot H(x))" + }, + { + "bbox": [ + 304, + 460, + 547, + 533 + ], + "type": "text", + "content": ", which normalizes the orientation of the grasping and further simplifies the task for the SDF object decoder. The object kinematic feature is then defined by " + }, + { + "bbox": [ + 304, + 460, + 547, + 533 + ], + "type": "inline_equation", + "content": "e_o\\in \\mathbb{R}^{72}" + }, + { + "bbox": [ + 304, + 460, + 547, + 533 + ], + "type": "text", + "content": " as" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 354, + 544, + 545, + 557 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 354, + 544, + 545, + 557 + ], + "spans": [ + { + "bbox": [ + 354, + 544, + 545, + 557 + ], + "type": "interline_equation", + "content": "e _ {o} = \\left[ x, x _ {o c}, e _ {o, 1}, \\dots , e _ {o, 2 1}, x _ {o w} \\right]. \\tag {5}", + "image_path": "7fc73fe5abd57dc4336af392ed1ac39c13be568fc3cd3ad47752ee8faa39b15b.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 567, + 545, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 567, + 545, + 591 + ], + "spans": [ + { + "bbox": [ + 305, + 567, + 545, + 591 + ], + "type": "text", + "content": "Figure 3(bottom,right) illustrates the proposed geometry transformation for the object kinematic feature." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 599, + 444, + 612 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 599, + 444, + 612 + ], + "spans": [ + { + "bbox": [ + 306, + 599, + 444, + 612 + ], + "type": "text", + "content": "3.3. Visual Feature Encoding" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "text", + "content": "Geometry-aligned visual feature. Previous works [11, 26] typically predict signed distances from global image features that lack spatial resolution. Motivated by [47], we aim to generate geometry-aligned local image features for each input point " + }, + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "text", + "content": ". Assume " + }, + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "inline_equation", + "content": "v_{t}^{r} \\in \\mathbb{R}^{16 \\times 16 \\times d}" + }, + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "text", + "content": " is the feature map generated from the SDF feature encoder, e.g. 
a ResNet model [22], where " + }, + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "inline_equation", + "content": "16 \\times 16" + }, + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "text", + "content": " is the spatial feature resolution and " + }, + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "text", + "content": " is the feature dimension. We project the 3D input point " + }, + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "inline_equation", + "content": "\\hat{x}" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12893" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 72, + 284, + 132 + ], + "blocks": [ + { + "bbox": [ + 50, + 72, + 284, + 132 + ], + "lines": [ + { + "bbox": [ + 50, + 72, + 284, + 132 + ], + "spans": [ + { + "bbox": [ + 50, + 72, + 284, + 132 + ], + "type": "image", + "image_path": "e508e991fdf28c4839c4ac860e62390834ab9612d013c2de1f2f1294c9129aea.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 133, + 136, + 201, + 145 + ], + "lines": [ + { + "bbox": [ + 133, + 136, + 201, + 145 + ], + "spans": [ + { + "bbox": [ + 133, + 136, + 201, + 145 + ], + "type": "text", + "content": "(a) Single backbone." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 50, + 148, + 284, + 209 + ], + "blocks": [ + { + "bbox": [ + 50, + 148, + 284, + 209 + ], + "lines": [ + { + "bbox": [ + 50, + 148, + 284, + 209 + ], + "spans": [ + { + "bbox": [ + 50, + 148, + 284, + 209 + ], + "type": "image", + "image_path": "5f164d4994fa36e30dfd0fb25c76d4c17e360706b8c42017358c9dbf0291e986.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 126, + 212, + 208, + 220 + ], + "lines": [ + { + "bbox": [ + 126, + 212, + 208, + 220 + ], + "spans": [ + { + "bbox": [ + 126, + 212, + 208, + 220 + ], + "type": "text", + "content": "(b) Symmetric backbone." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 50, + 223, + 285, + 284 + ], + "blocks": [ + { + "bbox": [ + 50, + 223, + 285, + 284 + ], + "lines": [ + { + "bbox": [ + 50, + 223, + 285, + 284 + ], + "spans": [ + { + "bbox": [ + 50, + 223, + 285, + 284 + ], + "type": "image", + "image_path": "205075424b7e82596f85b701c053aeddd710494c31f70fcc7951701294bc47ab.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 124, + 288, + 209, + 297 + ], + "lines": [ + { + "bbox": [ + 124, + 288, + 209, + 297 + ], + "spans": [ + { + "bbox": [ + 124, + 288, + 209, + 297 + ], + "type": "text", + "content": "(c) Asymmetric backbone." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 47, + 307, + 286, + 319 + ], + "lines": [ + { + "bbox": [ + 47, + 307, + 286, + 319 + ], + "spans": [ + { + "bbox": [ + 47, + 307, + 286, + 319 + ], + "type": "text", + "content": "Figure 4. Illustrations of three image backbone sharing strategies." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 338, + 287, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 338, + 287, + 374 + ], + "spans": [ + { + "bbox": [ + 46, + 338, + 287, + 374 + ], + "type": "text", + "content": "on the image plane with the camera projection matrix and use bilinear sampling to obtain a local feature " + }, + { + "bbox": [ + 46, + 338, + 287, + 374 + ], + "type": "inline_equation", + "content": "e_v" + }, + { + "bbox": [ + 46, + 338, + 287, + 374 + ], + "type": "text", + "content": " from the location on the feature map corresponding to " + }, + { + "bbox": [ + 46, + 338, + 287, + 374 + ], + "type": "inline_equation", + "content": "\\hat{x}" + }, + { + "bbox": [ + 46, + 338, + 287, + 374 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "spans": [ + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "text", + "content": "Temporally-enhanced visual feature. To improve the robustness of visual features in a single frame " + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "inline_equation", + "content": "I_{t}" + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "text", + "content": " from occlusion or motion blur, we propose to exploit temporal information from videos to refine " + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "inline_equation", + "content": "v_{t}^{r}" + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "text", + "content": ". Note that due to non-rigid hand motions, we do not assume video frames to contain different views of the same rigid scene. We make use of the spatial-temporal transformer architecture [1,4] to efficiently propagate image features across frames. Assume " + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "inline_equation", + "content": "v_{t-1}^{r}, \\dots, v_{t+1}^{r}" + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "text", + "content": " are the feature maps from neighboring frames of " + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "inline_equation", + "content": "I_{t}" + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "text", + "content": " in a video. We flatten all the feature maps as a sequence in the spatial-temporal dimension leading to " + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "inline_equation", + "content": "3 \\times 16 \\times 16" + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "text", + "content": " tokens fed into the transformer model. 
We reshape the output features of the transformer into a feature map again for " + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "inline_equation", + "content": "I_{t}" + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "text", + "content": ", denoted as " + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "inline_equation", + "content": "v_{t} \\in \\mathbb{R}^{16 \\times 16 \\times d}" + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "text", + "content": ". By aggregating spatial and temporal information from multiple frames, " + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "inline_equation", + "content": "v_{t}" + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "text", + "content": " becomes more robust to the noise and can potentially produce more stable reconstruction results compared to " + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "inline_equation", + "content": "v_{t}^{r}" + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "text", + "content": ". Our full gSDF model relies on the feature map " + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "inline_equation", + "content": "v_{t}" + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "text", + "content": " to compute the local visual feature " + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "inline_equation", + "content": "e_{v}" + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "text", + "content": " for the given input point " + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 376, + 287, + 604 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 611, + 230, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 611, + 230, + 624 + ], + "spans": [ + { + "bbox": [ + 47, + 611, + 230, + 624 + ], + "type": "text", + "content": "3.4. Image Backbone Sharing Strategy" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 629, + 287, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 287, + 700 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 287, + 700 + ], + "type": "text", + "content": "As shown in Figure 2, our model contains three branches for hand and object pose estimations as well as for SDF feature encoding. These different branches may share image backbones which might be beneficial with the multi-task learning. In this section, we describe three alternative strategies for sharing image backbones in our model." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 701, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 701, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 701, + 287, + 714 + ], + "type": "text", + "content": "Single image backbone (Figure 4a). We only employ one" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 72, + 545, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 95 + ], + "type": "text", + "content": "single image backbone for both pose and shape predictions. This is the strategy used in AlignSDF [11]." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 96, + 545, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 96, + 545, + 144 + ], + "spans": [ + { + "bbox": [ + 304, + 96, + 545, + 144 + ], + "type": "text", + "content": "Symmetric image backbone (Figure 4b). To disentangle pose and shape learning, we share the image backbone for hand and object pose estimation, but use a different backbone to extract visual features for SDFs learning." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 144, + 547, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 144, + 547, + 192 + ], + "spans": [ + { + "bbox": [ + 304, + 144, + 547, + 192 + ], + "type": "text", + "content": "Asymmetric image backbone (Figure 4c). Since hand pose estimation plays a critical role in the task, we use a separate backbone to predict the hand pose, while share the image backbone for object pose predictor and SDF feature encoder." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 199, + 369, + 212 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 199, + 369, + 212 + ], + "spans": [ + { + "bbox": [ + 306, + 199, + 369, + 212 + ], + "type": "text", + "content": "3.5. Training" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 217, + 547, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 217, + 547, + 290 + ], + "spans": [ + { + "bbox": [ + 304, + 217, + 547, + 290 + ], + "type": "text", + "content": "We apply a two-stage training strategy. In the first stage, we train the hand pose predictor to predict hand joint coordinates " + }, + { + "bbox": [ + 304, + 217, + 547, + 290 + ], + "type": "inline_equation", + "content": "\\psi_h" + }, + { + "bbox": [ + 304, + 217, + 547, + 290 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 304, + 217, + 547, + 290 + ], + "type": "inline_equation", + "content": "\\ell 2" + }, + { + "bbox": [ + 304, + 217, + 547, + 290 + ], + "type": "text", + "content": " loss " + }, + { + "bbox": [ + 304, + 217, + 547, + 290 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{hp}" + }, + { + "bbox": [ + 304, + 217, + 547, + 290 + ], + "type": "text", + "content": " and an ordinal loss [43] " + }, + { + "bbox": [ + 304, + 217, + 547, + 290 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ord}" + }, + { + "bbox": [ + 304, + 217, + 547, + 290 + ], + "type": "text", + "content": " to penalize the case if the predicted depth order between the " + }, + { + "bbox": [ + 304, + 217, + 547, + 290 + ], + "type": "inline_equation", + "content": "i_{th}" + }, + { + "bbox": [ + 304, + 217, + 547, + 290 + ], + "type": "text", + "content": " joint and the " + }, + { + "bbox": [ + 304, + 217, + 547, + 290 + ], + "type": "inline_equation", + "content": "j_{th}" + }, + { + "bbox": [ + 304, + 217, + 547, + 290 + ], + "type": "text", + "content": " joint is misaligned with the ground-truth relation " + }, + { + "bbox": [ + 304, + 217, + 547, + 290 + ], + "type": "inline_equation", + "content": "\\mathbb{1}_{i,j}^{ord}" + }, + { + "bbox": [ + 304, + 217, + 547, + 290 + ], + "type": "text", + "content": ", which are:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 362, + 299, + 545, + 331 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 362, + 299, + 545, + 331 + ], + "spans": [ + { + "bbox": [ + 362, + 299, + 545, + 331 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {h 
p} = \\frac {1}{2 1} \\sum_ {i = 1} ^ {2 1} \\left\\| \\psi_ {h, i} - \\hat {\\psi} _ {h, i} \\right\\| _ {2} ^ {2}, \\tag {6}", + "image_path": "fb7d78de0027b28500697a24a76be9fdf518c14ed9d067079a69599859428848.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 337, + 341, + 545, + 374 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 337, + 341, + 545, + 374 + ], + "spans": [ + { + "bbox": [ + 337, + 341, + 545, + 374 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {o r d} = \\sum_ {j = 2} ^ {2 1} \\sum_ {i = 1} ^ {j - 1} \\mathbb {1} _ {i, j} ^ {o r d} \\times \\left| \\left(\\psi_ {h, i} - \\psi_ {h, j}\\right) \\cdot \\vec {n} \\right|, \\tag {7}", + "image_path": "afe1147bf070f827f1d047dbdae2bf9ee948d7d05a0651a59276b7fc47ccbfc1.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 380, + 545, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 380, + 545, + 451 + ], + "spans": [ + { + "bbox": [ + 304, + 380, + 545, + 451 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 380, + 545, + 451 + ], + "type": "inline_equation", + "content": "\\vec{n} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 304, + 380, + 545, + 451 + ], + "type": "text", + "content": " denotes the viewpoint direction. We randomly sample twenty virtual views to optimize " + }, + { + "bbox": [ + 304, + 380, + 545, + 451 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ord}" + }, + { + "bbox": [ + 304, + 380, + 545, + 451 + ], + "type": "text", + "content": ". Since the proposed kinematic features are based on the predicted hand joints " + }, + { + "bbox": [ + 304, + 380, + 545, + 451 + ], + "type": "inline_equation", + "content": "\\psi_h" + }, + { + "bbox": [ + 304, + 380, + 545, + 451 + ], + "type": "text", + "content": ", we empirically find that pretraining the hand joint predictor in the first stage and then freezing its weights can achieve better performance." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 452, + 545, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 452, + 545, + 488 + ], + "spans": [ + { + "bbox": [ + 304, + 452, + 545, + 488 + ], + "type": "text", + "content": "In the second training stage, we learn all the modules except the hand joint predictor in an end-to-end manner. 
We use the " + }, + { + "bbox": [ + 304, + 452, + 545, + 488 + ], + "type": "inline_equation", + "content": "\\ell 2" + }, + { + "bbox": [ + 304, + 452, + 545, + 488 + ], + "type": "text", + "content": " loss " + }, + { + "bbox": [ + 304, + 452, + 545, + 488 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{op}" + }, + { + "bbox": [ + 304, + 452, + 545, + 488 + ], + "type": "text", + "content": " to predict the object pose " + }, + { + "bbox": [ + 304, + 452, + 545, + 488 + ], + "type": "inline_equation", + "content": "\\psi_{o}" + }, + { + "bbox": [ + 304, + 452, + 545, + 488 + ], + "type": "text", + "content": " as follows:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 384, + 496, + 545, + 519 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 384, + 496, + 545, + 519 + ], + "spans": [ + { + "bbox": [ + 384, + 496, + 545, + 519 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {o p} = \\left\\| \\psi_ {o} - \\hat {\\psi} _ {o} \\right\\| _ {2} ^ {2} \\tag {8}", + "image_path": "a75867dc063e790685f947587a7788525b21f3c16a1326ec2f31ab1bf8831cb1.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 529, + 547, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 529, + 547, + 588 + ], + "spans": [ + { + "bbox": [ + 304, + 529, + 547, + 588 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 529, + 547, + 588 + ], + "type": "inline_equation", + "content": "\\hat{\\psi}_o" + }, + { + "bbox": [ + 304, + 529, + 547, + 588 + ], + "type": "text", + "content": " denote the ground-truth location for the object center. To train the SDFs, we sample many 3D points around the hand-object surface and calculate their ground-truth signed distances to the hand mesh and the object mesh. 
We use " + }, + { + "bbox": [ + 304, + 529, + 547, + 588 + ], + "type": "inline_equation", + "content": "\\ell 1" + }, + { + "bbox": [ + 304, + 529, + 547, + 588 + ], + "type": "text", + "content": " loss to optimize the SDF decoders:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 351, + 597, + 545, + 624 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 351, + 597, + 545, + 624 + ], + "spans": [ + { + "bbox": [ + 351, + 597, + 545, + 624 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {h s d f} = \\left\\| \\mathrm {S D F} _ {h a n d} - \\mathrm {S D F} _ {h a n d} \\right\\| _ {1} ^ {1}, \\tag {9}", + "image_path": "24873923c0672492d2513d1274fef5c2942ca2945e9037d22d48c6ba30bd73b4.jpg" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 354, + 622, + 484, + 644 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 354, + 622, + 484, + 644 + ], + "spans": [ + { + "bbox": [ + 354, + 622, + 484, + 644 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {o s d f} = \\left\\| \\mathrm {S D F} _ {o b j} - \\hat {\\mathrm {S D F}} _ {o b j} \\right\\| _ {1} ^ {1},", + "image_path": "f5109c7e49a58bbdf74c769ea546d28ab6b77536071f1739a456caff71f7bb6d.jpg" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 304, + 653, + 545, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 653, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 304, + 653, + 545, + 689 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 653, + 545, + 689 + ], + "type": "inline_equation", + "content": "\\hat{\\mathrm{SDF}}_{hand}" + }, + { + "bbox": [ + 304, + 653, + 545, + 689 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 653, + 545, + 689 + ], + "type": "inline_equation", + "content": "\\hat{\\mathrm{SDF}}_{obj}" + }, + { + "bbox": [ + 304, + 653, + 545, + 689 + ], + "type": "text", + "content": " denote ground-truth signed distances to the hand and the object, respectively. The overall training objective " + }, + { + "bbox": [ + 304, + 653, + 545, + 689 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{shape}" + }, + { + "bbox": [ + 304, + 653, + 545, + 689 + ], + "type": "text", + "content": " in the second training stage is:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 326, + 700, + 545, + 713 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 700, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 326, + 700, + 545, + 713 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {s h a p e}} = \\mathcal {L} _ {\\text {o p}} + 0. 5 \\times \\mathcal {L} _ {\\text {h s d f}} + 0. 5 \\times \\mathcal {L} _ {\\text {o s d f}}. \\tag {10}", + "image_path": "daf8eaf3d69b05715d86cc81b4b2a2334acc5d1d29a55f448408c61d193dfd0d.jpg" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12894" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 71, + 128, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 71, + 128, + 85 + ], + "spans": [ + { + "bbox": [ + 47, + 71, + 128, + 85 + ], + "type": "text", + "content": "4. 
Experiments" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 91, + 288, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 91, + 288, + 128 + ], + "spans": [ + { + "bbox": [ + 46, + 91, + 288, + 128 + ], + "type": "text", + "content": "We conduct extensive experiments on two 3D hand-object reconstruction benchmarks to evaluate the effectiveness of our proposed gSDF model." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 137, + 110, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 137, + 110, + 148 + ], + "spans": [ + { + "bbox": [ + 47, + 137, + 110, + 148 + ], + "type": "text", + "content": "4.1. Datasets" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 156, + 287, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 156, + 287, + 287 + ], + "spans": [ + { + "bbox": [ + 46, + 156, + 287, + 287 + ], + "type": "text", + "content": "ObMan [21] is a large-scale synthetic dataset that contains diverse hand grasping poses on a wide range of objects imported from ShapeNet [8]. We follow previous methods [11,26,42,63] to generate data for SDFs training. First, we remove meshes that contain too many double-sided triangles, which results in 87,190 hand-object meshes. Then, we fit the hand-object mesh into a unit cube and sample 40,000 points inside the cube. For each sampled point, we compute its signed distance to the ground-truth hand mesh and object mesh, respectively. At test time, we report the performance on the whole ObMan test set of 6,285 testing samples." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 288, + 288, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 288, + 288, + 373 + ], + "spans": [ + { + "bbox": [ + 46, + 288, + 288, + 373 + ], + "type": "text", + "content": "DexYCB [9] is currently the largest real dataset that captures hand and object interactions in videos. Following [11,60], we focus on right-hand samples and use the official s0 split. We follow the same steps as in ObMan to obtain SDF training samples. To reduce the temporal redundancy, we downsample the video data to 6 frames per second, which results in 29,656 training samples and 5,928 testing samples." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 380, + 159, + 392 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 380, + 159, + 392 + ], + "spans": [ + { + "bbox": [ + 47, + 380, + 159, + 392 + ], + "type": "text", + "content": "4.2. Evaluation metrics" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 399, + 287, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 399, + 287, + 422 + ], + "spans": [ + { + "bbox": [ + 47, + 399, + 287, + 422 + ], + "type": "text", + "content": "We follow prior works to comprehensively evaluate the 3D reconstructions with multiple metrics as below." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 424, + 288, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 424, + 288, + 519 + ], + "spans": [ + { + "bbox": [ + 46, + 424, + 288, + 519 + ], + "type": "text", + "content": "Hand Chamfer Distance " + }, + { + "bbox": [ + 46, + 424, + 288, + 519 + ], + "type": "inline_equation", + "content": "(\\mathrm{CD_h})" + }, + { + "bbox": [ + 46, + 424, + 288, + 519 + ], + "type": "text", + "content": ". 
We evaluate Chamfer distance " + }, + { + "bbox": [ + 46, + 424, + 288, + 519 + ], + "type": "inline_equation", + "content": "(\\mathrm{cm}^2)" + }, + { + "bbox": [ + 46, + 424, + 288, + 519 + ], + "type": "text", + "content": " between our reconstructed hand mesh and the ground-truth hand mesh. We follow previous works [11, 26] to optimize the scale and translation to align the reconstructed mesh with the ground truth and sample 30,000 points on both meshes to compute Chamfer distance. We report the median Chamfer distance on the test set to reflect the quality of our reconstructed hand mesh." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 520, + 287, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 520, + 287, + 579 + ], + "spans": [ + { + "bbox": [ + 46, + 520, + 287, + 579 + ], + "type": "text", + "content": "Hand F-score " + }, + { + "bbox": [ + 46, + 520, + 287, + 579 + ], + "type": "inline_equation", + "content": "(\\mathbf{FS_h})" + }, + { + "bbox": [ + 46, + 520, + 287, + 579 + ], + "type": "text", + "content": ". Since Chamfer distance is vulnerable to outliers [52, 63], we also report the F-score to evaluate the predicted hand mesh. After aligning the hand mesh with its ground truth, we report F-score at " + }, + { + "bbox": [ + 46, + 520, + 287, + 579 + ], + "type": "inline_equation", + "content": "1\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 520, + 287, + 579 + ], + "type": "inline_equation", + "content": "(\\mathrm{FS_h}@\\mathrm{l})" + }, + { + "bbox": [ + 46, + 520, + 287, + 579 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 520, + 287, + 579 + ], + "type": "inline_equation", + "content": "5\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 520, + 287, + 579 + ], + "type": "inline_equation", + "content": "(\\mathrm{FS_h}@\\mathrm{5})" + }, + { + "bbox": [ + 46, + 520, + 287, + 579 + ], + "type": "text", + "content": " thresholds." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 581, + 287, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 581, + 287, + 640 + ], + "spans": [ + { + "bbox": [ + 46, + 581, + 287, + 640 + ], + "type": "text", + "content": "Object Chamfer Distance " + }, + { + "bbox": [ + 46, + 581, + 287, + 640 + ], + "type": "inline_equation", + "content": "(\\mathrm{CD_o})" + }, + { + "bbox": [ + 46, + 581, + 287, + 640 + ], + "type": "text", + "content": ". Following [11, 26], we first use the optimized hand scale and translation to transform the reconstructed object mesh. Then, we follow the same process as " + }, + { + "bbox": [ + 46, + 581, + 287, + 640 + ], + "type": "inline_equation", + "content": "\\mathrm{CD_h}" + }, + { + "bbox": [ + 46, + 581, + 287, + 640 + ], + "type": "text", + "content": " to compute " + }, + { + "bbox": [ + 46, + 581, + 287, + 640 + ], + "type": "inline_equation", + "content": "\\mathrm{CD_o}" + }, + { + "bbox": [ + 46, + 581, + 287, + 640 + ], + "type": "inline_equation", + "content": "(\\mathrm{cm}^2)" + }, + { + "bbox": [ + 46, + 581, + 287, + 640 + ], + "type": "text", + "content": " and evaluate the quality of our reconstructed object mesh." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 641, + 287, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 287, + 677 + ], + "type": "text", + "content": "Object F-score " + }, + { + "bbox": [ + 46, + 641, + 287, + 677 + ], + "type": "inline_equation", + "content": "(\\mathrm{FS_o})" + }, + { + "bbox": [ + 46, + 641, + 287, + 677 + ], + "type": "text", + "content": " .We follow the previous work [63] to evaluate the reconstructed object mesh using F-score at 5 mm " + }, + { + "bbox": [ + 46, + 641, + 287, + 677 + ], + "type": "inline_equation", + "content": "\\mathrm{(FS_o@5)}" + }, + { + "bbox": [ + 46, + 641, + 287, + 677 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 641, + 287, + 677 + ], + "type": "inline_equation", + "content": "10\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 641, + 287, + 677 + ], + "type": "inline_equation", + "content": "\\mathrm{(FS_o@10)}" + }, + { + "bbox": [ + 46, + 641, + 287, + 677 + ], + "type": "text", + "content": " thresholds." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "text", + "content": "Hand Joint Error " + }, + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "inline_equation", + "content": "(\\mathbf{E_h})" + }, + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "text", + "content": ". To measure the hand pose estimation accuracy, we compute the mean joint error (cm) relative to the hand wrist over all 21 joints in the form of " + }, + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "inline_equation", + "content": "\\ell 2" + }, + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "text", + "content": " distance." + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 309, + 93, + 541, + 156 + ], + "blocks": [ + { + "bbox": [ + 305, + 70, + 547, + 92 + ], + "lines": [ + { + "bbox": [ + 305, + 70, + 547, + 92 + ], + "spans": [ + { + "bbox": [ + 305, + 70, + 547, + 92 + ], + "type": "text", + "content": "Table 1. Hand reconstruction performance with different hand kinematic features " + }, + { + "bbox": [ + 305, + 70, + 547, + 92 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_{*}^{h}" + }, + { + "bbox": [ + 305, + 70, + 547, + 92 + ], + "type": "text", + "content": " and visual feature " + }, + { + "bbox": [ + 305, + 70, + 547, + 92 + ], + "type": "inline_equation", + "content": "\\mathrm{V}_1" + }, + { + "bbox": [ + 305, + 70, + 547, + 92 + ], + "type": "text", + "content": " on DexYCB dataset." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 93, + 541, + 156 + ], + "lines": [ + { + "bbox": [ + 309, + 93, + 541, + 156 + ], + "spans": [ + { + "bbox": [ + 309, + 93, + 541, + 156 + ], + "type": "table", + "html": "
Wrist onlyAll jointsCDh↓FSh@1↑FSh@5↑
K1h××0.3640.1540.764
K2h×0.3440.1670.776
K3h×0.3170.1710.788
", + "image_path": "d026c5cc1a0a07fbceaba1d3783b8bf3b416d878437b376cd7d998074ba3da53.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 307, + 188, + 541, + 249 + ], + "blocks": [ + { + "bbox": [ + 305, + 164, + 545, + 186 + ], + "lines": [ + { + "bbox": [ + 305, + 164, + 545, + 186 + ], + "spans": [ + { + "bbox": [ + 305, + 164, + 545, + 186 + ], + "type": "text", + "content": "Table 2. Object reconstruction performance with different object kinematic features " + }, + { + "bbox": [ + 305, + 164, + 545, + 186 + ], + "type": "inline_equation", + "content": "{\\mathrm{K}}_{ * }^{o}" + }, + { + "bbox": [ + 305, + 164, + 545, + 186 + ], + "type": "text", + "content": " and visual feature " + }, + { + "bbox": [ + 305, + 164, + 545, + 186 + ], + "type": "inline_equation", + "content": "{\\mathrm{V}}_{1}" + }, + { + "bbox": [ + 305, + 164, + 545, + 186 + ], + "type": "text", + "content": " on DexYCB dataset." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 188, + 541, + 249 + ], + "lines": [ + { + "bbox": [ + 307, + 188, + 541, + 249 + ], + "spans": [ + { + "bbox": [ + 307, + 188, + 541, + 249 + ], + "type": "table", + "html": "
Obj poseHand poseCDo↓FSo@5↑FSo@10↑
K1o××2.060.3920.660
K2o×1.930.3960.668
K3o1.710.4180.689
", + "image_path": "5e66986513695f2dc36c5edd15af610976b3ac4295da4516561605ecb0758bf9.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 255, + 547, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 255, + 547, + 292 + ], + "spans": [ + { + "bbox": [ + 304, + 255, + 547, + 292 + ], + "type": "text", + "content": "Object Center Error " + }, + { + "bbox": [ + 304, + 255, + 547, + 292 + ], + "type": "inline_equation", + "content": "(\\mathbf{E_o})" + }, + { + "bbox": [ + 304, + 255, + 547, + 292 + ], + "type": "text", + "content": ". To evaluate the accuracy of our predicted object translation, we report the " + }, + { + "bbox": [ + 304, + 255, + 547, + 292 + ], + "type": "inline_equation", + "content": "\\ell 2" + }, + { + "bbox": [ + 304, + 255, + 547, + 292 + ], + "type": "text", + "content": " distance (cm) between the prediction and its ground truth." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 293, + 547, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 293, + 547, + 351 + ], + "spans": [ + { + "bbox": [ + 304, + 293, + 547, + 351 + ], + "type": "text", + "content": "Additionally, we report Contact ratio " + }, + { + "bbox": [ + 304, + 293, + 547, + 351 + ], + "type": "inline_equation", + "content": "(\\mathrm{C}_r)" + }, + { + "bbox": [ + 304, + 293, + 547, + 351 + ], + "type": "text", + "content": ", Penetration depth " + }, + { + "bbox": [ + 304, + 293, + 547, + 351 + ], + "type": "inline_equation", + "content": "(\\mathrm{P}_d)" + }, + { + "bbox": [ + 304, + 293, + 547, + 351 + ], + "type": "text", + "content": " and Intersection volume " + }, + { + "bbox": [ + 304, + 293, + 547, + 351 + ], + "type": "inline_equation", + "content": "(\\mathrm{I}_v)" + }, + { + "bbox": [ + 304, + 293, + 547, + 351 + ], + "type": "text", + "content": " [11,21,26,60,62] to present more details about the interaction between the hand mesh and the object mesh. Please see supplementary material for more details." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 358, + 437, + 371 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 358, + 437, + 371 + ], + "spans": [ + { + "bbox": [ + 306, + 358, + 437, + 371 + ], + "type": "text", + "content": "4.3. Implementation details" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 376, + 545, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 376, + 545, + 460 + ], + "spans": [ + { + "bbox": [ + 304, + 376, + 545, + 460 + ], + "type": "text", + "content": "Model architecture. We use ResNet-18 [22] as our image backbone. For hand and object pose estimation, we adopt volumetric heatmaps of spatial resolution " + }, + { + "bbox": [ + 304, + 376, + 545, + 460 + ], + "type": "inline_equation", + "content": "64 \\times 64 \\times 64" + }, + { + "bbox": [ + 304, + 376, + 545, + 460 + ], + "type": "text", + "content": " to localize hand joints and the object center in 3D space. For the spatial-temporal transformer, we use 16 transformer layers with 4 attention heads. We present more details about our model architecture in supplementary material." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 461, + 547, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 461, + 547, + 640 + ], + "spans": [ + { + "bbox": [ + 304, + 461, + 547, + 640 + ], + "type": "text", + "content": "Training details. 
We take the image crop of the hand-object region according to their bounding boxes for DexYCB benchmark. Then, we modify camera intrinsic and extrinsic parameters [35,64] accordingly and take the cropped image as the input to our model. The spatial size of input images is " + }, + { + "bbox": [ + 304, + 461, + 547, + 640 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 304, + 461, + 547, + 640 + ], + "type": "text", + "content": " for all our models. We perform data augmentation including rotation " + }, + { + "bbox": [ + 304, + 461, + 547, + 640 + ], + "type": "inline_equation", + "content": "\\left[\\left[-45^{\\circ}, 45^{\\circ}\\right]\\right)" + }, + { + "bbox": [ + 304, + 461, + 547, + 640 + ], + "type": "text", + "content": " and color jittering. During SDF training, we randomly sample 1000 points (500 points inside the mesh and 500 points outside the mesh) for the hand and the object, respectively. We train our model with a batch size of 256 for 1600 epochs on both ObMan and DexYCB using the Adam optimizer [27] with 4 NVIDIA RTX 3090 GPUs. We use an initial learning rate of " + }, + { + "bbox": [ + 304, + 461, + 547, + 640 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 304, + 461, + 547, + 640 + ], + "type": "text", + "content": " and decay it by half every 600 epochs. It takes 22 hours for training on DexYCB and 60 hours on ObMan dataset." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 306, + 647, + 405, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 647, + 405, + 658 + ], + "spans": [ + { + "bbox": [ + 306, + 647, + 405, + 658 + ], + "type": "text", + "content": "4.4. Ablation studies" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "text", + "content": "We carry out ablations on the DexYCB dataset to validate different components in our gSDF model. We evaluate different settings of hand kinematic features " + }, + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "inline_equation", + "content": "(\\mathbf{K}_*^h" + }, + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "text", + "content": " in Table 1), object kinematic features " + }, + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "inline_equation", + "content": "(\\mathbf{K}_*^o" + }, + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "text", + "content": " in Table 2), and visual features" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12895" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 75, + 94, + 518, + 178 + ], + "blocks": [ + { + "bbox": [ + 46, + 70, + 545, + 93 + ], + "lines": [ + { + "bbox": [ + 46, + 70, + 545, + 93 + ], + "spans": [ + { + "bbox": [ + 46, + 70, + 545, + 93 + ], + "type": "text", + "content": "Table 3. Hand-object reconstruction performance with different visual features on DexYCB dataset. 
The visual features are combined with the best kinematic features " + }, + { + "bbox": [ + 46, + 70, + 545, + 93 + ], + "type": "inline_equation", + "content": "{\\mathrm{K}}_{3}^{h}" + }, + { + "bbox": [ + 46, + 70, + 545, + 93 + ], + "type": "text", + "content": " (Table 1) and " + }, + { + "bbox": [ + 46, + 70, + 545, + 93 + ], + "type": "inline_equation", + "content": "{\\mathrm{K}}_{3}^{o}" + }, + { + "bbox": [ + 46, + 70, + 545, + 93 + ], + "type": "text", + "content": " (Table 2) to reconstruct hand and object respectively." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 75, + 94, + 518, + 178 + ], + "lines": [ + { + "bbox": [ + 75, + 94, + 518, + 178 + ], + "spans": [ + { + "bbox": [ + 75, + 94, + 518, + 178 + ], + "type": "table", + "html": "
GlobalLocalTransformerCDh↓FSh@1↑FSh@5↑CDo↓FSo@5↑FSo@10↑Eh↓Eo↓
SpatialTemp.
V1×××0.3170.1710.7881.710.4180.6891.441.91
V2×××0.3100.1720.7951.710.4260.6941.441.98
V3××0.3040.1740.7971.600.4340.7031.441.94
V4×0.3020.1770.8011.550.4370.7091.441.96
", + "image_path": "3ae6d1026b212bb8a46a0bed9b316485f520e5f020e4197e666decd843b4555a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 92, + 212, + 499, + 272 + ], + "blocks": [ + { + "bbox": [ + 46, + 189, + 544, + 211 + ], + "lines": [ + { + "bbox": [ + 46, + 189, + 544, + 211 + ], + "spans": [ + { + "bbox": [ + 46, + 189, + 544, + 211 + ], + "type": "text", + "content": "Table 4. Hand-object reconstruction performance using different image backbone sharing strategies on DexYCB dataset. The ablation is carried out with visual features " + }, + { + "bbox": [ + 46, + 189, + 544, + 211 + ], + "type": "inline_equation", + "content": "{\\mathrm{V}}_{1}" + }, + { + "bbox": [ + 46, + 189, + 544, + 211 + ], + "type": "text", + "content": " and kinematic features " + }, + { + "bbox": [ + 46, + 189, + 544, + 211 + ], + "type": "inline_equation", + "content": "{\\mathrm{K}}_{3}^{h}" + }, + { + "bbox": [ + 46, + 189, + 544, + 211 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 189, + 544, + 211 + ], + "type": "inline_equation", + "content": "{\\mathrm{K}}_{3}^{o}" + }, + { + "bbox": [ + 46, + 189, + 544, + 211 + ], + "type": "text", + "content": " ." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 92, + 212, + 499, + 272 + ], + "lines": [ + { + "bbox": [ + 92, + 212, + 499, + 272 + ], + "spans": [ + { + "bbox": [ + 92, + 212, + 499, + 272 + ], + "type": "table", + "html": "
BackboneCDh↓FSh@1↑FSh@5↑CDo↓FSo@5↑FSo@10↑Eh↓Eo↓
Single0.4110.1480.7411.880.4020.6741.721.83
Symmetric0.3240.1680.7791.840.4050.6721.461.93
Asymmetric0.3170.1710.7881.710.4180.6891.441.91
", + "image_path": "f5eba0e0f92278d1b358264d703c8164ec987251663f232535497be4f23948af.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 281, + 287, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 281, + 287, + 304 + ], + "spans": [ + { + "bbox": [ + 46, + 281, + 287, + 304 + ], + "type": "inline_equation", + "content": "(\\mathrm{V}_{*}" + }, + { + "bbox": [ + 46, + 281, + 287, + 304 + ], + "type": "text", + "content": " in Table 3). We use the asymmetric image backbone if not otherwise mentioned." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "spans": [ + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "text", + "content": "Hand kinematic feature. In Table 1, we evaluate the contribution of the proposed hand kinematic features for 3D hand reconstruction. The model in " + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "inline_equation", + "content": "\\mathrm{K}_1^h" + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "text", + "content": " does not use any pose priors to transform the 3D point. The model in " + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "inline_equation", + "content": "\\mathrm{K}_2^h" + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "text", + "content": " only uses the hand wrist pose to transform the 3D point as AlignSDF [11]. Our model in " + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "inline_equation", + "content": "\\mathrm{K}_3^h" + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "text", + "content": " computes the transformations to all the hand joints, which achieves the best performance on all the evaluation metrics. Compared to " + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "inline_equation", + "content": "\\mathrm{K}_1^h" + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "text", + "content": " without any pose priors, our model achieves more than " + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "inline_equation", + "content": "12\\%" + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "inline_equation", + "content": "9\\%" + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "text", + "content": " improvement on " + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "inline_equation", + "content": "\\mathrm{CD_h}" + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "inline_equation", + "content": "\\mathrm{FS_h}@\\mathbb{1}" + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "text", + "content": ", respectively. 
Compared to " + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "inline_equation", + "content": "\\mathrm{K}_2^h" + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "text", + "content": " with only hand wrist, our model greatly reduces the hand Chamfer distance from " + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "inline_equation", + "content": "0.344~\\mathrm{cm}^2" + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "inline_equation", + "content": "0.317~\\mathrm{cm}^2" + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "text", + "content": ", leading to " + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "inline_equation", + "content": "7.8\\%" + }, + { + "bbox": [ + 46, + 306, + 287, + 474 + ], + "type": "text", + "content": " relative gains. These results demonstrate the significance of pose priors and the advantage of gSDF for 3D hand reconstruction." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 474, + 287, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 474, + 287, + 594 + ], + "spans": [ + { + "bbox": [ + 46, + 474, + 287, + 594 + ], + "type": "text", + "content": "Object kinematic feature. In Table 2, we validate the effectiveness of our proposed object kinematic feature. The model in " + }, + { + "bbox": [ + 46, + 474, + 287, + 594 + ], + "type": "inline_equation", + "content": "\\mathrm{K}_1^o" + }, + { + "bbox": [ + 46, + 474, + 287, + 594 + ], + "type": "text", + "content": " does not contain any pose priors, while the model in " + }, + { + "bbox": [ + 46, + 474, + 287, + 594 + ], + "type": "inline_equation", + "content": "\\mathrm{K}_2^o" + }, + { + "bbox": [ + 46, + 474, + 287, + 594 + ], + "type": "text", + "content": " aligns query points to the object center as in [11]. Our model in " + }, + { + "bbox": [ + 46, + 474, + 287, + 594 + ], + "type": "inline_equation", + "content": "\\mathrm{K}_3^o" + }, + { + "bbox": [ + 46, + 474, + 287, + 594 + ], + "type": "text", + "content": " further employs the hand pose to produce the object kinematic feature, which significantly boosts the performance for the object reconstruction on different metrics. Compared to " + }, + { + "bbox": [ + 46, + 474, + 287, + 594 + ], + "type": "inline_equation", + "content": "\\mathrm{K}_2^o" + }, + { + "bbox": [ + 46, + 474, + 287, + 594 + ], + "type": "text", + "content": ", our proposed object kinematic feature achieves more than " + }, + { + "bbox": [ + 46, + 474, + 287, + 594 + ], + "type": "inline_equation", + "content": "11\\%" + }, + { + "bbox": [ + 46, + 474, + 287, + 594 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 474, + 287, + 594 + ], + "type": "inline_equation", + "content": "5.5\\%" + }, + { + "bbox": [ + 46, + 474, + 287, + 594 + ], + "type": "text", + "content": " improvement on " + }, + { + "bbox": [ + 46, + 474, + 287, + 594 + ], + "type": "inline_equation", + "content": "\\mathrm{CD_o}" + }, + { + "bbox": [ + 46, + 474, + 287, + 594 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 474, + 287, + 594 + ], + "type": "inline_equation", + "content": "\\mathrm{FS_o}@\\mathsf{5}" + }, + { + "bbox": [ + 46, + 474, + 287, + 594 + ], + "type": "text", + "content": ", respectively." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": "Visual features. We compare different visual features for SDF prediction in Table 3. " + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathrm{V}_{1}" + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": " uses the global visual feature e.g. the average pooling of ResNet feature map as in previous works [11,26]. Our local visual features " + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathrm{V}_{2}" + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": " derived from the geometry alignment with the query point reduces the hand Chamfer distance from " + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "inline_equation", + "content": "0.317~\\mathrm{cm}^2" + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "inline_equation", + "content": "0.310~\\mathrm{cm}^2" + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": ". However, it shows less improvement on the object shape accuracy. In " + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathrm{V}_{3}" + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathrm{V}_{4}" + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": ", we use the transformer model to refine the feature maps. 
To ablate the improvement from the transformer architecture and from the temporal information" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 336, + 290, + 394, + 349 + ], + "blocks": [ + { + "bbox": [ + 335, + 281, + 382, + 289 + ], + "lines": [ + { + "bbox": [ + 335, + 281, + 382, + 289 + ], + "spans": [ + { + "bbox": [ + 335, + 281, + 382, + 289 + ], + "type": "text", + "content": "Input Images" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 336, + 290, + 394, + 349 + ], + "lines": [ + { + "bbox": [ + 336, + 290, + 394, + 349 + ], + "spans": [ + { + "bbox": [ + 336, + 290, + 394, + 349 + ], + "type": "image", + "image_path": "a2fc2087c8926407024aceddb57fdb907fca43dbfaaaaa85e1f9f3ce0af2e779.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 395, + 290, + 454, + 349 + ], + "blocks": [ + { + "bbox": [ + 393, + 281, + 454, + 289 + ], + "lines": [ + { + "bbox": [ + 393, + 281, + 454, + 289 + ], + "spans": [ + { + "bbox": [ + 393, + 281, + 454, + 289 + ], + "type": "text", + "content": "Our single-frame model" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 395, + 290, + 454, + 349 + ], + "lines": [ + { + "bbox": [ + 395, + 290, + 454, + 349 + ], + "spans": [ + { + "bbox": [ + 395, + 290, + 454, + 349 + ], + "type": "image", + "image_path": "406f8b2bb7be3f36f49ae72e85ff726200b4de097e075921745b6128b33911ee.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 454, + 290, + 513, + 349 + ], + "blocks": [ + { + "bbox": [ + 462, + 281, + 505, + 289 + ], + "lines": [ + { + "bbox": [ + 462, + 281, + 505, + 289 + ], + "spans": [ + { + "bbox": [ + 462, + 281, + 505, + 289 + ], + "type": "text", + "content": "Our video model" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 454, + 290, + 513, + 349 + ], + "lines": [ + { + "bbox": [ + 454, + 290, + 513, + 349 + ], + "spans": [ + { + "bbox": [ + 454, + 290, + 513, + 349 + ], + "type": "image", + "image_path": "26e3356a3edb06d32680c7a5458a24dd7cf4a5f55113205c55775f19b3067664.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 336, + 350, + 394, + 408 + ], + "blocks": [ + { + "bbox": [ + 336, + 350, + 394, + 408 + ], + "lines": [ + { + "bbox": [ + 336, + 350, + 394, + 408 + ], + "spans": [ + { + "bbox": [ + 336, + 350, + 394, + 408 + ], + "type": "image", + "image_path": "28775e7b0390b3fd2da60253534f1ed3dae6e3f88f9c8fbcaa8a1330a7df6733.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 411, + 544, + 432 + ], + "lines": [ + { + "bbox": [ + 306, + 411, + 544, + 432 + ], + "spans": [ + { + "bbox": [ + 306, + 411, + 544, + 432 + ], + "type": "text", + "content": "Figure 5. The qualitative comparison between our single-frame model built with the transformer and our video model." 
+ } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 395, + 350, + 454, + 408 + ], + "blocks": [ + { + "bbox": [ + 395, + 350, + 454, + 408 + ], + "lines": [ + { + "bbox": [ + 395, + 350, + 454, + 408 + ], + "spans": [ + { + "bbox": [ + 395, + 350, + 454, + 408 + ], + "type": "image", + "image_path": "e5dd54af45fb9666196be863cfdc724c7150dc7069e43683a2f8a6f6d723b5a9.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 454, + 350, + 513, + 408 + ], + "blocks": [ + { + "bbox": [ + 454, + 350, + 513, + 408 + ], + "lines": [ + { + "bbox": [ + 454, + 350, + 513, + 408 + ], + "spans": [ + { + "bbox": [ + 454, + 350, + 513, + 408 + ], + "type": "image", + "image_path": "41160320a085061c1f3ae14b3c5bfc76c6c84c9fd77cc608cffb6e5fb116b782.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 437, + 547, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 437, + 547, + 569 + ], + "spans": [ + { + "bbox": [ + 304, + 437, + 547, + 569 + ], + "type": "text", + "content": "in videos, we only use transformer for each single frame in " + }, + { + "bbox": [ + 304, + 437, + 547, + 569 + ], + "type": "inline_equation", + "content": "\\mathrm{V}_3" + }, + { + "bbox": [ + 304, + 437, + 547, + 569 + ], + "type": "text", + "content": " while use it for multiple frames in " + }, + { + "bbox": [ + 304, + 437, + 547, + 569 + ], + "type": "inline_equation", + "content": "\\mathrm{V}_4" + }, + { + "bbox": [ + 304, + 437, + 547, + 569 + ], + "type": "text", + "content": ". We can see that the transformer architecture alone is beneficial for the reconstruction. Enhancing the visual features with temporal contexts further improves the performance in terms of all the evaluation metrics especially for the objects. In Figure 5, compared with our single-frame model built with the transformer, our video model can make more robust predictions under some hard cases (e.g., motion blur). Although the reconstruction of the can is not accurate in the first example, our model tends to produce more regular shapes." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": "Image backbone sharing strategy. Results of using different strategies for image backbone sharing are presented in Table 4. We train all the three models using the two-stage strategy described in Section 3.5. The model with one single backbone achieves the worst performance under most of the evaluation metrics. This is because the pose learning and shape learning compete with each other during training. The symmetric strategy to separate backbones for pose and SDFs performs better than the single backbone model. Our asymmetric strategy with a separate backbone for hand pose estimation and a shared backbone for object pose and SDF feature encoder achieves the best performance. 
We also em" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12896" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 80, + 82, + 511, + 167 + ], + "blocks": [ + { + "bbox": [ + 150, + 71, + 441, + 81 + ], + "lines": [ + { + "bbox": [ + 150, + 71, + 441, + 81 + ], + "spans": [ + { + "bbox": [ + 150, + 71, + 441, + 81 + ], + "type": "text", + "content": "Table 5. Comparison with state-of-the-art methods on the image ObMan dataset." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 80, + 82, + 511, + 167 + ], + "lines": [ + { + "bbox": [ + 80, + 82, + 511, + 167 + ], + "spans": [ + { + "bbox": [ + 80, + 82, + 511, + 167 + ], + "type": "table", + "html": "
MethodsCDh↓FSh@1↑FSh@5↑CDo↓FSo@5↑FSo@10↑Eh↓Eo↓
Hasson et al. [21]0.4150.1380.7513.600.3590.5901.13-
Karunratanakul et al. [26]0.261--6.80----
Ye et al. [63]----0.4200.630--
Chen et al. [11]0.1360.3020.9133.380.4040.6361.273.29
gSDF (Ours)0.1120.3320.9353.140.4380.6600.933.43
", + "image_path": "f193afc997567272abcba654f7fc1b6faa2c911f4a1bddffe5edddc857a7683a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 80, + 188, + 511, + 274 + ], + "blocks": [ + { + "bbox": [ + 148, + 177, + 443, + 187 + ], + "lines": [ + { + "bbox": [ + 148, + 177, + 443, + 187 + ], + "spans": [ + { + "bbox": [ + 148, + 177, + 443, + 187 + ], + "type": "text", + "content": "Table 6. Comparison with state-of-the-art methods on the video Dex YCB dataset." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 80, + 188, + 511, + 274 + ], + "lines": [ + { + "bbox": [ + 80, + 188, + 511, + 274 + ], + "spans": [ + { + "bbox": [ + 80, + 188, + 511, + 274 + ], + "type": "table", + "html": "
MethodsCDh↓FSh@1↑FSh@5↑CDo↓FSo@5↑FSo@10↑Eh↓Eo↓
Hasson et al. [21]0.5370.1150.6471.940.3830.6421.67-
Karunratanakul et al. [26]0.3640.1540.7642.060.3920.660--
Chen et al. [11]0.3580.1620.7671.830.4100.6791.581.78
Chen et al. [11] 1†0.3440.1670.7761.810.4130.6871.571.93
gSDF (Ours)0.3020.1770.8011.550.4370.7091.441.96
", + "image_path": "c95ba4c9a3a84dd2134cac935eb17f6ba39829e7a15017704849f0aa861e80cc.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 66, + 280, + 113, + 326 + ], + "blocks": [ + { + "bbox": [ + 66, + 280, + 113, + 326 + ], + "lines": [ + { + "bbox": [ + 66, + 280, + 113, + 326 + ], + "spans": [ + { + "bbox": [ + 66, + 280, + 113, + 326 + ], + "type": "image", + "image_path": "39ed9750feff3fbe2547e84316f8642767f8b8ec8ed882144508820c4b5274f1.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 66, + 327, + 113, + 373 + ], + "blocks": [ + { + "bbox": [ + 66, + 327, + 113, + 373 + ], + "lines": [ + { + "bbox": [ + 66, + 327, + 113, + 373 + ], + "spans": [ + { + "bbox": [ + 66, + 327, + 113, + 373 + ], + "type": "image", + "image_path": "205b452a5802100b9a01591043a110f94a42796fa140c94ead9ad5f905272765.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 66, + 374, + 113, + 420 + ], + "blocks": [ + { + "bbox": [ + 66, + 374, + 113, + 420 + ], + "lines": [ + { + "bbox": [ + 66, + 374, + 113, + 420 + ], + "spans": [ + { + "bbox": [ + 66, + 374, + 113, + 420 + ], + "type": "image", + "image_path": "49fd956fdc8cf147437a8ea4bb3365039671495734ec7a8acc49948ce00567d7.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 66, + 422, + 112, + 468 + ], + "blocks": [ + { + "bbox": [ + 66, + 422, + 112, + 468 + ], + "lines": [ + { + "bbox": [ + 66, + 422, + 112, + 468 + ], + "spans": [ + { + "bbox": [ + 66, + 422, + 112, + 468 + ], + "type": "image", + "image_path": "4356e5359f399a9406dafdcca70a9c95aeb4cfecc956d0725fc62934972df905.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 66, + 468, + 113, + 514 + ], + "blocks": [ + { + "bbox": [ + 66, + 468, + 113, + 514 + ], + "lines": [ + { + "bbox": [ + 66, + 468, + 113, + 514 + ], + "spans": [ + { + "bbox": [ + 66, + 468, + 113, + 514 + ], + "type": "image", + "image_path": "1eb8bb9f7720efd45bf01a89fe7988b0bb76d992220e316a637f66312945b4a4.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 514, + 287, + 548 + ], + "lines": [ + { + "bbox": [ + 47, + 514, + 287, + 548 + ], + "spans": [ + { + "bbox": [ + 47, + 514, + 287, + 548 + ], + "type": "text", + "content": "Figure 6. Qualitative results of our model on test images from the ObMan and DexYCB benchmarks. Our model produces convincing results for different grasping poses and diverse objects." 
+ } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 115, + 281, + 160, + 322 + ], + "blocks": [ + { + "bbox": [ + 115, + 281, + 160, + 322 + ], + "lines": [ + { + "bbox": [ + 115, + 281, + 160, + 322 + ], + "spans": [ + { + "bbox": [ + 115, + 281, + 160, + 322 + ], + "type": "image", + "image_path": "2960a5e8e974a67f4ef83809c2bcc825b37473220cb57827905ed333df3de6c5.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 117, + 329, + 160, + 368 + ], + "blocks": [ + { + "bbox": [ + 117, + 329, + 160, + 368 + ], + "lines": [ + { + "bbox": [ + 117, + 329, + 160, + 368 + ], + "spans": [ + { + "bbox": [ + 117, + 329, + 160, + 368 + ], + "type": "image", + "image_path": "13903a0ca9925e1c8d065ca5669db7edc88b0a5c6a8633ea2861e24b978089c6.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 117, + 373, + 160, + 416 + ], + "blocks": [ + { + "bbox": [ + 117, + 373, + 160, + 416 + ], + "lines": [ + { + "bbox": [ + 117, + 373, + 160, + 416 + ], + "spans": [ + { + "bbox": [ + 117, + 373, + 160, + 416 + ], + "type": "image", + "image_path": "fb079a5f525650ce642ab4943ca68a1f9da21fb77bbeea60f190c6504edb56fb.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 116, + 422, + 160, + 459 + ], + "blocks": [ + { + "bbox": [ + 116, + 422, + 160, + 459 + ], + "lines": [ + { + "bbox": [ + 116, + 422, + 160, + 459 + ], + "spans": [ + { + "bbox": [ + 116, + 422, + 160, + 459 + ], + "type": "image", + "image_path": "1bc77af7e87f4b7f2fc219f484870514e2eca4acdfd27cfaba7935f56455eb5c.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 117, + 469, + 163, + 510 + ], + "blocks": [ + { + "bbox": [ + 117, + 469, + 163, + 510 + ], + "lines": [ + { + "bbox": [ + 117, + 469, + 163, + 510 + ], + "spans": [ + { + "bbox": [ + 117, + 469, + 163, + 510 + ], + "type": "image", + "image_path": "cd8232bcd997e1e35719325604ebd9ef49b1e440174c1123c8eababd70c9ede1.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 172, + 281, + 219, + 325 + ], + "blocks": [ + { + "bbox": [ + 172, + 281, + 219, + 325 + ], + "lines": [ + { + "bbox": [ + 172, + 281, + 219, + 325 + ], + "spans": [ + { + "bbox": [ + 172, + 281, + 219, + 325 + ], + "type": "image", + "image_path": "455aa6d4cb069cbda3bdfd07ae3af3788347a47a97e7b6bf3b1c722d96e09edf.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 172, + 326, + 219, + 373 + ], + "blocks": [ + { + "bbox": [ + 172, + 326, + 219, + 373 + ], + "lines": [ + { + "bbox": [ + 172, + 326, + 219, + 373 + ], + "spans": [ + { + "bbox": [ + 172, + 326, + 219, + 373 + ], + "type": "image", + "image_path": "55549b002e014a326591bd0db01eecf830dcbc552d7b6e759e22d9acb42f3a10.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 173, + 374, + 219, + 418 + ], + "blocks": [ + { + "bbox": [ + 173, + 374, + 219, + 418 + ], + "lines": [ + { + "bbox": [ + 173, + 374, + 219, + 418 + ], + "spans": [ + { + "bbox": [ + 173, + 374, + 219, + 418 + ], + "type": "image", + 
"image_path": "e71cbc42877831d036ebc5f2191c6f3f0ee3353ef108860ec09872e723f9edd3.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 173, + 420, + 219, + 466 + ], + "blocks": [ + { + "bbox": [ + 173, + 420, + 219, + 466 + ], + "lines": [ + { + "bbox": [ + 173, + 420, + 219, + 466 + ], + "spans": [ + { + "bbox": [ + 173, + 420, + 219, + 466 + ], + "type": "image", + "image_path": "1477bc6049d11ebb049d1f9cb5e0b3fbb848177ddeb296f18a0cec1d8ea3faf2.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 173, + 468, + 219, + 513 + ], + "blocks": [ + { + "bbox": [ + 173, + 468, + 219, + 513 + ], + "lines": [ + { + "bbox": [ + 173, + 468, + 219, + 513 + ], + "spans": [ + { + "bbox": [ + 173, + 468, + 219, + 513 + ], + "type": "image", + "image_path": "8aa7c02088f82d10f587d942b76aaec39801e04d3d08362d7948bb43920f281c.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 223, + 281, + 257, + 322 + ], + "blocks": [ + { + "bbox": [ + 223, + 281, + 257, + 322 + ], + "lines": [ + { + "bbox": [ + 223, + 281, + 257, + 322 + ], + "spans": [ + { + "bbox": [ + 223, + 281, + 257, + 322 + ], + "type": "image", + "image_path": "7e0c697f296ef13edc1a09cd92d627cf1d4c37d2d96708553ee12c6fabfe7cfc.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 225, + 327, + 257, + 369 + ], + "blocks": [ + { + "bbox": [ + 225, + 327, + 257, + 369 + ], + "lines": [ + { + "bbox": [ + 225, + 327, + 257, + 369 + ], + "spans": [ + { + "bbox": [ + 225, + 327, + 257, + 369 + ], + "type": "image", + "image_path": "340174b095ecda11c76d010375a3400d3fd57a086421c8a6e4c409f6b4cfa3f4.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 223, + 376, + 259, + 418 + ], + "blocks": [ + { + "bbox": [ + 223, + 376, + 259, + 418 + ], + "lines": [ + { + "bbox": [ + 223, + 376, + 259, + 418 + ], + "spans": [ + { + "bbox": [ + 223, + 376, + 259, + 418 + ], + "type": "image", + "image_path": "6fa8633c18178a6fecbe50d2643461c1fd579a32b1b6f18b46979035e5974956.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 224, + 425, + 265, + 460 + ], + "blocks": [ + { + "bbox": [ + 224, + 425, + 265, + 460 + ], + "lines": [ + { + "bbox": [ + 224, + 425, + 265, + 460 + ], + "spans": [ + { + "bbox": [ + 224, + 425, + 265, + 460 + ], + "type": "image", + "image_path": "9bac27d76484944de1854e78079c3b4a5d0904b61ba8481043de1194ff3bb557.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 223, + 468, + 264, + 510 + ], + "blocks": [ + { + "bbox": [ + 223, + 468, + 264, + 510 + ], + "lines": [ + { + "bbox": [ + 223, + 468, + 264, + 510 + ], + "spans": [ + { + "bbox": [ + 223, + 468, + 264, + 510 + ], + "type": "image", + "image_path": "db353de4603cc3652877d9be443d09e004a74e6bb8c66d3ed4fbbd08d6baec8f.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "bbox": [ + 46, + 555, + 288, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 555, + 288, + 616 + ], + "spans": [ + { + "bbox": [ + 46, + 555, + 288, + 616 
+ ], + "type": "text", + "content": "pirically find that learning the object pose and SDFs together improves both the pose accuracy and the shape accuracy. The possible reason is that estimating object pose also helps our model to focus more on hand-object regions and boosts the 3D reconstruction accuracy." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 47, + 620, + 222, + 633 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 620, + 222, + 633 + ], + "spans": [ + { + "bbox": [ + 47, + 620, + 222, + 633 + ], + "type": "text", + "content": "4.5. Comparison with state of the art" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 46, + 638, + 289, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 638, + 289, + 686 + ], + "spans": [ + { + "bbox": [ + 46, + 638, + 289, + 686 + ], + "type": "text", + "content": "We compare our gSDF model with state-of-the-art methods on ObMan and DexYCB benchmarks. In Figure 6, we qualitatively demonstrate that our approach can produce convincing 3D hand-object reconstruction results." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 304, + 281, + 547, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 281, + 547, + 413 + ], + "spans": [ + { + "bbox": [ + 304, + 281, + 547, + 413 + ], + "type": "text", + "content": "ObMan. Table 5 shows the comparison of hand and object reconstruction results on the synthetic ObMan dataset. Since ObMan does not contain video data, we do not use the spatial-temporal transformer in this model. The proposed gSDF outperforms previous methods by a significant margin. Compared with the recent method [63] that only reconstructs hand-held objects, our joint method produces more accurate object meshes. gSDF achieves a " + }, + { + "bbox": [ + 304, + 281, + 547, + 413 + ], + "type": "inline_equation", + "content": "17.6\\%" + }, + { + "bbox": [ + 304, + 281, + 547, + 413 + ], + "type": "text", + "content": " improvement on " + }, + { + "bbox": [ + 304, + 281, + 547, + 413 + ], + "type": "inline_equation", + "content": "\\mathrm{CD_h}" + }, + { + "bbox": [ + 304, + 281, + 547, + 413 + ], + "type": "text", + "content": " and a " + }, + { + "bbox": [ + 304, + 281, + 547, + 413 + ], + "type": "inline_equation", + "content": "7.1\\%" + }, + { + "bbox": [ + 304, + 281, + 547, + 413 + ], + "type": "text", + "content": " improvement on " + }, + { + "bbox": [ + 304, + 281, + 547, + 413 + ], + "type": "inline_equation", + "content": "\\mathrm{CD_o}" + }, + { + "bbox": [ + 304, + 281, + 547, + 413 + ], + "type": "text", + "content": " over the state-of-the-art accuracy, which indicates that our model can better reconstruct both hand meshes and diverse object meshes." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 304, + 416, + 547, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 416, + 547, + 500 + ], + "spans": [ + { + "bbox": [ + 304, + 416, + 547, + 500 + ], + "type": "text", + "content": "DexYCB. Table 6 presents results on the DexYCB benchmark. We also show the performance of AlignSDF [11] with two backbones ([11]-2BB). Our model demonstrates a large improvement over recent methods. 
In particular, it advances the state-of-the-art accuracy on " + }, + { + "bbox": [ + 304, + 416, + 547, + 500 + ], + "type": "inline_equation", + "content": "\\mathrm{CD_h}" + }, + { + "bbox": [ + 304, + 416, + 547, + 500 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 416, + 547, + 500 + ], + "type": "inline_equation", + "content": "\\mathrm{CD_o}" + }, + { + "bbox": [ + 304, + 416, + 547, + 500 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 304, + 416, + 547, + 500 + ], + "type": "inline_equation", + "content": "12.2\\%" + }, + { + "bbox": [ + 304, + 416, + 547, + 500 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 416, + 547, + 500 + ], + "type": "inline_equation", + "content": "14.4\\%" + }, + { + "bbox": [ + 304, + 416, + 547, + 500 + ], + "type": "text", + "content": ", respectively. The high accuracy of gSDF on DexYCB demonstrates that it generalizes well to real images." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 306, + 513, + 378, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 513, + 378, + 525 + ], + "spans": [ + { + "bbox": [ + 306, + 513, + 378, + 525 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 304, + 530, + 547, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 530, + 547, + 638 + ], + "spans": [ + { + "bbox": [ + 304, + 530, + 547, + 638 + ], + "type": "text", + "content": "In this work, we propose a geometry-driven SDF (gSDF) approach for 3D hand and object reconstruction. We explicitly model the underlying 3D geometry to guide the SDF learning. We first estimate poses of hands and objects according to kinematic chains of pose transformations, and then derive kinematic features and local visual features using the geometry information for signed distance prediction. Extensive experiments on ObMan and DexYCB datasets demonstrate the effectiveness of our proposed method." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 304, + 643, + 547, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 643, + 547, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 643, + 547, + 712 + ], + "type": "text", + "content": "Acknowledgements. This work was granted access to the HPC resources of IDRIS under the allocation AD011013147 made by GENCI. This work was funded in part by the French government under management of Agence Nationale de la Recherche as part of the \"Investissements d'avenir\" program, reference ANR19-P3IA-0001 (PRAIRIE 3IA Institute) and by Louis Vuitton ENS Chair on Artificial Intelligence. We thank Yana Hasson for helpful discussions." + } + ] + } + ], + "index": 33 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 693, + 287, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 693, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 47, + 693, + 287, + 712 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + }, + { + "bbox": [ + 47, + 693, + 287, + 712 + ], + "type": "text", + "content": " To make more fair comparison with Chen et al. [11], we adapt their model to the same asymmetric backbone structure as used in our method." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12897" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 92, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 53, + 92, + 287, + 124 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 92, + 287, + 124 + ], + "spans": [ + { + "bbox": [ + 53, + 92, + 287, + 124 + ], + "type": "text", + "content": "[1] Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lucic, and Cordelia Schmid. ViViT: A video vision transformer. In ICCV, 2021. 5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 126, + 287, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 126, + 287, + 159 + ], + "spans": [ + { + "bbox": [ + 53, + 126, + 287, + 159 + ], + "type": "text", + "content": "[2] Seungryul Baek, Kwang In Kim, and Tae-Kyun Kim. Pushing the envelope for RGB-based dense 3D hand pose estimation via neural rendering. In CVPR, 2019. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 161, + 288, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 161, + 288, + 195 + ], + "spans": [ + { + "bbox": [ + 53, + 161, + 288, + 195 + ], + "type": "text", + "content": "[3] Luca Ballan, Aparna Taneja, Jürgen Gall, Luc Van Gool, and Marc Pollefeys. Motion capture of hands in action using discriminative salient points. In ECCV, 2012. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 197, + 287, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 197, + 287, + 228 + ], + "spans": [ + { + "bbox": [ + 53, + 197, + 287, + 228 + ], + "type": "text", + "content": "[4] Gedas Bertasius, Heng Wang, and Lorenzo Torresani. Is space-time attention all you need for video understanding? In ICML, 2021. 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 232, + 288, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 232, + 288, + 262 + ], + "spans": [ + { + "bbox": [ + 53, + 232, + 288, + 262 + ], + "type": "text", + "content": "[5] Adnane Boukhayma, Rodrigo de Bem, and Philip HS Torr. 3D hand shape and pose from images in the wild. In CVPR, 2019. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 266, + 287, + 288 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 266, + 287, + 288 + ], + "spans": [ + { + "bbox": [ + 53, + 266, + 287, + 288 + ], + "type": "text", + "content": "[6] Romain Brégier. Deep regression on manifolds: a 3D rotation case study. In 3DV, 2021. 3, 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 290, + 288, + 322 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 290, + 288, + 322 + ], + "spans": [ + { + "bbox": [ + 53, + 290, + 288, + 322 + ], + "type": "text", + "content": "[7] Zhe Cao, Ilija Radosavovic, Angjoo Kanazawa, and Jitendra Malik. 
Reconstructing hand-object interactions in the wild. In ICCV, 2021. 1, 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 324, + 288, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 324, + 288, + 378 + ], + "spans": [ + { + "bbox": [ + 53, + 324, + 288, + 378 + ], + "type": "text", + "content": "[8] Angel X Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, et al. ShapeNet: An information-rich 3D model repository. arXiv preprint arXiv:1512.03012, 2015. 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 381, + 288, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 381, + 288, + 435 + ], + "spans": [ + { + "bbox": [ + 53, + 381, + 288, + 435 + ], + "type": "text", + "content": "[9] Yu-Wei Chao, Wei Yang, Yu Xiang, Pavlo Molchanov, Ankur Handa, Jonathan Tremblay, Yashraj S Narang, Karl Van Wyk, Umar Iqbal, Stan Birchfield, et al. DexYCB: A benchmark for capturing hand grasping of objects. In CVPR, 2021. 1, 2, 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 437, + 287, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 437, + 287, + 492 + ], + "spans": [ + { + "bbox": [ + 48, + 437, + 287, + 492 + ], + "type": "text", + "content": "[10] Xingyu Chen, Yufeng Liu, Chongyang Ma, Jianlong Chang, Huayan Wang, Tian Chen, Xiaoyan Guo, Pengfei Wan, and Wen Zheng. Camera-space hand mesh recovery via semantic aggregation and adaptive 2D-1D registration. In CVPR, 2021. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 495, + 287, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 495, + 287, + 538 + ], + "spans": [ + { + "bbox": [ + 48, + 495, + 287, + 538 + ], + "type": "text", + "content": "[11] Zerui Chen, Yana Hasson, Cordelia Schmid, and Ivan Laptev. AlignSDF: Pose-Aligned signed distance fields for handobject reconstruction. In ECCV, 2022. 1, 2, 3, 4, 5, 6, 7, 8" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 540, + 287, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 540, + 287, + 563 + ], + "spans": [ + { + "bbox": [ + 48, + 540, + 287, + 563 + ], + "type": "text", + "content": "[12] Zhiqin Chen and Hao Zhang. Learning implicit fields for generative shape modeling. In CVPR, 2019. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 565, + 288, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 565, + 288, + 608 + ], + "spans": [ + { + "bbox": [ + 48, + 565, + 288, + 608 + ], + "type": "text", + "content": "[13] Enric Corona, Tomas Hodan, Minh Vo, Francesc Moreno-Noguer, Chris Sweeney, Richard Newcombe, and Lingni Ma. LISA: Learning implicit shape and appearance of hands. In CVPR, 2022. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 610, + 287, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 610, + 287, + 643 + ], + "spans": [ + { + "bbox": [ + 48, + 610, + 287, + 643 + ], + "type": "text", + "content": "[14] Thibault Groueix, Matthew Fisher, Vladimir G Kim, Bryan C Russell, and Mathieu Aubry. A papier-mâché approach to learning 3D surface generation. In CVPR, 2018. 
2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 646, + 287, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 646, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 48, + 646, + 287, + 677 + ], + "type": "text", + "content": "[15] Henning Hamer, Juergen Gall, Thibaut Weise, and Luc Van Gool. An object-dependent hand pose prior from sparse training data. In CVPR, 2010. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "type": "text", + "content": "[16] Henning Hamer, Konrad Schindler, Esther Koller-Meier, and Luc Van Gool. Tracking a hand manipulating an object. In ICCV, 2009. 2" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 712 + ], + "type": "list", + "angle": 0, + "index": 36, + "blocks": [ + { + "bbox": [ + 307, + 73, + 547, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 547, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 547, + 106 + ], + "type": "text", + "content": "[17] Shreyas Hampali, Mahdi Rad, Markus Oberweger, and Vincent Lepetit. HOnnotate: A method for 3D annotation of hand and object poses. In CVPR, 2020. 1, 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 107, + 547, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 547, + 150 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 547, + 150 + ], + "type": "text", + "content": "[18] Shreyas Hampali, Sayan Deb Sarkar, Mahdi Rad, and Vincent Lepetit. Keypoint Transformer: Solving joint identification in challenging hands and object interactions for accurate 3D pose estimation. In CVPR, 2022. 1, 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 152, + 547, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 547, + 195 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 547, + 195 + ], + "type": "text", + "content": "[19] Yana Hasson, Bugra Tekin, Federica Bogo, Ivan Laptev, Marc Pollefeys, and Cordelia Schmid. Leveraging photometric consistency over time for sparsely supervised hand-object reconstruction. In CVPR, 2020. 1, 2, 4" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 197, + 547, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 197, + 547, + 228 + ], + "spans": [ + { + "bbox": [ + 307, + 197, + 547, + 228 + ], + "type": "text", + "content": "[20] Yana Hasson, Gül Varol, Cordelia Schmid, and Ivan Laptev. Towards unconstrained joint hand-object reconstruction from RGB videos. In 3DV, 2021. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 230, + 547, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 230, + 547, + 273 + ], + "spans": [ + { + "bbox": [ + 307, + 230, + 547, + 273 + ], + "type": "text", + "content": "[21] Yana Hasson, Gul Varol, Dimitrios Tzionas, Igor Kalevatykh, Michael J Black, Ivan Laptev, and Cordelia Schmid. Learning joint reconstruction of hands and manipulated objects. In CVPR, 2019. 
1, 2, 4, 6, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 275, + 547, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 275, + 547, + 306 + ], + "spans": [ + { + "bbox": [ + 307, + 275, + 547, + 306 + ], + "type": "text", + "content": "[22] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 4, 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 308, + 546, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 308, + 546, + 330 + ], + "spans": [ + { + "bbox": [ + 307, + 308, + 546, + 330 + ], + "type": "text", + "content": "[23] Tony Heap and David Hogg. Towards 3D hand tracking using a deformable model. In FG, 1996. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 332, + 547, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 332, + 547, + 364 + ], + "spans": [ + { + "bbox": [ + 307, + 332, + 547, + 364 + ], + "type": "text", + "content": "[24] Umar Iqbal, Pavlo Molchanov, Thomas Breuel Juergen Gall, and Jan Kautz. Hand pose estimation via latent 2.5D heatmap regression. In ECCV, 2018. 1, 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 365, + 546, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 365, + 546, + 398 + ], + "spans": [ + { + "bbox": [ + 307, + 365, + 546, + 398 + ], + "type": "text", + "content": "[25] Korrawe Karunratanakul, Adrian Spurr, Zicong Fan, Otmar Hilliges, and Siyu Tang. A skeleton-driven neural occupancy representation for articulated hands. In 3DV, 2021. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 399, + 547, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 399, + 547, + 442 + ], + "spans": [ + { + "bbox": [ + 307, + 399, + 547, + 442 + ], + "type": "text", + "content": "[26] Korrawe Karunratanakul, Jinlong Yang, Yan Zhang, Michael J Black, Krikamol Muandet, and Siyu Tang. Grasping Field: Learning implicit representations for human grasps. In 3DV, 2020. 1, 2, 3, 4, 6, 7, 8" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 444, + 547, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 444, + 547, + 475 + ], + "spans": [ + { + "bbox": [ + 307, + 444, + 547, + 475 + ], + "type": "text", + "content": "[27] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 6" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 478, + 546, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 478, + 546, + 510 + ], + "spans": [ + { + "bbox": [ + 307, + 478, + 546, + 510 + ], + "type": "text", + "content": "[28] Nikos Kolotouros, Georgios Pavlakos, Michael J Black, and Kostas Daniilidis. Learning to reconstruct 3D human pose and shape via model-fitting in the loop. In ICCV, 2019. 3, 4" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 512, + 546, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 512, + 546, + 544 + ], + "spans": [ + { + "bbox": [ + 307, + 512, + 546, + 544 + ], + "type": "text", + "content": "[29] Dominik Kulon, Riza Alp Güler, I. Kokkinos, M. Bronstein, and S. Zafeiriou. Weakly-supervised mesh-convolutional hand reconstruction in the wild. In CVPR, 2020. 
2" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 545, + 547, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 545, + 547, + 578 + ], + "spans": [ + { + "bbox": [ + 307, + 545, + 547, + 578 + ], + "type": "text", + "content": "[30] Dominik Kulon, Haoyang Wang, Riza Alp Güler, Michael M. Bronstein, and Stefanos Zafeiriou. Single image 3D hand reconstruction with mesh convolutions. In BMVC, 2019. 2" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 579, + 546, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 579, + 546, + 601 + ], + "spans": [ + { + "bbox": [ + 307, + 579, + 546, + 601 + ], + "type": "text", + "content": "[31] Vincent Lepetit. Recent advances in 3D object and hand pose estimation. arXiv preprint arXiv:2006.05927, 2020. 2" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 307, + 602, + 547, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 602, + 547, + 643 + ], + "spans": [ + { + "bbox": [ + 307, + 602, + 547, + 643 + ], + "type": "text", + "content": "[32] Mengcheng Li, Liang An, Hongwen Zhang, Lianpeng Wu, Feng Chen, Tao Yu, and Yebin Liu. Interacting attention graph for single image two-hand reconstruction. In CVPR, 2022. 2" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 307, + 646, + 547, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 646, + 547, + 677 + ], + "spans": [ + { + "bbox": [ + 307, + 646, + 547, + 677 + ], + "type": "text", + "content": "[33] William E Lorensen and Harvey E Cline. Marching Cubes: A high resolution 3D surface construction algorithm. TOG, 1987. 3" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 307, + 680, + 546, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 680, + 546, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 680, + 546, + 712 + ], + "type": "text", + "content": "[34] Jun Lv, Wenqiang Xu, Lixin Yang, Sucheng Qian, Chongzhao Mao, and Cewu Lu. HandTailor: Towards high-precision monocular 3D hand recovery. In BMVC, 2021. 2" + } + ] + } + ], + "index": 35 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "text", + "content": "12898" + } + ] + } + ], + "index": 37 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 288, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 288, + 116 + ], + "type": "text", + "content": "[35] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3D human pose estimation in the wild using improved CNN supervision. In 3DV, 2017. 6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 118, + 288, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 118, + 288, + 160 + ], + "spans": [ + { + "bbox": [ + 48, + 118, + 288, + 160 + ], + "type": "text", + "content": "[36] Hao Meng, Sheng Jin, Wentao Liu, Chen Qian, Mengxiang Lin, Wanli Ouyang, and Ping Luo. 
3D interacting hand pose estimation by hand de-occlusion and removal. In ECCV, 2022. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 163, + 288, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 163, + 288, + 205 + ], + "spans": [ + { + "bbox": [ + 48, + 163, + 288, + 205 + ], + "type": "text", + "content": "[37] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy Networks: Learning 3D reconstruction in function space. In CVPR, 2019. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 208, + 288, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 208, + 288, + 251 + ], + "spans": [ + { + "bbox": [ + 48, + 208, + 288, + 251 + ], + "type": "text", + "content": "[38] Gyeongsik Moon, Ju Yong Chang, and Kyoung Mu Lee. V2V- PoseNet: Voxel-to-voxel prediction network for accurate 3D hand and human pose estimation from a single depth map. In CVPR, 2018. 1, 2, 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 253, + 288, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 253, + 288, + 296 + ], + "spans": [ + { + "bbox": [ + 48, + 253, + 288, + 296 + ], + "type": "text", + "content": "[39] Franziska Mueller, Florian Bernard, Oleksandr Sotnychenko, Dushyant Mehta, Srinath Sridhar, Dan Casas, and Christian Theobalt. Ganerated hands for real-time 3D hand tracking from monocular RGB. In CVPR, 2018. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 297, + 288, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 297, + 288, + 352 + ], + "spans": [ + { + "bbox": [ + 48, + 297, + 288, + 352 + ], + "type": "text", + "content": "[40] Franziska Mueller, Micah Davis, Florian Bernard, Oleksandr Sotnychenko, Micekal Verschooor, Miguel A Otaduy, Dan Casas, and Christian Theobalt. Real-time pose and shape reconstruction of two interacting hands with a single depth camera. TOG, 2019. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 354, + 288, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 354, + 288, + 396 + ], + "spans": [ + { + "bbox": [ + 48, + 354, + 288, + 396 + ], + "type": "text", + "content": "[41] Iason Oikonomidis, Nikolaos Kyriazis, and Antonis A Argyros. Full DOF tracking of a hand interacting with an object by modeling occlusions and physical constraints. In ICCV, 2011. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 399, + 288, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 399, + 288, + 441 + ], + "spans": [ + { + "bbox": [ + 48, + 399, + 288, + 441 + ], + "type": "text", + "content": "[42] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning continuous signed distance functions for shape representation. In CVPR, 2019. 2, 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 444, + 288, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 444, + 288, + 475 + ], + "spans": [ + { + "bbox": [ + 48, + 444, + 288, + 475 + ], + "type": "text", + "content": "[43] Georgios Pavlakos, Xiaowei Zhou, and Kostas Daniilidis. Ordinal depth supervision for 3D human pose estimation. In CVPR, 2018. 
5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 478, + 288, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 478, + 288, + 510 + ], + "spans": [ + { + "bbox": [ + 48, + 478, + 288, + 510 + ], + "type": "text", + "content": "[44] Georgios Pavlakos, Xiaowei Zhou, Konstantinos G Derpanis, and Kostas Daniilidis. Coarse-to-fine volumetric prediction for single-image 3D human pose. In CVPR, 2017. 4" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 512, + 288, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 512, + 288, + 544 + ], + "spans": [ + { + "bbox": [ + 48, + 512, + 288, + 544 + ], + "type": "text", + "content": "[45] James M Rehg and Takeo Kanade. Visual tracking of high DOF articulated structures: an application to human hand tracking. In ECCV, 1994. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 545, + 288, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 545, + 288, + 578 + ], + "spans": [ + { + "bbox": [ + 48, + 545, + 288, + 578 + ], + "type": "text", + "content": "[46] Javier Romero, Dimitrios Tzionas, and Michael J. Black. Embodied Hands: Modeling and capturing hands and bodies together. TOG, 2017. 1, 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 580, + 288, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 580, + 288, + 622 + ], + "spans": [ + { + "bbox": [ + 48, + 580, + 288, + 622 + ], + "type": "text", + "content": "[47] Shunsuke Saito, Zeng Huang, Ryota Natsume, Shigeo Morishima, Angjoo Kanazawa, and Hao Li. PiFu: Pixel-aligned implicit function for high-resolution clothed human digitization. In ICCV, 2019. 3, 4" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 624, + 288, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 288, + 666 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 288, + 666 + ], + "type": "text", + "content": "[48] Adrian Spurr, Aneesh Dahiya, Xi Wang, Xuong Zhang, and Otmar Hilliges. Self-supervised 3D hand pose estimation from monocular RGB via contrastive learning. In ICCV, 2021. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 670, + 288, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 288, + 712 + ], + "type": "text", + "content": "[49] Srinath Sridhar, Franziska Mueller, Michael Zollhöfer, Dan Casas, Antti Oulasvirta, and Christian Theobalt. Real-time joint tracking of a hand manipulating an object from RGB-D input. In ECCV, 2016. 2" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 712 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 95 + ], + "type": "text", + "content": "[50] Xiao Sun, Bin Xiao, Fangyin Wei, Shuang Liang, and Yichen Wei. Integral human pose regression. In ECCV, 2018. 2, 4" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 96, + 547, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 96, + 547, + 129 + ], + "spans": [ + { + "bbox": [ + 307, + 96, + 547, + 129 + ], + "type": "text", + "content": "[51] Danhang Tang, Hyung Jin Chang, Alykhan Tejani, and Tae Kyun Kim. 
Latent regression forest: Structured estimation of 3D articulated hand posture. In CVPR, 2014. 1, 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 130, + 545, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 130, + 545, + 162 + ], + "spans": [ + { + "bbox": [ + 307, + 130, + 545, + 162 + ], + "type": "text", + "content": "[52] Maxim Tatarchenko, Stephan R Richter, René Ranftl, Zhuwen Li, Vladlen Koltun, and Thomas Brox. What do single-view 3D reconstruction networks learn? In CVPR, 2019. 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 164, + 545, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 164, + 545, + 196 + ], + "spans": [ + { + "bbox": [ + 307, + 164, + 545, + 196 + ], + "type": "text", + "content": "[53] Bugra Tekin, Federica Bogo, and Marc Pollefeys. " + }, + { + "bbox": [ + 307, + 164, + 545, + 196 + ], + "type": "inline_equation", + "content": "\\mathrm{H} + \\mathrm{O}" + }, + { + "bbox": [ + 307, + 164, + 545, + 196 + ], + "type": "text", + "content": ": Unified egocentric recognition of 3D hand-object poses and interactions. In CVPR, 2019. 1, 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 198, + 545, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 198, + 545, + 240 + ], + "spans": [ + { + "bbox": [ + 307, + 198, + 545, + 240 + ], + "type": "text", + "content": "[54] Tze Ho Elden Tse, Kwang In Kim, Ales Leonardis, and Hyung Jin Chang. Collaborative learning for hand and object reconstruction with attention-guided graph convolution. In CVPR, 2022. 1, 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 243, + 545, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 243, + 545, + 274 + ], + "spans": [ + { + "bbox": [ + 307, + 243, + 545, + 274 + ], + "type": "text", + "content": "[55] Aggeliki Tsoli and Antonis A Argyros. Joint 3D tracking of a deformable object in interaction with a hand. In ECCV, 2018. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 277, + 545, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 277, + 545, + 297 + ], + "spans": [ + { + "bbox": [ + 307, + 277, + 545, + 297 + ], + "type": "text", + "content": "[56] Dimitrios Tzionas and Juergen Gall. 3D object reconstruction from hand-object interactions. In ICCV, 2015. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 300, + 545, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 300, + 545, + 353 + ], + "spans": [ + { + "bbox": [ + 307, + 300, + 545, + 353 + ], + "type": "text", + "content": "[57] Jiayi Wang, Franziska Mueller, Florian Bernard, Suzanne Sorli, Oleksandr Sotnychenko, Neng Qian, Miguel A Otaduy, Dan Casas, and Christian Theobalt. RGB2Hands: Real-time tracking of 3D hand interactions from monocular RGB video. TOG, 2020. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 355, + 545, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 355, + 545, + 398 + ], + "spans": [ + { + "bbox": [ + 307, + 355, + 545, + 398 + ], + "type": "text", + "content": "[58] Yangang Wang, Jianyuan Min, Jianjie Zhang, Yebin Liu, Feng Xu, Qionghai Dai, and Jinxiang Chai. Video-based hand manipulation capture through composite motion control. TOG, 2013. 
2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 400, + 545, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 400, + 545, + 443 + ], + "spans": [ + { + "bbox": [ + 307, + 400, + 545, + 443 + ], + "type": "text", + "content": "[59] Fu Xiong, Boshen Zhang, Yang Xiao, Zhiguo Cao, Taidong Yu, Joey Tianyi Zhou, and Junsong Yuan. A2J: Anchor-to-joint regression network for 3D articulated pose estimation from a single depth image. In ICCV, 2019. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 445, + 545, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 445, + 545, + 488 + ], + "spans": [ + { + "bbox": [ + 307, + 445, + 545, + 488 + ], + "type": "text", + "content": "[60] Lixin Yang, Kailin Li, Xinyu Zhan, Jun Lv, Wenqiang Xu, Jiefeng Li, and Cewu Lu. ArtiBoost: Boosting articulated 3D hand-object pose estimation via online exploration and synthesis. In CVPR, 2022. 2, 6" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 491, + 545, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 491, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 307, + 491, + 545, + 533 + ], + "type": "text", + "content": "[61] Lixin Yang, Kailin Li, Xinyu Zhan, Fei Wu, Anran Xu, Liu Liu, and Cewu Lu. OakInk: A large-scale knowledge repository for understanding hand-object interaction. In CVPR, 2022. 1, 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 535, + 545, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 535, + 545, + 567 + ], + "spans": [ + { + "bbox": [ + 307, + 535, + 545, + 567 + ], + "type": "text", + "content": "[62] Lixin Yang, Xinyu Zhan, Kailin Li, Wenqiang Xu, Jiefeng Li, and Cewu Lu. CPF: Learning a contact potential field to model the hand-object interaction. In ICCV, 2021. 1, 2, 6" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 569, + 545, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 569, + 545, + 601 + ], + "spans": [ + { + "bbox": [ + 307, + 569, + 545, + 601 + ], + "type": "text", + "content": "[63] Yufei Ye, Abhinav Gupta, and Shubham Tulsiani. What's in your hands? 3D reconstruction of generic objects in hands. In CVPR, 2022. 2, 3, 4, 6, 8" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 603, + 545, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 603, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 307, + 603, + 545, + 635 + ], + "type": "text", + "content": "[64] Frank Yu, Mathieu Salzmann, Pascal Fua, and Helge Rhodin. PCLs: Geometry-aware neural reconstruction of 3D pose with perspective crop layers. In CVPR, 2021. 6" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 636, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 636, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 636, + 545, + 712 + ], + "type": "text", + "content": "[65] Shanxin Yuan, Guillermo Garcia-Hernando, Björn Stenger, Gyeongsik Moon, Ju Yong Chang, Kyoung Mu Lee, Pavlo Molchanov, Jan Kautz, Sina Honari, Liuhao Ge, Junsong Yuan, Xinghao Chen, Guijin Wang, Fan Yang, Kai Akiyama, Yang Wu, Qingfu Wan, Meysam Madadi, Sergio Escalera, Shile Li, Dongheui Lee, Iason Oikonomidis, Antonis Argyros, and Tae-Kyun Kim. 
Depth-based 3D hand pose estimation:" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "text", + "content": "12899" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 162 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "type": "text", + "content": "From current achievements to future goals. In CVPR, June 2018. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 129 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 129 + ], + "type": "text", + "content": "[66] Yi Zhou, Connelly Barnes, Jingwan Lu, Jimei Yang, and Hao Li. On the continuity of rotation representations in neural networks. In CVPR, 2019. 3, 4" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 130, + 287, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 130, + 287, + 162 + ], + "spans": [ + { + "bbox": [ + 48, + 130, + 287, + 162 + ], + "type": "text", + "content": "[67] Christian Zimmermann and Thomas Brox. Learning to estimate 3D hand pose from single RGB images. In ICCV, 2017. 1, 2" + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "text", + "content": "12900" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/iCLIP_ Bridging Image Classification and Contrastive Language-Image Pre-Training for Visual Recognition/3b34aaa2-686f-4539-b795-03e3193c1a2b_content_list.json b/2023/iCLIP_ Bridging Image Classification and Contrastive Language-Image Pre-Training for Visual Recognition/3b34aaa2-686f-4539-b795-03e3193c1a2b_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..e106ad88810537cf2d69cbcc8cacb38da267f621 --- /dev/null +++ b/2023/iCLIP_ Bridging Image Classification and Contrastive Language-Image Pre-Training for Visual Recognition/3b34aaa2-686f-4539-b795-03e3193c1a2b_content_list.json @@ -0,0 +1,1363 @@ +[ + { + "type": "text", + "text": "iCLIP: Bridging Image Classification and Contrastive Language-Image Pre-training for Visual Recognition", + "text_level": 1, + "bbox": [ + 125, + 128, + 843, + 175 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yixuan Wei $^{1,2}$ , Yue Cao $^{2*}$ , Zheng Zhang $^{2}$ , Houwen Peng $^{2}$ , Zhuliang Yao $^{1,2}$ , Zhenda Xie $^{1,2}$ , Han Hu $^{2}$ , Baining Guo $^{2}$ $^{1}$ Tsinghua University $^{2}$ Microsoft Research Asia", + "bbox": [ + 127, + 202, + 841, + 257 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 291, + 313, + 306 + ], + 
"page_idx": 0 + }, + { + "type": "text", + "text": "This paper presents a method that effectively combines two prevalent visual recognition methods, i.e., image classification and contrastive language-image pre-training, dubbed iCLIP. Instead of naïve multi-task learning that use two separate heads for each task, we fuse the two tasks in a deep fashion that adapts the image classification to share the same formula and the same model weights with the language-image pre-training. To further bridge these two tasks, we propose to enhance the category names in image classification tasks using external knowledge, such as their descriptions in dictionaries. Extensive experiments show that the proposed method combines the advantages of two tasks well: the strong discrimination ability in image classification tasks due to the clean category labels, and the good zero-shot ability in CLIP tasks ascribed to the richer semantics in the text descriptions. In particular, it reaches $82.9\\%$ top-1 accuracy on IN-1K, and meanwhile surpasses CLIP by $1.8\\%$ , with similar model size, on zero-shot recognition of Kornblith 12-dataset benchmark. The code and models are publicly available at https://github.com/weiyx16/iCLIP.", + "bbox": [ + 76, + 323, + 473, + 642 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 671, + 209, + 686 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Image classification is a classic visual problem whose goal is to classify images into a fixed set of pre-defined categories. For example, the widely used ImageNet dataset [8] carefully annotated 14 million images and categorize them into 21,841 categories chosen from the WordNet [36]. For image classification, each category provides a clear taxonomy that groups images of the same category together and separates images from different categories, and thus endows the learnt representation with strong discriminant ability. However, this classification ability is limited to a fixed set of categories [8, 29, 51].", + "bbox": [ + 75, + 696, + 468, + 863 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/af0f5f82fdbfa99cb6fc29b27f21da79ad9876dd462996b500e6c87e28d447a9.jpg", + "image_caption": [ + "Figure 1. An illustration of the proposed iCLIP framework. The iCLIP framework can take two types of annotations for training: classes and alt-texts. It converts the conventional image classification formula to share the same text encoder and the same cosine classifier as that used in the contrastive language-image pretraining (CLIP). It also uses a dictionary-enhanced approach to enrich the original class names in the image classification problem with external information involved in dictionaries. The deep fusion and knowledge-enriched classes both greatly improve the performance compared to naive multi-task learning or performing one of the two tasks alone." + ], + "image_footnote": [], + "bbox": [ + 550, + 294, + 830, + 474 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recently, the method that learns to contrast image-text pairs, known as contrastive language-image pre-training (abbr. CLIP), has well made up such shortage of the conventional image classification methods to achieve strong zero-shot recognition ability [24, 44]. These methods employ a contrastive learning framework, where images and their corresponding alt-texts are treated as positive pairs, while images with all other alt-texts are treated as negative pairs. 
Thanks to the rich semantics involved in the alt-texts, the images can be weakly connected to almost arbitrary categories that already appear in the alt-texts, resulting in its zero-shot ability. A drawback is that the image-text pairs are usually crawled from the internet without human labeling, leading to their noisy and ambiguous nature. Thus the learnt representations are often not conceptual compact, and may lack certain discriminative ability.", + "bbox": [ + 496, + 659, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding Author. The work is done when Yixuan Wei, Zhuliang Yao, and Zhenda Xie are interns at Microsoft Research Asia.", + "bbox": [ + 76, + 875, + 468, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "2776", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This paper explores how to effectively combine these two powerful visual recognition and representation learning methods, to take advantages of both methods and data sources while relieving their shortages. We first try a naive multi-task learning framework that applies the original head networks of the two tasks on top of a shared visual encoder, and jointly learn the network with separate losses of the two tasks. This naive multi-task learning approach has been able to benefit each individual tasks, but the effect is marginal. We thus seek to fuse the two tasks more deeply, so that the advantages of the two tasks can be more effectively joined for better visual recognition, as well as for better transferable representations.", + "bbox": [ + 75, + 90, + 472, + 287 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To this end, our first technique is to deeply unify the formulations of image classification and CLIP learning. By examining their formulations, we found there are two main differences: 1) Different classification losses. Image classification tasks typically use a linear classification loss which has better fitting ability due to the non-normalized nature, while the CLIP-based methods adopt a cosine classifier which has better transferability for new domains and categories [2, 6, 9, 18, 38, 57]. 2) Different parameterization methods for classifier weights. Image classification tasks usually directly optimize the parametric classification weights without a need to process text semantics in class names. The CLIP method can be regarded as generating classifier weights through a text encoder and learns the text encoder instead. 
The text-encoder-based classifier allows sharing between alt-texts as well as modeling their relationships, which enables the ability to tackle any classes.", + "bbox": [ + 75, + 289, + 472, + 547 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Although the linear classifier and direct classifier weight parameterization have been common practice in image classification for many years, it is interesting to find that changing the old formulation as that in the CLIP approach has almost no performance degradation for pure image classification problems. This indicates that we can directly adapt the image classification formulation to the cosine classifier and the text encoder parameterization used by CLIP, with almost no loss. This also allows us to further share the text encoder for both class names and alt-texts. Our experiments show that this deep fusion approach performs much better than the naive multi-task method for both in-domain/zero-shot classification and multi-modal retrieval tasks learning (see 3).", + "bbox": [ + 75, + 550, + 472, + 762 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Another gap between the image classification and CLIP lies in the different text richness. Class names are usually in short, i.e., one or a few words, and sometimes are even ambiguous and polysemous in referring to specific semantics, for example, \"night bird\" can represents either \"owl\" or \"nightingale\". On the contrary, alt-texts in CLIP are usually full sentences containing rich information. To further bridge the gap between the image classification and CLIP, we propose a second technique that leverages the knowledge", + "bbox": [ + 75, + 763, + 472, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "base to enhance the original class names, such as the explanations in dictionaries. In our implementation, knowledge is simply encoded as a prefix/suffix prompt, as illustrated in Fig 1. Although simple, dictionary enhanced method shows to maintain the accuracy for pure image classification problem (see Table 1), while greatly improve the zero-shot and multi-modal retrieval performance as shown in Table 2 and 3. Note the process is just like human beings who learn new words or concepts through both real examples and explanations in dictionaries.", + "bbox": [ + 496, + 90, + 890, + 241 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "By these techniques, we present a framework that deeply fuses the two important tasks of image classification and contrastive language-image pre-training, dubbed iCLIP. Extensive experiments using different combinations of image classification and image-text pair datasets show that the iCLIP method can take advantages of both the discriminative power of image classification tasks and the zero-shot ability in CLIP-like tasks, and perform significantly better than conducting each task alone or the naive multi-task learning in both the in-domain/zero-shot classification and multi-modal retrieval problems. The iCLIP method also shows that learning a stronger transferable representation than using each of the two tasks alone, verified on a variety of downstream tasks, including ADE20K semantic segmentation [68], LVIS long-tail detection [17], and video action recognition [26], as well as different evaluation settings of few-shot and fine-tuning. 
Our contributions are summarized as follows:", + "bbox": [ + 496, + 242, + 892, + 513 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We combined two important vision tasks of image classification and contrastive language-image pretraining into a single framework.", + "- We found that the original image classification formulation can be adapted to CLIP approach with almost no performance degradation. With this finding, we present a deep fusion approach in which the two tasks share the same text encoder and the same classifier type, whose effectiveness is extensively verified on benchmarks.", + "- We proposed a simple yet effective method to introduce knowledge bases into image classification, addressing the ambiguous and polysemous issue of the originally short image names as well as further bridges the gap between classes and alt-texts. It also provides the first showcase of applying knowledge bases into computer vision problems." + ], + "bbox": [ + 517, + 523, + 890, + 801 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 814, + 640, + 830 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Supervised visual classification. Classification is almost ubiquitous for visual understanding tasks of various recognition granularity, e.g., image-level classification [12, 20, 28, 33, 49, 52, 58], object-level classification in", + "bbox": [ + 496, + 839, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2777", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "object detection [3, 15, 19, 45], pixel-level classification in semantic/instance segmentation [5, 35, 63], and video-level action classification [4, 13, 34, 43, 54]. In these tasks, the data is manually annotated to a fixed set of classes, e.g., the 1,000-class ImageNet-1K dataset [8], the 80-class COCO detection dataset [31], the 150-class ADE20K segmentation dataset [68], etc. Among these classification tasks, the image-level classification is particularly important, which has greatly advances the success of deep learning in computer vision, thanks to its high quality and transferable discriminative representations.", + "bbox": [ + 75, + 90, + 472, + 257 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The supervised visual classification is generally performed as a $K$ -way classification problem without considering the text semantics of the class names. The most common classifier is the linear classifier, where the classifier vector of each category is parameterized as model weights and is directly learnt through optimization [28].", + "bbox": [ + 75, + 258, + 470, + 349 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Contrastive language-image pre-training. Pioneered by CLIP [44] and Align [24], the contrastive language-image pre-training is now attracting more and more attention due to its strong zero-shot transfer capacity. These methods learn a network to pair an image and its associated alt-text, in which the image-text pairs are crawled from the Internet. With web-scale alt-text, it is possible to cover almost all classes, and these methods do show to perform very well for zero-shot recognition. 
In their frameworks, the images and texts are embedded using two separate encoders, and the output representations of the images and alt-texts are contrasted according to the positive and negative pairs.", + "bbox": [ + 75, + 351, + 472, + 532 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "While prior to CLIP and Align, there have been a few early works leveraging alt-text or text encoders for image recognition [10,14,16,25,41,46,67]. More follow-up works appeared after CLIP and Align, including Filip [62], DeClip [30], BASIC [42], LiT [66], LiMoE [39], TCL [60], and so on. A drawback of these method is that the image-text pairs are usually noisy without human labeling, leading to the learned representations are not conceptual compact, lacking strong discrimination ability.", + "bbox": [ + 75, + 534, + 472, + 671 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Introducing knowledge into AI systems. Our approach is also related to the expert systems in 1980s which heavily rely on a knowledge base for reasoning [23]. Recently, in natural language process, there also emerges boosting large-scale pretrained models by making use of encyclopedic [1,55] and commonsense knowledge [50]. However, in computer vision, the knowledge bases is not well explored. We hope our findings can encourage more attention to incorporate human knowledge into current vision systems.", + "bbox": [ + 75, + 672, + 472, + 808 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Combination of representation learning. Regarding individual strengths of different representation learning approaches, there have been several works trying to combine different representation learning approaches so as to take advantages of individuals' strength. For example, SLIP [37] combines CLIP learning with a self-supervised contrastive", + "bbox": [ + 75, + 810, + 472, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "learning approach. CoCa [65] combines the CLIP target with an image caption task, in hope to perform well for both understanding and generation problems. MaskCLIP [11] combines CLIP with masked image modeling based self-supervised learning. In contrast, our work also aims to effectively combine different representation learning approaches so as to take both advantages, specifically, the image classification and CLIP.", + "bbox": [ + 496, + 90, + 893, + 210 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Relationship to UniCL [61] Concurrent to our work, there is another work named UniCL [61] which also combines image classification with language-image pretraining. We hope the consistent knowledge will help the community in learning more powerful representations. Also note that there are two main differences comparing our framework to the UniCL framework [61]: 1) We involve all negative classifiers in training the supervised classification, while UniCL only involve negatives in a same batch. To make feasible all negative classifiers, we propose a GPU-distributed implementation that distributes the classifiers evenly into different GPUs. Our implementations show to have better in-domain accuracy compared to UniCL when the category number is as large as tens of thousands (76.3% vs. 70.5% as shown in Tab. 4). 2) We introduce a new dictionary enhanced approach to convert the class names with rich semantical text, which shows to be very beneficial for zero-shot image classification and multi-modal retrieval (see Tab. 
2).", + "bbox": [ + 496, + 210, + 893, + 500 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 500, + 510, + 591, + 526 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we first review existing methods on image classification and contrastive language-image pre-training tasks. Then, we propose a unified framework to bridge the two tasks in a deep fusion fashion. Finally, we introduce dictionary-enhanced category descriptions to further align the two taks on input label space.", + "bbox": [ + 496, + 536, + 893, + 628 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Preliminaries", + "text_level": 1, + "bbox": [ + 500, + 635, + 640, + 648 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Image Classification. Given a set of $<$ image, category label> pairs, i.e., $\\mathcal{D}^c = \\{(I_i,C_i)\\}_{i=1}^{|\\mathcal{D}^c|}$ , image classification task targets to predict the category label of a given image, through a visual encoder $f_v$ , and a parametric category classifier $h_c$ , illustrated in Fig. 2 (b). The parameters of $h_c$ is a matrix $W \\in \\mathcal{R}^{N \\times H}$ , where $N$ is the number of categories and $H$ is the dimension of visual embeddings. The visual encoder $f_v$ transforms each raw image $I_i$ to an embedding $v_i = f_v(I_i)$ , while the classifier $h_c$ predicts the distribution $P_i \\in \\mathcal{R}^N$ over all pre-defined categories via an inner product between $W$ and $v_i$ , i.e., $P_i = W \\cdot v_i$ (bias term is omitted for simplicity). Finally, a cross entropy is applied on $P_i$ and $C_i$ to calculate training loss, which is formulated as:", + "bbox": [ + 496, + 657, + 893, + 857 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\frac {- 1}{| \\mathcal {D} ^ {c} |} \\sum_ {(I _ {i}, C _ {i}) \\in \\mathcal {D} ^ {c}} \\log \\frac {\\exp (W _ {C _ {i}} \\cdot v _ {i})}{\\sum_ {j = 1} ^ {N} \\exp (W _ {j} \\cdot v _ {i})}, \\qquad (1)\n$$\n", + "text_format": "latex", + "bbox": [ + 542, + 864, + 893, + 905 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "2778", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/c8774f0ac673c5836a497e48d2b1456d23aac8254209126679141c31ed7b6276.jpg", + "image_caption": [ + "Figure 2. An illustration of iCLIP framework. $\\mathcal{B}$ is the batch size, $N$ is the number of categories and $G$ is the number of gpus. iCLIP unifies both contrastive language-image pre-training and classification tasks with shared text and visual encoder, taking alt-texts or dictionary enhanced class names as annotations. To reduce the computation, iCLIP distributes the enhanced class names over all gpus in forward, and gathers the embeddings for similarity calculation." + ], + "image_footnote": [], + "bbox": [ + 117, + 78, + 836, + 244 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $W_{j}$ is the parametric weight of $j$ -th category.", + "bbox": [ + 75, + 321, + 419, + 335 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Contrastive Language-Image Pre-training. Given a set of $<$ image, alt-text> pairs, i.e., $\\mathcal{D}^a = \\{(I_i,T_i^a)\\}_{i=1}^{|\\mathcal{D}^a|}$ , contrastive language-image pre-training targets to close the distances between paired image and text while enlarging those of unpaired ones, through a visual encoder $f_v$ and a text encoder $f_t$ , shown in Fig. 2 (a). 
They transform the image $I_i$ and the alt-text $T_i^a$ to feature embeddings $v_i$ and $s_i$ , respectively. A contrastive loss function is applied to shrink the cosine distance of $v_i$ and $s_i$ , which is defined as:", + "bbox": [ + 75, + 335, + 468, + 473 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\frac {- 1}{| \\mathcal {D} ^ {a} |} \\sum_ {\\substack {(I _ {i}, T _ {i} ^ {a}) \\\\ \\in \\mathcal {D} ^ {a}}} \\log \\frac {\\exp \\left(\\cos \\left(f _ {t} \\left(T _ {i} ^ {a}\\right) , v _ {i}\\right) / \\tau\\right)}{\\sum_ {T _ {j} ^ {a} \\in \\mathcal {T} ^ {a}} \\exp \\left(\\cos \\left(f _ {t} \\left(T _ {j} ^ {a}\\right) , v _ {i}\\right) / \\tau\\right)}, \\tag{2}\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 481, + 468, + 525 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\cos (\\cdot ,\\cdot)$ represents the cosine similarity between two embeddings, $\\mathcal{T}^a$ is all the alt-texts in a batch including one positive paired alt-text and $|\\mathcal{T}^a| - 1$ negative ones, and $\\tau$ is a temperature hyper-parameter to scale the similarities.", + "bbox": [ + 75, + 535, + 468, + 595 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Task differences. Comparing the formations of image classification and language-image pre-training, we can draw three main differences between them. 1) Training loss functions. Classification commonly adopts a cross-entropy loss on inner-product similarity, while image-text learning uses InfoNCE loss on cosine similarity. 2) Classifier types. Classification adopts a parametric category classifier, while image-text learning uses a text encoder. 3) Label granularity. Category names in classification are usually very short, i.e., one or few words, while the captions in image-text pretraining are full sentences containing rich semantics.", + "bbox": [ + 73, + 595, + 470, + 762 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Bridge Image Classification and Contrastive Language-Image Pre-training", + "text_level": 1, + "bbox": [ + 76, + 771, + 468, + 803 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To bridge image classification and image-text alignment, we introduce three adaptations to align their training losses, unify the classifier types, and close the label granularity gap. The overall adaption is visualized in Fig. 3.", + "bbox": [ + 75, + 809, + 468, + 869 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Classification with Text Encoder. As formulated in Eq. (1), image classification commonly adopts a cross-", + "bbox": [ + 75, + 869, + 468, + 901 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/c2bed6768d0cd53928f163cd4f3c903942f1447b74e996c37a773bcc413d3570.jpg", + "image_caption": [ + "Figure 3. An illustration of our approach to bring image classification (a) to CLIP (b), from the perspective of loss function, classifier types and label granularity. We reformulate the linear classifier (a.1) with a text-encoder-based classifier (a.2), and enhance the class names with a text description from the dictionary (a.3)." + ], + "image_footnote": [], + "bbox": [ + 506, + 316, + 893, + 468 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "entropy loss on top of the inner-product similarity between the visual embedding $v_{i}$ and the parametric classifier $h_c$ . This formulation is not in line with the InfoNCE loss in Eq. (2), leading to a misalignment between the two paradigms. 
To address this issue, we adopt a cosine similarity for image classification, instead of the original inner-product similarity in Eq. (1), which formulates a cosine classifier as:", + "bbox": [ + 496, + 561, + 892, + 681 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\frac {- 1}{| \\mathcal {D} ^ {c} |} \\sum_ {\\left(I _ {i}, C _ {i}\\right) \\in \\mathcal {D} ^ {c}} \\log \\frac {\\exp \\left(\\cos \\left(W _ {C _ {i}} , v _ {i}\\right) / \\tau\\right)}{\\sum_ {j = 1} ^ {N} \\exp \\left(\\cos \\left(W _ {j} , v _ {i}\\right) / \\tau\\right)}. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 519, + 688, + 890, + 724 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Cosine similarity is a common practice in metric learning [40]. It can smoothly align the supervised image classification with the cross-modal contrastive pre-training in terms of learning objective function, i.e., Eq. (2). Moreover, our experiments demonstrate that this cosine classifier performs on par with the traditional linear classifier (see Tab. 1).", + "bbox": [ + 496, + 733, + 893, + 824 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The cosine classifier aligns the training losses of two tasks. However, the annotations, i.e., category labels and captions, are modeled separately by the parametric category classifier $h_c$ and the text encoder $f_t$ . As analyzed in Sec. 4.3, shallowly combining the two tasks with a shared", + "bbox": [ + 496, + 825, + 893, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "2779", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "visual encoder $f_{v}$ and two separate task heads does not fully take advantage of the gold annotations in image classification and rich concepts in textual captions, resulting in a suboptimal solution with limited transferring capacity.", + "bbox": [ + 75, + 90, + 468, + 151 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To tackle this issue, we take label semantics into consideration and propose to utilize the text encoder $f_{t}$ as a meta classifier for image classification. Formally, we replace the label index $C_i$ with its class name $M_{i}$ , and generate the classifier weight $W$ on-the-fly through the text encoder $f_{t}$ which is shared with image-text pre-training. The new formulation is represented as:", + "bbox": [ + 75, + 151, + 470, + 257 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L} = \\frac{-1}{|\\mathcal{D}^{c}|}\\sum_{\\substack{(I_{i},M_{i})\\\\ \\in \\mathcal{D}^{c}}}\\log \\frac{\\exp\\left(\\cos\\left(f_{t}\\left(M_{i}\\right),v_{i}\\right) / \\tau\\right)}{\\sum_{j = 1}^{N}\\exp\\left(\\cos\\left(f_{t}\\left(M_{j}\\right),v_{i}\\right) / \\tau\\right)}. \\tag{4}\n$$\n", + "text_format": "latex", + "bbox": [ + 94, + 266, + 468, + 310 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this way, the text encoder $f_{t}$ is not only used to extract semantics from gold category labels, but also capture textual information from image captions. Both the visual and textual encoders are shared across the two tasks, leading to a deep fusion of the two tasks.", + "bbox": [ + 75, + 319, + 468, + 393 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Classification with Dictionary Enhancement. The cosine classifier with text encoder as a meta network has largely unify the two tasks in model training. 
In this step, we further align them on input label granularity, reducing the disparity between label names (one or few words) and image captions (a complete sentence). Our proposal is to integrate external knowledge into label names. More specifically, for each label names, we introduce detailed descriptions from its corresponding synset in the dictionary WordNet [36] as the external knowledge and create a pseudo sentence as label for each categories. We combine the original class names and their dictionary descriptions to form the enhanced texts as the input to the text encoder. Also, we add a prompt to make the sentence more fluent. The final dictionary-enhanced description for each category is formed as:", + "bbox": [ + 75, + 396, + 468, + 635 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {T} ^ {c} = \\mathrm {A p h o t o o f a} \\left\\{\\mathrm {N A M E} \\right\\} _ {C _ {i}}, \\left\\{\\mathrm {D E S C R I P T I O N} \\right\\} _ {C _ {i}}. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 99, + 647, + 468, + 665 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Such dictionary-enhanced descriptions have similar label granularity to alt-text, and thus further bring image classification closer to image-text alignment. Moreover, the description introduces more details of each category, being capable of reducing potential misconception. For example, the class \"night bird\" actually includes several kinds of birds, like owl, nightingale, etc. Such a category name cannot allow the model to learn precise representations due to the blurry concepts. If we augment the category with more external knowledge, such as \"a photo of a night bird, any bird associated with night: owl, nightingale, nighthawk\", it will help the model learn discriminative representation on distinguishing different concepts (e.g., bird species).", + "bbox": [ + 75, + 674, + 468, + 869 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "A Unified Framework. The above three steps adapt image classification to image-text alignment from the perspec", + "bbox": [ + 76, + 869, + 468, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "tive of training loss, classifier type and annotation granularity, respectively. Towards the final unification, we propose a new framework dubbed iCLIP, as presented in Fig. 2 (c), which bridges Image Classification and Image-Text Alignment with a unified contrastive learning loss formulated as:", + "bbox": [ + 496, + 90, + 890, + 167 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\frac {- 1}{| \\mathcal {D} |} \\sum_ {\\left(I _ {i}, T _ {i}\\right) \\in \\mathcal {D}} \\log \\frac {\\exp \\left(\\cos \\left(f _ {t} \\left(T _ {i}\\right) , v _ {i}\\right) / \\tau\\right)}{\\sum_ {T _ {j} \\in \\tau} \\exp \\left(\\cos \\left(f _ {t} \\left(T _ {j}\\right) , v _ {i}\\right) / \\tau\\right)}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 517, + 184, + 890, + 224 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathcal{D}$ is a set consisting of the image classification data $D^c$ and the image-text alignment data $D^a$ , i.e., $\\mathcal{D} = \\{\\mathcal{D}^c, D^a\\}$ , while $\\mathcal{T}$ indicates a combination of $T^c$ and $T^a$ , i.e., $\\mathcal{T} = \\{\\mathcal{T}^c, T^a\\}$ . Text label $T_i$ is either an image caption $T_i^a$ sampled from $T^a$ or a dictionary-enhanced description $T_i^c$ sampled from $T^c$ . 
It is worth noting that, with this unified framework, both the text encoder $f_t$ and the visual encoder $f_v$ are shared across the two tasks, achieving a deep fusion. The proposed unified framework is able to leverage any combination of tag-labeled and caption-labeled image datasets for pre-training. This combination allows the model to learn more discriminative representation, while capturing more visual concepts from the textual description. On the other hand, our iCLIP method is efficient.", + "bbox": [ + 496, + 229, + 890, + 441 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Distributed Implementation. In our iCLIP framework, the text embedding of each category is generated by the shared text encoder on-the-fly. This computation is affordable when the number of categories $N$ is not large. However, it will become infeasible if category number scales up to be large, such as 22k categories in ImageNet-22K [8]. To make the iCLIP framework feasible for large-category classification data in practice, we adopt a distribution implementation strategy [6]. Specifically, we distribute all the enhanced class names evenly over $G$ GPUs in forward, and gather the embeddings from eachgpu for similarity calculation, reducing the computation cost and saves memory consumption by the text encoder to $1 / G$ .", + "bbox": [ + 496, + 441, + 890, + 638 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/b954c033fd52506bdd67c6133077a625763f59a6f4d87136c8735e99f315ea21.jpg", + "table_caption": [ + "Table 1. Ablation on formulation adaptations for image classification task. Models are trained with 100 epochs." + ], + "table_footnote": [], + "table_body": "
<table><thead><tr><th>#</th><th>Cosine Loss</th><th>Text-enc. as Classifier</th><th>Enhanced classes</th><th>IN-1K</th></tr></thead><tbody>
<tr><td>1</td><td></td><td></td><td></td><td>80.9</td></tr>
<tr><td>2</td><td>✓</td><td></td><td></td><td>81.5</td></tr>
<tr><td>3</td><td>✓</td><td>✓</td><td></td><td>81.2</td></tr>
<tr><td>4</td><td>✓</td><td>✓</td><td>✓</td><td>81.4</td></tr>
</tbody></table>
", + "bbox": [ + 550, + 683, + 836, + 768 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiment", + "text_level": 1, + "bbox": [ + 500, + 776, + 624, + 792 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We verify the effectiveness of the proposed iCLIP framework through the comparisons to single-task baselines and a naive multi-task learning baseline. The comparisons are conducted in three settings covering different scales of pretraining data. In evaluation, we assess the models on different tasks, including in-domain classification, zero-shot classification, multi-modal retrieval, and downstream tasks.", + "bbox": [ + 496, + 799, + 890, + 904 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "2780", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/5ebf13e05a96325c267fe24c466a6f36c4f49d14f2e1a2927171824e23809fff.jpg", + "table_caption": [ + "Table 2. Ablation study conducted on IN-22K [8] and YFCC-14M [53]. Models are pre-trained from scratched with 32 epochs following UniCL [61]. COCO and Flickr stand for MSCOCO [31] and Flickr30K [64]. IR and TR stand for image retrieval and text retrieval." + ], + "table_footnote": [], + "table_body": "
Zero-shot classificationZero-shot retrieval
#Training DataMethodIN-1K14-dataset avg.Flickr-IRFlickr-TRCOCO-IRCOCO-TR
1YFCC-14MCLIP [44]30.136.321.537.912.521.2
2YFCC-14M (half) + IN-21K (half)iCLIP (w/o Desc.)39.445.427.639.113.020.4
3YFCC-14M (half) + IN-21K (half)iCLIP45.949.931.949.815.527.2
4YFCC-14M + IN-21KiCLIP (w/o Desc.)41.149.433.451.216.326.5
5YFCC-14M + IN-21KiCLIP50.954.437.155.718.530.7
6YFCC-14M + IN-22KiCLIP (w/o Desc.)76.251.633.248.214.423.8
7YFCC-14M + IN-22KiCLIP76.355.536.255.318.029.7
", + "bbox": [ + 86, + 116, + 875, + 260 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Experimental Setup", + "text_level": 1, + "bbox": [ + 76, + 273, + 267, + 291 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Pre-training data and settings. We consider three different scales of dataset combination for model pre-training.", + "bbox": [ + 76, + 297, + 470, + 329 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- ImageNet-1K [8] and GCC-3M [48]. In this setting, we use ImageNet-1K as the classification data while GCC-3M as the image-text data. We adopt a Swin-T [33] initialized with MoBY [59] as the visual encoder, while for the textual encoder, we use a pretrained RoBERTa-B [32]. We sample half number of images from each dataset in a mini-batch and train the models with a batch size of $128 \\times 8$ V100 GPUs for 100 epochs. The highest learning rate is 2e-4 with a cosine learning rate schedule and 5 epochs warm-up. Weight decay is set to be 0.01. RandAugment [7] and stochastic depth [21] with a rate of 0.1 are used for visual encoder only.", + "- ImageNet-22K [8] and YFCC-14M [53]. We follow UniCL [61] to train all models from scratch with 32 epochs for a fair comparison with it. Swin-T [33] is used as the visual encoder, and a 12-layer transformer with a hidden dimension of 512 same as CLIP [44] is used as the text encoder. A batch size of $512 \\times 16$ GPUs is adopted. The highest learning rate is selected from 2e-4 and 8e-4. Other regularization is the same as previous, except for a larger weight decay of 0.05. We also conduct experiments using two variants of this setup for a fair and clean comparison with the methods that use one task alone (IC or CLIP): 1) Excluding the 1,000 ImageNet-1K classes in ImageNet-22K dataset (dubbed IN-21K). This setup variant allows us to evaluate the zero-shot accuracy on ImageNet-1K for different methods; 2) Half images of the ImageNet-21K and YFCC-14M are used, such that the dataset size and training iterations are the same as that used in one single task.", + "- ImageNet-22K [8] and Laion-400M [47]. For this large-scale pre-training setting, we adopt a Swin-B initialized with MoBY as the visual encoder and a pre-trained RoBERTa-B as the text encoder. We train iCLIP for 100K iters, with a batch size of $192 \\times 64$ V100 GPUs. In each mini batch, we sample 64 images from IN-22K and 128 images from Laion-400M. The model is trained on" + ], + "bbox": [ + 83, + 333, + 472, + 901 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "classification data for around 30 epochs and on image-text data for around 2 epochs equivalently. The highest learning rate is 1e-3 with a cosine learning rate schedule and a warm-up for $16.7\\mathrm{K}$ liters. Weight decay is set to 0.05 and drop depth rate is set to 0.2.", + "bbox": [ + 519, + 273, + 890, + 349 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation datasets and settings. During evaluation, we assess the models considering five different settings.", + "bbox": [ + 500, + 361, + 890, + 392 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Zero-shot classification. We evaluate the concept coverage and generalization ability of the models on three datasets: 1) ImageNet-1K variants, including IN-1K [8], and IN-Sketch (IN-S) [56]. Top-1 accuracy is reported; 2) the widely-used Kornblith 12-dataset benchmark [27]; 3) 14 datasets used in UniCL [61]. 
For 2) and 3), averaged accuracy is reported.", + "- Zero-shot multi-modal retrieval. Flickr30K [64] (1K test set) and MSCOCO [31] (5K test set) are used to evaluate the alignment between image and text modalities. We report the Top-1 recall on both image retrieval (IR) and text retrieval (TR).", + "- In-domain classification. ImageNet-1K data is included in some of our pre-training setups, so we conduct indomain evaluation on ImageNet-1K in these cases. The Top-1 accuracy is reported.", + "- Few-shot classification. Following CLIP [44], we also evaluate the models on few-shot classification task using Kornblith 12-dataset with a frozen visual encoder. Averaged accuracy is reported.", + "- Fine-tuning on downstream tasks. To validate the generalization ability of iCLIP, the models are fine-tuned and compared on semantic segmentation [68], long-tail detection [17], and video action recognition [26]. We report val mIoU,_bbox mAP and Top-1 accuracy, respectively. The detailed settings can be found in the supplementary material." + ], + "bbox": [ + 506, + 396, + 890, + 813 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Experiments on IN-1K [8] and CC3M [48]", + "text_level": 1, + "bbox": [ + 500, + 830, + 861, + 849 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Formulation adaptations for image classification. Tab. 1 ablates the effect of adapting the common image classification to that used in iCLIP, including both cosine loss, the", + "bbox": [ + 498, + 854, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "2781", + "bbox": [ + 482, + 944, + 513, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/30619a3d1a609564c42d4f80068a74647e1cfe905ef647122d4dc96f1cc6c23d.jpg", + "table_caption": [ + "Table 3. Ablation conducted on IN-1K [8] and GCC-3M [48] combined data. For the models only using IN-1K, we train them for 100 epochs. For the models only using GCC-3M, we train them with the same iterations and batch size as the ones used in IN-1K." + ], + "table_footnote": [], + "table_body": "
12-datasetImageNet-related
#Methodavg.IN-1KIN-S
1Sup-only-80.929.4
2VL-only31.432.418.3
3Naïve multi-task35.180.638.3
4iCLIP (w/o Desc.)37.780.538.6
5iCLIP39.180.438.7
", + "bbox": [ + 106, + 155, + 442, + 256 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/2523dbf155557c5cb05545c56b4c48e62805c05cb94b154b6ca35d4ea4d018e8.jpg", + "table_caption": [ + "Table 4. Comparison with UniCL. Models are pre-trained from scratched with 32 epochs, following UniCL [61]." + ], + "table_footnote": [], + "table_body": "
#Training DataMethodIN-1K14-dataset avg.
1YFCC + IN-21K (half)UniCL [61]36.445.5
2YFCC + IN-21K (half)iCLIP45.949.9
3YFCC + IN-21KUniCL [61]40.549.1
4YFCC + IN-21KiCLIP50.954.4
5YFCC + IN-22KUniCL [61]70.552.4
6YFCC + IN-22KiCLIP76.355.5
", + "bbox": [ + 99, + 309, + 447, + 412 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "text-encoder-based classifier and enhanced class names using ImageNet-1K dataset. It can be seen that the cosine classification loss gets slightly better performance than the linear one, with a $+0.6\\%$ gain on IN-1K (see #1 v.s. #2). When further adapting the text-encoder-based classifier (#3) and enhancing class names from dictionaries (#4), it has almost no performance degradation ( $+0.3\\%$ and $+0.5\\%$ on IN-1K compared to the linear classifier), which allows us to further sharing the text encoder with CLIP for tasks unification.", + "bbox": [ + 75, + 431, + 468, + 566 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Zero-shot and in-domain classification. With previous adaptations on the image classification formulation, we can further share the text encoder between the two tasks. To ablate the effect of sharing the text encoder, we set a naive multi-task baseline, that combines image classification and CLIP in a shallow fusion, i.e., simply averaging the loss Eq. (1) and Eq. (2). Each has its own head network, i.e., the fully-connected layer $W$ for Eq. (1) and the text encoder $f_{t}$ for Eq. (2). The best performances of the two heads are reported in Tab. 3. With a shared text encoder across the two tasks, our iCLIP (w/o Desc.) outperforms the naive multi-task on Kornblith 12-dataset zero-shot classification by $+2.6\\%$ in average, while they are comparable on ImageNet-related datasets classification (see #3 v.s. #4). Our iCLIP deeply unifies two tasks, thus better gathering the merits of the two learning protocols. When compared with the supervised softmax classifier baseline, i.e., Eq. (1) Sup-only, and the contrastive image-text pre-training baseline, i.e., Eq. (2) VL-only, our method is slightly worse than Sup-only on IN-1K by $0.4\\%$ , while achieves superior performance on other evaluation settings, $+6.3\\%$ better than VL-only method on 12-dataset zero-shot testing and $+9.2\\%$", + "bbox": [ + 75, + 568, + 470, + 901 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "better than Sup-only method on IN-S (see #4 v.s. #1). Moreover, the dictionary enhancement on class names (#5) can further bring an average of $+1.4\\%$ improvements on Kornblith 12-dataset, revealing the increased discriminative representation for ambiguous concepts.", + "bbox": [ + 496, + 90, + 890, + 167 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Experiments on IN-22K [8] and YFCC14M [53]", + "text_level": 1, + "bbox": [ + 498, + 172, + 890, + 190 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Effects of the unified framework. Here, we further ablate the effect of the unified formulation for deep fusion of the two tasks. In #2, #4 and #6 of Tab. 2, we show the results of our unified framework under three different dataset combination setups. Compared with the CLIP baseline (#1), our iCLIP (#2) earns $+8.3\\%$ gains on IN-1K zero-shot classification and also $+9.1\\%$ improvements when evaluated on the 14-dataset. In addition, our iCLIP is better than the CLIP baseline on most cross-modal retrieval benchmarks, while only using half of visual-language data in pre-training.", + "bbox": [ + 496, + 196, + 890, + 348 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Effects of dictionary enhancement. Furthermore, we dissect the model to study the contributions of dictionary-enhanced category description. From Tab. 
2, we can see that enhancing each class name with an informative description from the dictionary brings consistent improvements on both zero-shot classification and zero-shot retrieval under the three dataset combination setups (see #3, #5 and #7). In particular, when pre-trained with half of the images of YFCC-14M and IN-21K (#3), the integrated knowledge contributes $+6.5\%$ improvements on IN-1K zero-shot classification, which makes our iCLIP reach $45.9\%$, being $+5.4\%$ better than the UniCL method [61] trained with the full images of YFCC-14M and IN-21K (see #3 in Tab. 4). More importantly, the enhanced class names are beneficial to cross-modal retrieval. For example, for image-to-text search, the dictionary-enhanced descriptions bring $10.7\%$ and $6.8\%$ top-1 recall gains on Flickr30K [64] and MSCOCO [31] respectively, as reported in row 3 of Tab. 2.",
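To make the ablated dictionary enhancement concrete, here is a rough, hedged sketch of how the 'A photo of a {NAME}, {DESCRIPTION}.' prompts could be assembled with NLTK's WordNet interface. Picking the first matching synset is a simplification (the paper maps each class to its own synset), so treat this as an illustrative assumption rather than the authors' exact procedure.

```python
# Hedged sketch: build dictionary-enhanced class descriptions from WordNet.
# Requires nltk with the 'wordnet' corpus downloaded (nltk.download('wordnet')).
from nltk.corpus import wordnet as wn

def dictionary_enhanced_prompt(class_name: str) -> str:
    synsets = wn.synsets(class_name.replace(" ", "_"))
    if not synsets:
        # Fall back to the plain prompt when the name is not found in WordNet.
        return f"A photo of a {class_name}."
    description = synsets[0].definition()  # first-synset heuristic (an assumption)
    return f"A photo of a {class_name}, {description}."

print(dictionary_enhanced_prompt("night bird"))
```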
Visual encoderPre-trainImageNet-related12-dataset avg.downstream tasks
MethodArch.length (#im).IN-1KIN-S0-shot4-shotADE20KLVISKinetics400
CLIP [44]ViT-B/16400M×32 eps68.646.6‡68.866.4‡---
OpenCLIP [22]ViT-B/16400M×32 eps67.152.4‡70.9‡----
Sup-onlySwin-Base14M×90 eps82.642.0-67.652.135.982.7
VL-onlySwin-Base400M×3 eps61.151.567.273.352.036.682.3
iCLIPSwin-Base400M×2 eps+14M×30 eps82.959.870.678.152.637.983.1
", + "bbox": [ + 86, + 142, + 885, + 268 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f086c3b3a0ac2a1c5a5f821886e5d46ca51c0858ce47e88287b36d29c6b69d1b.jpg", + "image_caption": [ + "Figure 4. Major comparison with the CLIP-ViT-B/16 of few-shot classification (top-1 accuracy) on the Kornblith 12-dataset. $\\star$ denotes the zero-shot performances. Results of CLIP on few-shot classification are reproduced using released model. We run every experiments three times and the averaged results are reported." + ], + "image_footnote": [], + "bbox": [ + 117, + 287, + 423, + 435 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "OpenCLIP [22], which also uses Laion-400M data for pretraining [47], by more than $+15\\%$ on IN-1K, mainly due to the pre-training data IN-22K covers the visual concepts in IN-1K. Moreover, when performing zero-shot evaluation on 12 datasets [27], our iCLIP model also achieves non-trivial improvements, e.g., an average of over $+3\\%$ gains (VL-only in Tab. 5). In addition, our iCLIP is comparable to OpenCLIP on 12 datasets in average with fewer training time. More details are elaborated in the supplementary material.", + "bbox": [ + 75, + 535, + 468, + 672 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Few-shot classification We also conduct experiments in few-shot settings. Following CLIP [44], we freeze the visual encoder and append a linear probe layer for few-shot fine-tuning. We notice that the performance of CLIP [44] in few-shot classification cannot catch up with that of zero-shot classification, unless more than 4 examples per class are given, as presented in Fig. 4 ( $\\star$ v.s. $-\\bullet$ ). We conjecture the underlying reason is that the number of training samples is too limited to train a randomly initialized classifier. This situation can be alleviated by fine-tuning the pretrained text encoder, instead of the linear probe layer. In this way, text encoder is able to serve as a good initialization for few-shot classification, closing the gap between pretraining and fine-tuning. We evaluate such method on Kornblith 12-dataset benchmark [27] and report the results in Fig. 4.", + "bbox": [ + 75, + 674, + 470, + 902 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "When only given one example per class, by utilizing text encoder as the classifier, our iCLIP achieve $73.9\\%$ on 12-dataset in average, surpassing the original CLIP model by $+29.5\\%$ . Such one-shot recognition gets $+3.3\\%$ gains over the zero-shot baseline ( $\\star$ v.s. $-\\bullet$ ), demonstrating good few-shot transfer ability. When using 16 examples per class, our model still performs superior to CLIP by $4.1\\%$ . Compared to supervised-only model and visual-linguistic only model, our unified contrastive learning pretrained model obtains $+24.6\\%$ and $+6.1\\%$ better accuracy under one-shot learning setting. Such advantages are kept to 16-shot with $+2.7\\%$ and $+5.0\\%$ gains $(- \\bullet -$ and $- \\bullet -)$ .", + "bbox": [ + 496, + 287, + 893, + 470 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Fine-tuning on Downstream Tasks We also study the generalization capability of our pre-trained models on downstream tasks, including semantic segmentation, object detection and video recognition. As shown in Tab. 5, compared to Sup-only, our iCLIP surpasses it by $+0.5\\%$ , $+2.0\\%$ , $+0.4\\%$ on the three downstream tasks, respectively. We also earn $+0.6\\%$ , $+1.3\\%$ , $+0.8\\%$ gains over VL-only baseline. 
These results reveal that our unified method could learn general visual representations.", + "bbox": [ + 496, + 478, + 893, + 614 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 646, + 619, + 662 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we propose a unified framework dubbed iCLIP to bridge image classification and language-image pre-training. It naturally forces the cross-modal feature learning in a unified space, where the two tasks share the same visual and textual encoders. Extensive experiments demonstrate that iCLIP is effective, and can be generalized to different visual recognition scenarios, including zero-shot, few-shot, and fully-supervised fine-tuning.", + "bbox": [ + 496, + 680, + 893, + 801 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitations. One limitation of iCLIP is that, despite its competitive performance, the model still relies on human labeled classification data that is not scalable. Besides, our model currently only adopts median-size parameters, which can not fully validate the generation ability to large-scale models. We are interested in exploring this in future work.", + "bbox": [ + 496, + 809, + 893, + 902 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "2783", + "bbox": [ + 482, + 944, + 514, + 957 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Soren Auer, Christian Bizer, Georgi Kobilarov, Jens Lehmann, Richard Cyganiak, and Zachary Ives. Dbpedia: A nucleus for a web of open data. In The semantic web, pages 722-735. Springer, 2007. 3", + "[2] Yue Cao, Zhenda Xie, Bin Liu, Yutong Lin, Zheng Zhang, and Han Hu. Parametric instance classification for unsupervised visual feature learning. 2020. 2", + "[3] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European Conference on Computer Vision, pages 213-229. Springer, 2020. 3", + "[4] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6299-6308, 2017. 3", + "[5] Liang-Chieh Chen, George Papandreou, Florian Schroff, and Hartwig Adam. Rethinking atrous convolution for semantic image segmentation. arXiv preprint arXiv:1706.05587, 2017.3", + "[6] Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. pages 1597-1607, 2020. 2, 5", + "[7] Ekin D Cubuk, Barret Zoph, Jonathon Shlens, and Quoc V Le. Randaugment: Practical automated data augmentation with a reduced search space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 702-703, 2020. 6", + "[8] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 1, 3, 5, 6, 7, 8", + "[9] Jiankang Deng, Jia Guo, Jing Yang, Niannan Xue, Irene Cotcia, and Stefanos P Zafeiriou. ArcFace: Additive angular margin loss for deep face recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, pages 1-1, 2021. 
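For reference, the zero-shot evaluations reported above amount to nearest-prompt classification with the shared text encoder: the classifier weights are simply the embeddings of the (dictionary-enhanced) class prompts, and prediction is an argmax over cosine similarities. The sketch below is hedged; the encoder and tokenizer arguments are placeholders rather than a specific released API.

```python
# Hedged sketch of zero-shot classification with a shared text encoder (PyTorch).
# `visual_encoder`, `text_encoder`, and `tokenize` are placeholder callables that
# yield (B, H) image embeddings, (N, H) prompt embeddings, and token tensors.
import torch
import torch.nn.functional as F

@torch.no_grad()
def zero_shot_classify(images, class_prompts, visual_encoder, text_encoder, tokenize):
    class_emb = F.normalize(text_encoder(tokenize(class_prompts)), dim=-1)  # (N, H)
    image_emb = F.normalize(visual_encoder(images), dim=-1)                 # (B, H)
    scores = image_emb @ class_emb.t()                                      # cosine similarities
    return scores.argmax(dim=-1)                                            # predicted class indices
```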
2", + "[10] Karan Desai and Justin Johnson. Virtex: Learning visual representations from textual annotations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11162-11173, 2021. 3", + "[11] Xiaoyi Dong, Yinglin Zheng, Jianmin Bao, Ting Zhang, Dongdong Chen, Hao Yang, Ming Zeng, Weiming Zhang, Lu Yuan, Dong Chen, et al. Maskclip: Masked self-distillation advances contrastive language-image pretraining. arXiv preprint arXiv:2208.12262, 2022. 3", + "[12] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2021. 2", + "[13] Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. Slowfast networks for video recognition. In" + ], + "bbox": [ + 78, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Proceedings of the IEEE international conference on computer vision, pages 6202-6211, 2019. 3", + "[14] Andrea Frome, Greg S Corrado, Jon Shlens, Samy Bengio, Jeff Dean, Marc' Aurelio Ranzato, and Tomas Mikolov. Devise: A deep visual-semantic embedding model. In C. J. C. Burges, L. Bottou, M. Welling, Z. Ghahramani, and K. Q. Weinberger, editors, Advances in Neural Information Processing Systems, volume 26. Curran Associates, Inc., 2013. 3", + "[15] Ross Girshick, Jeff Donahue, Trevor Darrell, and Jitendra Malik. Rich feature hierarchies for accurate object detection and semantic segmentation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2014. 3", + "[16] Lluis Gomez, Yash Patel, Marçal Rusinol, Dimosthenis Karatzas, and CV Jawahar. Self-supervised learning of visual features through embedding images into text topic spaces. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4230-4239, 2017. 3", + "[17] Agrim Gupta, Piotr Dollar, and Ross Girshick. Lvis: A dataset for large vocabulary instance segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5356-5364, 2019. 2, 6", + "[18] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. pages 9729-9738, 2020. 2", + "[19] Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE international conference on computer vision, pages 2961-2969, 2017. 3", + "[20] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 2", + "[21] Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. Deep networks with stochastic depth. In European conference on computer vision, pages 646-661. Springer, 2016. 6", + "[22] Gabriel Ilharco, Mitchell Wortsman, Ross Wightman, Cade Gordon, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, Hongseok Namkoong, John Miller, Hannaneh Hajishirzi, Ali Farhadi, and Ludwig Schmidt. Openclip, July 2021. If you use this software, please cite it as below. 8", + "[23] P Jackson. Introduction to expert systems. 1 1986. 
3", + "[24] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In International Conference on Machine Learning, pages 4904-4916. PMLR, 2021. 1, 3", + "[25] Armand Joulin, Laurens Van Der Maaten, Allan Jabri, and Nicolas Vasilache. Learning visual features from large weakly supervised data. In European Conference on Computer Vision, pages 67-84. Springer, 2016. 3", + "[26] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, et al. The kinetics hu" + ], + "bbox": [ + 503, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "2784", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "man action video dataset. arXiv preprint arXiv:1705.06950, 2017.2,6", + "[27] Simon Kornblith, Jonathon Shlens, and Quoc V. Le. Do better imagenet models transfer better? In 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2656-2666, 2019. 6, 8", + "[28] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. In Advances in neural information processing systems, pages 1097-1105, 2012. 2, 3", + "[29] Alina Kuznetsova, Hassan Rom, Neil Alldrin, Jasper Uijlings, Ivan Krasin, Jordi Pont-Tuset, Shahab Kamali, Stefan Popov, Matteo Malloci, Alexander Kolesnikov, et al. The open images dataset v4. International Journal of Computer Vision, 128(7):1956-1981, 2020. 1", + "[30] Yangguang Li, Feng Liang, Lichen Zhao, Yufeng Cui, Wanli Ouyang, Jing Shao, Fengwei Yu, and Junjie Yan. Supervision exists everywhere: A data efficient contrastive language-image pre-training paradigm. In International Conference on Learning Representations, 2022. 3", + "[31] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 3, 6, 7", + "[32] Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692, 2019. 6", + "[33] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. pages 10012-10022, 2021. 2, 6, 7", + "[34] Ze Liu, Jia Ning, Yue Cao, Yixuan Wei, Zheng Zhang, Stephen Lin, and Han Hu. Video swim transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3202-3211, June 2022. 3", + "[35] Jonathan Long, Evan Shelhamer, and Trevor Darrell. Fully convolutional networks for semantic segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3431-3440, 2015. 3", + "[36] George A. Miller. WordNet: A lexical database for English. In Human Language Technology: Proceedings of a Workshop held at Plainsboro, New Jersey, March 8-11, 1994, 1994. 1, 5", + "[37] Norman Mu, Alexander Kirillov, David Wagner, and Saining Xie. Slip: Self-supervision meets language-image pretraining. In European Conference on Computer Vision, pages 529-544. Springer, 2022. 
3", + "[38] Kevin Musgrave, Serge Belongie, and Ser-Nam Lim. A metric learning reality check, 2020. 2", + "[39] Basil Mustafa, Carlos Riquelme, Joan Puigcerver, Rodolphe Jenatton, and Neil Houlsby. Multimodal contrastive learning with limoe: the language-image mixture of experts. arXiv preprint arXiv:2206.02770, 2022. 3" + ], + "bbox": [ + 78, + 90, + 468, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[40] Hieu V Nguyen and Li Bai. Cosine similarity metric learning for face verification. In Asian conference on computer vision, pages 709-720. Springer, 2010. 4", + "[41] Mohammad Norouzi, Tomas Mikolov, Samy Bengio, Yoram Singer, Jonathon Shlens, Andrea Frome, Greg S Corrado, and Jeffrey Dean. Zero-shot learning by convex combination of semantic embeddings. arXiv preprint arXiv:1312.5650, 2013. 3", + "[42] Hieu Pham, Zihang Dai, Golnaz Ghiasi, Hanxiao Liu, Adams Wei Yu, Minh-Thang Luong, Mingxing Tan, and Quoc V. Le. Combined scaling for zero-shot transfer learning. CoRR, abs/2111.10050, 2021. 3", + "[43] Zhaofan Qiu, Ting Yao, and Tao Mei. Learning spatiotemporal representation with pseudo-3d residual networks. In proceedings of the IEEE International Conference on Computer Vision, pages 5533–5541, 2017. 3", + "[44] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pages 8748-8763. PMLR, 2021. 1, 3, 6, 8", + "[45] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. In Advances in neural information processing systems, pages 91-99, 2015. 3", + "[46] Mert Bulent Sariyildiz, Julien Perez, and Diane Larlus. Learning visual representations with caption annotations. In European Conference on Computer Vision (ECCV), 2020. 3", + "[47] Christoph Schuhmann, Richard Vencu, Romain Beaumont, Robert Kaczmarczyk, Clayton Mullis, Aarush Katta, Theo Coombes, Jenia Jitsev, and Aran Komatsuzaki. Laion-400m: Open dataset of clip-filtered 400 million image-text pairs, 2021. 6, 7, 8", + "[48] Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In Proceedings of ACL, 2018. 6, 7", + "[49] K. Simonyan and A. Zisserman. Very deep convolutional networks for large-scale image recognition. In International Conference on Learning Representations, May 2015. 2", + "[50] Robyn Speer, Joshua Chin, and Catherine Havasi. Conceptnet 5.5: An open multilingual graph of general knowledge. In Thirty-first AAAI conference on artificial intelligence, 2017. 3", + "[51] Chen Sun, Abhinav Shrivastava, Saurabh Singh, and Abhinav Gupta. Revisiting unreasonable effectiveness of data in deep learning era. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), Oct 2017. 1", + "[52] Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed, Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, and Andrew Rabinovich. Going deeper with convolutions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1-9, 2015. 2", + "[53] Bart Thomee, David A. 
Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "2785", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Li-Jia Li. Yfcc100m: The new data in multimedia research. Commun. ACM, 59(2):64-73, jan 2016. 6, 7", + "[54] Du Tran, Lubomir Bourdev, Rob Fergus, Lorenzo Torresani, and Manohar Paluri. Learning spatiotemporal features with 3d convolutional networks. In Proceedings of the IEEE international conference on computer vision, pages 4489-4497, 2015. 3", + "[55] Denny Vrandecic. Wikidata: A new platform for collaborative data collection. In Proceedings of the 21st international conference on world wide web, pages 1063-1064, 2012. 3", + "[56] Haohan Wang, Songwei Ge, Zachary Lipton, and Eric P Xing. Learning robust global representations by penalizing local predictive power. In Advances in Neural Information Processing Systems, pages 10506-10518, 2019. 6, 8", + "[57] Hao Wang, Yitong Wang, Zheng Zhou, Xing Ji, Dihong Gong, Jingchao Zhou, Zhifeng Li, and Wei Liu. Cosface: Large margin cosine loss for deep face recognition, 2018. 2", + "[58] Saining Xie, Ross Girshick, Piotr Dólár, Zhuowen Tu, and Kaiming He. Aggregated residual transformations for deep neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1492-1500, 2017. 2", + "[59] Zhenda Xie, Yutong Lin, Zhuliang Yao, Zheng Zhang, Qi Dai, Yue Cao, and Han Hu. Self-supervised learning with swim transformers. arXiv preprint arXiv:2105.04553, 2021. 6", + "[60] Jinyu Yang, Jiali Duan, Son Tran, Yi Xu, Sampath Chanda, Liquun Chen, Belinda Zeng, Trishul Chilimbi, and Junzhou Huang. Vision-language pre-training with triple contrastive learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15671-15680, 2022. 3", + "[61] Jianwei Yang, Chunyuan Li, Pengchuan Zhang, Bin Xiao, Ce Liu, Lu Yuan, and Jianfeng Gao. Unified contrastive learning in image-text-label space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19163-19173, June 2022. 3, 6, 7", + "[62] Lewei Yao, Runhui Huang, Lu Hou, Guansong Lu, Minzhe Niu, Hang Xu, Xiaodan Liang, Zhenguo Li, Xin Jiang, and Chunjing Xu. FILIP: Fine-grained interactive language-image pre-training. In International Conference on Learning Representations, 2022. 3", + "[63] Minghao Yin, Zhuliang Yao, Yue Cao, Xiu Li, Zheng Zhang, Stephen Lin, and Han Hu. Disentangled non-local neural networks. In Proceedings of the European conference on computer vision (ECCV), 2020. 3", + "[64] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions. Transactions of the Association for Computational Linguistics, 2:67-78, 2014. 6, 7", + "[65] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive captioners are image-text foundation models. Transactions on Machine Learning Research, 2022. 3", + "[66] Xiaohua Zhai, Xiao Wang, Basil Mustafa, Andreas Steiner, Daniel Keysers, Alexander Kolesnikov, and Lucas Beyer." 
+ ], + "bbox": [ + 78, + 90, + 468, + 900 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Lit: Zero-shot transfer with locked-image text tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18123-18133, 2022. 3", + "[67] Yuhao Zhang, Hang Jiang, Yasuhide Miura, Christopher D Manning, and Curtis P Langlotz. Contrastive learning of medical visual representations from paired images and text. arXiv preprint arXiv:2010.00747, 2020. 3", + "[68] Bolei Zhou, Hang Zhao, Xavier Puig, Tete Xiao, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Semantic understanding of scenes through the ade20k dataset. International Journal on Computer Vision, 2018. 2, 3, 6" + ], + "bbox": [ + 501, + 92, + 890, + 246 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "2786", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2023/iCLIP_ Bridging Image Classification and Contrastive Language-Image Pre-Training for Visual Recognition/3b34aaa2-686f-4539-b795-03e3193c1a2b_model.json b/2023/iCLIP_ Bridging Image Classification and Contrastive Language-Image Pre-Training for Visual Recognition/3b34aaa2-686f-4539-b795-03e3193c1a2b_model.json new file mode 100644 index 0000000000000000000000000000000000000000..350bceded799a433b50f34406a640e79e22cc7f1 --- /dev/null +++ b/2023/iCLIP_ Bridging Image Classification and Contrastive Language-Image Pre-Training for Visual Recognition/3b34aaa2-686f-4539-b795-03e3193c1a2b_model.json @@ -0,0 +1,2235 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.126, + 0.13, + 0.844, + 0.176 + ], + "angle": 0, + "content": "iCLIP: Bridging Image Classification and Contrastive Language-Image Pre-training for Visual Recognition" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.203, + 0.842, + 0.258 + ], + "angle": 0, + "content": "Yixuan Wei\\(^{1,2}\\), Yue Cao\\(^{2*}\\), Zheng Zhang\\(^{2}\\), Houwen Peng\\(^{2}\\), Zhuliang Yao\\(^{1,2}\\), Zhenda Xie\\(^{1,2}\\), Han Hu\\(^{2}\\), Baining Guo\\(^{2}\\) \n\\(^{1}\\)Tsinghua University \\(^{2}\\)Microsoft Research Asia" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.292, + 0.314, + 0.308 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.324, + 0.474, + 0.643 + ], + "angle": 0, + "content": "This paper presents a method that effectively combines two prevalent visual recognition methods, i.e., image classification and contrastive language-image pre-training, dubbed iCLIP. Instead of naïve multi-task learning that use two separate heads for each task, we fuse the two tasks in a deep fashion that adapts the image classification to share the same formula and the same model weights with the language-image pre-training. To further bridge these two tasks, we propose to enhance the category names in image classification tasks using external knowledge, such as their descriptions in dictionaries. 
Extensive experiments show that the proposed method combines the advantages of the two tasks well: the strong discrimination ability of image classification due to its clean category labels, and the good zero-shot ability of CLIP ascribed to the richer semantics in its text descriptions. In particular, it reaches \\(82.9\\%\\) top-1 accuracy on IN-1K, and meanwhile surpasses CLIP by \\(1.8\\%\\), with a similar model size, on zero-shot recognition on the Kornblith 12-dataset benchmark. The code and models are publicly available at https://github.com/weiyx16/iCLIP."
This work was done when Yixuan Wei, Zhuliang Yao, and Zhenda Xie were interns at Microsoft Research Asia."
In contrast, alt-texts in CLIP are usually full sentences containing rich information. To further bridge the gap between image classification and CLIP, we propose a second technique that leverages the knowledge"
Classification is almost ubiquitous for visual understanding tasks of various recognition granularity, e.g., image-level classification [12, 20, 28, 33, 49, 52, 58], object-level classification in" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2777" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.258 + ], + "angle": 0, + "content": "object detection [3, 15, 19, 45], pixel-level classification in semantic/instance segmentation [5, 35, 63], and video-level action classification [4, 13, 34, 43, 54]. In these tasks, the data is manually annotated to a fixed set of classes, e.g., the 1,000-class ImageNet-1K dataset [8], the 80-class COCO detection dataset [31], the 150-class ADE20K segmentation dataset [68], etc. Among these classification tasks, the image-level classification is particularly important, which has greatly advances the success of deep learning in computer vision, thanks to its high quality and transferable discriminative representations." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.259, + 0.472, + 0.351 + ], + "angle": 0, + "content": "The supervised visual classification is generally performed as a \\(K\\)-way classification problem without considering the text semantics of the class names. The most common classifier is the linear classifier, where the classifier vector of each category is parameterized as model weights and is directly learnt through optimization [28]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.352, + 0.473, + 0.534 + ], + "angle": 0, + "content": "Contrastive language-image pre-training. Pioneered by CLIP [44] and Align [24], the contrastive language-image pre-training is now attracting more and more attention due to its strong zero-shot transfer capacity. These methods learn a network to pair an image and its associated alt-text, in which the image-text pairs are crawled from the Internet. With web-scale alt-text, it is possible to cover almost all classes, and these methods do show to perform very well for zero-shot recognition. In their frameworks, the images and texts are embedded using two separate encoders, and the output representations of the images and alt-texts are contrasted according to the positive and negative pairs." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.535, + 0.473, + 0.672 + ], + "angle": 0, + "content": "While prior to CLIP and Align, there have been a few early works leveraging alt-text or text encoders for image recognition [10,14,16,25,41,46,67]. More follow-up works appeared after CLIP and Align, including Filip [62], DeClip [30], BASIC [42], LiT [66], LiMoE [39], TCL [60], and so on. A drawback of these method is that the image-text pairs are usually noisy without human labeling, leading to the learned representations are not conceptual compact, lacking strong discrimination ability." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.673, + 0.473, + 0.809 + ], + "angle": 0, + "content": "Introducing knowledge into AI systems. Our approach is also related to the expert systems in 1980s which heavily rely on a knowledge base for reasoning [23]. Recently, in natural language process, there also emerges boosting large-scale pretrained models by making use of encyclopedic [1,55] and commonsense knowledge [50]. However, in computer vision, the knowledge bases is not well explored. We hope our findings can encourage more attention to incorporate human knowledge into current vision systems." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.473, + 0.903 + ], + "angle": 0, + "content": "Combination of representation learning. Regarding individual strengths of different representation learning approaches, there have been several works trying to combine different representation learning approaches so as to take advantages of individuals' strength. For example, SLIP [37] combines CLIP learning with a self-supervised contrastive" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.212 + ], + "angle": 0, + "content": "learning approach. CoCa [65] combines the CLIP target with an image caption task, in hope to perform well for both understanding and generation problems. MaskCLIP [11] combines CLIP with masked image modeling based self-supervised learning. In contrast, our work also aims to effectively combine different representation learning approaches so as to take both advantages, specifically, the image classification and CLIP." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.212, + 0.895, + 0.501 + ], + "angle": 0, + "content": "Relationship to UniCL [61] Concurrent to our work, there is another work named UniCL [61] which also combines image classification with language-image pretraining. We hope the consistent knowledge will help the community in learning more powerful representations. Also note that there are two main differences comparing our framework to the UniCL framework [61]: 1) We involve all negative classifiers in training the supervised classification, while UniCL only involve negatives in a same batch. To make feasible all negative classifiers, we propose a GPU-distributed implementation that distributes the classifiers evenly into different GPUs. Our implementations show to have better in-domain accuracy compared to UniCL when the category number is as large as tens of thousands (76.3% vs. 70.5% as shown in Tab. 4). 2) We introduce a new dictionary enhanced approach to convert the class names with rich semantical text, which shows to be very beneficial for zero-shot image classification and multi-modal retrieval (see Tab. 2)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.511, + 0.593, + 0.527 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.537, + 0.894, + 0.629 + ], + "angle": 0, + "content": "In this section, we first review existing methods on image classification and contrastive language-image pre-training tasks. Then, we propose a unified framework to bridge the two tasks in a deep fusion fashion. Finally, we introduce dictionary-enhanced category descriptions to further align the two taks on input label space." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.636, + 0.642, + 0.65 + ], + "angle": 0, + "content": "3.1. Preliminaries" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.659, + 0.895, + 0.858 + ], + "angle": 0, + "content": "Image Classification. Given a set of \\(<\\) image, category label> pairs, i.e., \\(\\mathcal{D}^c = \\{(I_i,C_i)\\}_{i=1}^{|\\mathcal{D}^c|}\\), image classification task targets to predict the category label of a given image, through a visual encoder \\(f_v\\), and a parametric category classifier \\(h_c\\), illustrated in Fig. 2 (b). The parameters of \\(h_c\\) is a matrix \\(W \\in \\mathcal{R}^{N \\times H}\\), where \\(N\\) is the number of categories and \\(H\\) is the dimension of visual embeddings. 
The visual encoder \\(f_v\\) transforms each raw image \\(I_i\\) to an embedding \\(v_i = f_v(I_i)\\), while the classifier \\(h_c\\) predicts the distribution \\(P_i \\in \\mathcal{R}^N\\) over all pre-defined categories via an inner product between \\(W\\) and \\(v_i\\), i.e., \\(P_i = W \\cdot v_i\\) (bias term is omitted for simplicity). Finally, a cross entropy is applied on \\(P_i\\) and \\(C_i\\) to calculate training loss, which is formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.543, + 0.865, + 0.895, + 0.906 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\frac {- 1}{| \\mathcal {D} ^ {c} |} \\sum_ {(I _ {i}, C _ {i}) \\in \\mathcal {D} ^ {c}} \\log \\frac {\\exp (W _ {C _ {i}} \\cdot v _ {i})}{\\sum_ {j = 1} ^ {N} \\exp (W _ {j} \\cdot v _ {i})}, \\qquad (1)\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2778" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.119, + 0.079, + 0.837, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.251, + 0.895, + 0.308 + ], + "angle": 0, + "content": "Figure 2. An illustration of iCLIP framework. \\(\\mathcal{B}\\) is the batch size, \\(N\\) is the number of categories and \\(G\\) is the number of gpus. iCLIP unifies both contrastive language-image pre-training and classification tasks with shared text and visual encoder, taking alt-texts or dictionary enhanced class names as annotations. To reduce the computation, iCLIP distributes the enhanced class names over all gpus in forward, and gathers the embeddings for similarity calculation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.322, + 0.421, + 0.337 + ], + "angle": 0, + "content": "where \\( W_{j} \\) is the parametric weight of \\( j \\)-th category." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.337, + 0.47, + 0.474 + ], + "angle": 0, + "content": "Contrastive Language-Image Pre-training. Given a set of \\(<\\) image, alt-text> pairs, i.e., \\(\\mathcal{D}^a = \\{(I_i,T_i^a)\\}_{i=1}^{|\\mathcal{D}^a|}\\), contrastive language-image pre-training targets to close the distances between paired image and text while enlarging those of unpaired ones, through a visual encoder \\(f_v\\) and a text encoder \\(f_t\\), shown in Fig. 2 (a). They transform the image \\(I_i\\) and the alt-text \\(T_i^a\\) to feature embeddings \\(v_i\\) and \\(s_i\\), respectively. A contrastive loss function is applied to shrink the cosine distance of \\(v_i\\) and \\(s_i\\), which is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.091, + 0.482, + 0.47, + 0.526 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\frac {- 1}{| \\mathcal {D} ^ {a} |} \\sum_ {\\substack {(I _ {i}, T _ {i} ^ {a}) \\\\ \\in \\mathcal {D} ^ {a}}} \\log \\frac {\\exp \\left(\\cos \\left(f _ {t} \\left(T _ {i} ^ {a}\\right) , v _ {i}\\right) / \\tau\\right)}{\\sum_ {T _ {j} ^ {a} \\in \\mathcal {T} ^ {a}} \\exp \\left(\\cos \\left(f _ {t} \\left(T _ {j} ^ {a}\\right) , v _ {i}\\right) / \\tau\\right)}, \\tag{2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.536, + 0.47, + 0.596 + ], + "angle": 0, + "content": "where \\(\\cos (\\cdot ,\\cdot)\\) represents the cosine similarity between two embeddings, \\(\\mathcal{T}^a\\) is all the alt-texts in a batch including one positive paired alt-text and \\(|\\mathcal{T}^a| - 1\\) negative ones, and \\(\\tau\\) is a temperature hyper-parameter to scale the similarities." 
+ }, + { + "type": "text", + "bbox": [ + 0.075, + 0.596, + 0.471, + 0.763 + ], + "angle": 0, + "content": "Task differences. Comparing the formations of image classification and language-image pre-training, we can draw three main differences between them. 1) Training loss functions. Classification commonly adopts a cross-entropy loss on inner-product similarity, while image-text learning uses InfoNCE loss on cosine similarity. 2) Classifier types. Classification adopts a parametric category classifier, while image-text learning uses a text encoder. 3) Label granularity. Category names in classification are usually very short, i.e., one or few words, while the captions in image-text pretraining are full sentences containing rich semantics." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.772, + 0.47, + 0.804 + ], + "angle": 0, + "content": "3.2. Bridge Image Classification and Contrastive Language-Image Pre-training" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.469, + 0.87 + ], + "angle": 0, + "content": "To bridge image classification and image-text alignment, we introduce three adaptations to align their training losses, unify the classifier types, and close the label granularity gap. The overall adaption is visualized in Fig. 3." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Classification with Text Encoder. As formulated in Eq. (1), image classification commonly adopts a cross-" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.318, + 0.895, + 0.469 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.478, + 0.893, + 0.548 + ], + "angle": 0, + "content": "Figure 3. An illustration of our approach to bring image classification (a) to CLIP (b), from the perspective of loss function, classifier types and label granularity. We reformulate the linear classifier (a.1) with a text-encoder-based classifier (a.2), and enhance the class names with a text description from the dictionary (a.3)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.562, + 0.893, + 0.682 + ], + "angle": 0, + "content": "entropy loss on top of the inner-product similarity between the visual embedding \\( v_{i} \\) and the parametric classifier \\( h_c \\). This formulation is not in line with the InfoNCE loss in Eq. (2), leading to a misalignment between the two paradigms. To address this issue, we adopt a cosine similarity for image classification, instead of the original inner-product similarity in Eq. (1), which formulates a cosine classifier as:" + }, + { + "type": "equation", + "bbox": [ + 0.52, + 0.689, + 0.892, + 0.726 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\frac {- 1}{| \\mathcal {D} ^ {c} |} \\sum_ {\\left(I _ {i}, C _ {i}\\right) \\in \\mathcal {D} ^ {c}} \\log \\frac {\\exp \\left(\\cos \\left(W _ {C _ {i}} , v _ {i}\\right) / \\tau\\right)}{\\sum_ {j = 1} ^ {N} \\exp \\left(\\cos \\left(W _ {j} , v _ {i}\\right) / \\tau\\right)}. \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.734, + 0.894, + 0.825 + ], + "angle": 0, + "content": "Cosine similarity is a common practice in metric learning [40]. It can smoothly align the supervised image classification with the cross-modal contrastive pre-training in terms of learning objective function, i.e., Eq. (2). Moreover, our experiments demonstrate that this cosine classifier performs on par with the traditional linear classifier (see Tab. 1)." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.894, + 0.901 + ], + "angle": 0, + "content": "The cosine classifier aligns the training losses of the two tasks. However, the annotations, i.e., category labels and captions, are modeled separately by the parametric category classifier \\( h_c \\) and the text encoder \\( f_t \\). As analyzed in Sec. 4.3, shallowly combining the two tasks with a shared" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2779" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.152 + ], + "angle": 0, + "content": "visual encoder \\( f_{v} \\) and two separate task heads does not fully take advantage of the gold annotations in image classification and rich concepts in textual captions, resulting in a suboptimal solution with limited transfer capability." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.152, + 0.471, + 0.258 + ], + "angle": 0, + "content": "To tackle this issue, we take label semantics into consideration and propose to utilize the text encoder \\( f_{t} \\) as a meta classifier for image classification. Formally, we replace the label index \\( C_i \\) with its class name \\( M_{i} \\), and generate the classifier weight \\( W \\) on-the-fly through the text encoder \\( f_{t} \\), which is shared with image-text pre-training. The new formulation is represented as:" + }, + { + "type": "equation", + "bbox": [ + 0.095, + 0.267, + 0.47, + 0.311 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L} = \\frac{-1}{|\\mathcal{D}^{c}|}\\sum_{\\substack{(I_{i},M_{i})\\\\ \\in \\mathcal{D}^{c}}}\\log \\frac{\\exp\\left(\\cos\\left(f_{t}\\left(M_{i}\\right),v_{i}\\right) / \\tau\\right)}{\\sum_{j = 1}^{N}\\exp\\left(\\cos\\left(f_{t}\\left(M_{j}\\right),v_{i}\\right) / \\tau\\right)}. \\tag{4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.32, + 0.469, + 0.395 + ], + "angle": 0, + "content": "In this way, the text encoder \\( f_{t} \\) is not only used to extract semantics from gold category labels, but also to capture textual information from image captions. Both the visual and textual encoders are shared across the two tasks, leading to a deep fusion of the two tasks." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.397, + 0.47, + 0.636 + ], + "angle": 0, + "content": "Classification with Dictionary Enhancement. The cosine classifier with the text encoder as a meta network has largely unified the two tasks in model training. In this step, we further align them on input label granularity, reducing the disparity between label names (one or a few words) and image captions (a complete sentence). Our proposal is to integrate external knowledge into label names. More specifically, for each label name, we introduce a detailed description from its corresponding synset in the dictionary WordNet [36] as the external knowledge and create a pseudo sentence as the label for each category. We combine the original class names and their dictionary descriptions to form the enhanced texts as the input to the text encoder. Also, we add a prompt to make the sentence more fluent. The final dictionary-enhanced description for each category is formed as:" + }, + { + "type": "equation", + "bbox": [ + 0.1, + 0.648, + 0.469, + 0.666 + ], + "angle": 0, + "content": "\\[\n\\mathcal{T}^{c} = \\text{A photo of a } \\{\\mathrm{NAME}\\}_{C_{i}}, \\{\\mathrm{DESCRIPTION}\\}_{C_{i}}.
\\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.675, + 0.47, + 0.87 + ], + "angle": 0, + "content": "Such dictionary-enhanced descriptions have similar label granularity to alt-text, and thus further bring image classification closer to image-text alignment. Moreover, the description introduces more details of each category, being capable of reducing potential misconception. For example, the class \"night bird\" actually includes several kinds of birds, like owl, nightingale, etc. Such a category name cannot allow the model to learn precise representations due to the blurry concepts. If we augment the category with more external knowledge, such as \"a photo of a night bird, any bird associated with night: owl, nightingale, nighthawk\", it will help the model learn discriminative representation on distinguishing different concepts (e.g., bird species)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.469, + 0.902 + ], + "angle": 0, + "content": "A Unified Framework. The above three steps adapt image classification to image-text alignment from the perspec" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.168 + ], + "angle": 0, + "content": "tive of training loss, classifier type and annotation granularity, respectively. Towards the final unification, we propose a new framework dubbed iCLIP, as presented in Fig. 2 (c), which bridges Image Classification and Image-Text Alignment with a unified contrastive learning loss formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.518, + 0.185, + 0.892, + 0.226 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\frac {- 1}{| \\mathcal {D} |} \\sum_ {\\left(I _ {i}, T _ {i}\\right) \\in \\mathcal {D}} \\log \\frac {\\exp \\left(\\cos \\left(f _ {t} \\left(T _ {i}\\right) , v _ {i}\\right) / \\tau\\right)}{\\sum_ {T _ {j} \\in \\tau} \\exp \\left(\\cos \\left(f _ {t} \\left(T _ {j}\\right) , v _ {i}\\right) / \\tau\\right)}, \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.231, + 0.892, + 0.442 + ], + "angle": 0, + "content": "where \\(\\mathcal{D}\\) is a set consisting of the image classification data \\(D^c\\) and the image-text alignment data \\(D^a\\), i.e., \\(\\mathcal{D} = \\{\\mathcal{D}^c, D^a\\}\\), while \\(\\mathcal{T}\\) indicates a combination of \\(T^c\\) and \\(T^a\\), i.e., \\(\\mathcal{T} = \\{\\mathcal{T}^c, T^a\\}\\). Text label \\(T_i\\) is either an image caption \\(T_i^a\\) sampled from \\(T^a\\) or a dictionary-enhanced description \\(T_i^c\\) sampled from \\(T^c\\). It is worth noting that, with this unified framework, both the text encoder \\(f_t\\) and the visual encoder \\(f_v\\) are shared across the two tasks, achieving a deep fusion. The proposed unified framework is able to leverage any combination of tag-labeled and caption-labeled image datasets for pre-training. This combination allows the model to learn more discriminative representation, while capturing more visual concepts from the textual description. On the other hand, our iCLIP method is efficient." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.443, + 0.892, + 0.639 + ], + "angle": 0, + "content": "Distributed Implementation. In our iCLIP framework, the text embedding of each category is generated by the shared text encoder on-the-fly. This computation is affordable when the number of categories \\( N \\) is not large. However, it will become infeasible if category number scales up to be large, such as 22k categories in ImageNet-22K [8]. 
To make the iCLIP framework feasible for large-category classification data in practice, we adopt a distributed implementation strategy [6]. Specifically, we distribute all the enhanced class names evenly over \\( G \\) GPUs in the forward pass, and gather the embeddings from each GPU for similarity calculation, reducing the computation cost and memory consumption of the text encoder to \\( 1 / G \\)." + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.651, + 0.892, + 0.679 + ], + "angle": 0, + "content": "Table 1. Ablation on formulation adaptations for the image classification task. Models are trained for 100 epochs." + }, + { + "type": "table", + "bbox": [ + 0.551, + 0.684, + 0.837, + 0.769 + ], + "angle": 0, + "content": "
#Cosine LossText-enc. as ClassifierEnhanced classesIN-1K
180.9
281.5
381.2
481.4
" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.777, + 0.625, + 0.794 + ], + "angle": 0, + "content": "4. Experiment" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.8, + 0.892, + 0.905 + ], + "angle": 0, + "content": "We verify the effectiveness of the proposed iCLIP framework through the comparisons to single-task baselines and a naive multi-task learning baseline. The comparisons are conducted in three settings covering different scales of pretraining data. In evaluation, we assess the models on different tasks, including in-domain classification, zero-shot classification, multi-modal retrieval, and downstream tasks." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "2780" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.082, + 0.894, + 0.113 + ], + "angle": 0, + "content": "Table 2. Ablation study conducted on IN-22K [8] and YFCC-14M [53]. Models are pre-trained from scratched with 32 epochs following UniCL [61]. COCO and Flickr stand for MSCOCO [31] and Flickr30K [64]. IR and TR stand for image retrieval and text retrieval." + }, + { + "type": "table", + "bbox": [ + 0.088, + 0.117, + 0.877, + 0.261 + ], + "angle": 0, + "content": "
Zero-shot classificationZero-shot retrieval
#Training DataMethodIN-1K14-dataset avg.Flickr-IRFlickr-TRCOCO-IRCOCO-TR
1YFCC-14MCLIP [44]30.136.321.537.912.521.2
2YFCC-14M (half) + IN-21K (half)iCLIP (w/o Desc.)39.445.427.639.113.020.4
3YFCC-14M (half) + IN-21K (half)iCLIP45.949.931.949.815.527.2
4YFCC-14M + IN-21KiCLIP (w/o Desc.)41.149.433.451.216.326.5
5YFCC-14M + IN-21KiCLIP50.954.437.155.718.530.7
6YFCC-14M + IN-22KiCLIP (w/o Desc.)76.251.633.248.214.423.8
7YFCC-14M + IN-22KiCLIP76.355.536.255.318.029.7
" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.274, + 0.269, + 0.292 + ], + "angle": 0, + "content": "4.1. Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.298, + 0.471, + 0.33 + ], + "angle": 0, + "content": "Pre-training data and settings. We consider three different scales of dataset combination for model pre-training." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.334, + 0.473, + 0.516 + ], + "angle": 0, + "content": "- ImageNet-1K [8] and GCC-3M [48]. In this setting, we use ImageNet-1K as the classification data while GCC-3M as the image-text data. We adopt a Swin-T [33] initialized with MoBY [59] as the visual encoder, while for the textual encoder, we use a pretrained RoBERTa-B [32]. We sample half number of images from each dataset in a mini-batch and train the models with a batch size of \\(128 \\times 8\\) V100 GPUs for 100 epochs. The highest learning rate is 2e-4 with a cosine learning rate schedule and 5 epochs warm-up. Weight decay is set to be 0.01. RandAugment [7] and stochastic depth [21] with a rate of 0.1 are used for visual encoder only." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.519, + 0.473, + 0.793 + ], + "angle": 0, + "content": "- ImageNet-22K [8] and YFCC-14M [53]. We follow UniCL [61] to train all models from scratch with 32 epochs for a fair comparison with it. Swin-T [33] is used as the visual encoder, and a 12-layer transformer with a hidden dimension of 512 same as CLIP [44] is used as the text encoder. A batch size of \\(512 \\times 16\\) GPUs is adopted. The highest learning rate is selected from 2e-4 and 8e-4. Other regularization is the same as previous, except for a larger weight decay of 0.05. We also conduct experiments using two variants of this setup for a fair and clean comparison with the methods that use one task alone (IC or CLIP): 1) Excluding the 1,000 ImageNet-1K classes in ImageNet-22K dataset (dubbed IN-21K). This setup variant allows us to evaluate the zero-shot accuracy on ImageNet-1K for different methods; 2) Half images of the ImageNet-21K and YFCC-14M are used, such that the dataset size and training iterations are the same as that used in one single task." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.796, + 0.472, + 0.902 + ], + "angle": 0, + "content": "- ImageNet-22K [8] and Laion-400M [47]. For this large-scale pre-training setting, we adopt a Swin-B initialized with MoBY as the visual encoder and a pre-trained RoBERTa-B as the text encoder. We train iCLIP for 100K iters, with a batch size of \\(192 \\times 64\\) V100 GPUs. In each mini batch, we sample 64 images from IN-22K and 128 images from Laion-400M. The model is trained on" + }, + { + "type": "list", + "bbox": [ + 0.084, + 0.334, + 0.473, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.275, + 0.892, + 0.351 + ], + "angle": 0, + "content": "classification data for around 30 epochs and on image-text data for around 2 epochs equivalently. The highest learning rate is 1e-3 with a cosine learning rate schedule and a warm-up for \\(16.7\\mathrm{K}\\) liters. Weight decay is set to 0.05 and drop depth rate is set to 0.2." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.362, + 0.892, + 0.393 + ], + "angle": 0, + "content": "Evaluation datasets and settings. During evaluation, we assess the models considering five different settings." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.397, + 0.892, + 0.503 + ], + "angle": 0, + "content": "- Zero-shot classification. 
We evaluate the concept coverage and generalization ability of the models on three datasets: 1) ImageNet-1K variants, including IN-1K [8], and IN-Sketch (IN-S) [56]. Top-1 accuracy is reported; 2) the widely-used Kornblith 12-dataset benchmark [27]; 3) 14 datasets used in UniCL [61]. For 2) and 3), averaged accuracy is reported." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.505, + 0.892, + 0.58 + ], + "angle": 0, + "content": "- Zero-shot multi-modal retrieval. Flickr30K [64] (1K test set) and MSCOCO [31] (5K test set) are used to evaluate the alignment between image and text modalities. We report the Top-1 recall on both image retrieval (IR) and text retrieval (TR)." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.583, + 0.892, + 0.644 + ], + "angle": 0, + "content": "- In-domain classification. ImageNet-1K data is included in some of our pre-training setups, so we conduct indomain evaluation on ImageNet-1K in these cases. The Top-1 accuracy is reported." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.646, + 0.892, + 0.707 + ], + "angle": 0, + "content": "- Few-shot classification. Following CLIP [44], we also evaluate the models on few-shot classification task using Kornblith 12-dataset with a frozen visual encoder. Averaged accuracy is reported." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.709, + 0.892, + 0.814 + ], + "angle": 0, + "content": "- Fine-tuning on downstream tasks. To validate the generalization ability of iCLIP, the models are fine-tuned and compared on semantic segmentation [68], long-tail detection [17], and video action recognition [26]. We report val mIoU,_bbox mAP and Top-1 accuracy, respectively. The detailed settings can be found in the supplementary material." + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.397, + 0.892, + 0.814 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.832, + 0.862, + 0.85 + ], + "angle": 0, + "content": "4.2. Experiments on IN-1K [8] and CC3M [48]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Formulation adaptations for image classification. Tab. 1 ablates the effect of adapting the common image classification to that used in iCLIP, including both cosine loss, the" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "2781" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.089, + 0.472, + 0.146 + ], + "angle": 0, + "content": "Table 3. Ablation conducted on IN-1K [8] and GCC-3M [48] combined data. For the models only using IN-1K, we train them for 100 epochs. For the models only using GCC-3M, we train them with the same iterations and batch size as the ones used in IN-1K." + }, + { + "type": "table", + "bbox": [ + 0.107, + 0.156, + 0.444, + 0.257 + ], + "angle": 0, + "content": "
12-datasetImageNet-related
#Methodavg.IN-1KIN-S
1Sup-only-80.929.4
2VL-only31.432.418.3
3Naïve multi-task35.180.638.3
4iCLIP (w/o Desc.)37.780.538.6
5iCLIP39.180.438.7
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.271, + 0.47, + 0.3 + ], + "angle": 0, + "content": "Table 4. Comparison with UniCL. Models are pre-trained from scratched with 32 epochs, following UniCL [61]." + }, + { + "type": "table", + "bbox": [ + 0.1, + 0.31, + 0.449, + 0.413 + ], + "angle": 0, + "content": "
#Training DataMethodIN-1K14-dataset avg.
1YFCC + IN-21K (half)UniCL [61]36.445.5
2YFCC + IN-21K (half)iCLIP45.949.9
3YFCC + IN-21KUniCL [61]40.549.1
4YFCC + IN-21KiCLIP50.954.4
5YFCC + IN-22KUniCL [61]70.552.4
6YFCC + IN-22KiCLIP76.355.5
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.432, + 0.47, + 0.568 + ], + "angle": 0, + "content": "text-encoder-based classifier and enhanced class names using ImageNet-1K dataset. It can be seen that the cosine classification loss gets slightly better performance than the linear one, with a \\(+0.6\\%\\) gain on IN-1K (see #1 v.s. #2). When further adapting the text-encoder-based classifier (#3) and enhancing class names from dictionaries (#4), it has almost no performance degradation (\\(+0.3\\%\\) and \\(+0.5\\%\\) on IN-1K compared to the linear classifier), which allows us to further sharing the text encoder with CLIP for tasks unification." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.569, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Zero-shot and in-domain classification. With previous adaptations on the image classification formulation, we can further share the text encoder between the two tasks. To ablate the effect of sharing the text encoder, we set a naive multi-task baseline, that combines image classification and CLIP in a shallow fusion, i.e., simply averaging the loss Eq. (1) and Eq. (2). Each has its own head network, i.e., the fully-connected layer \\( W \\) for Eq. (1) and the text encoder \\( f_{t} \\) for Eq. (2). The best performances of the two heads are reported in Tab. 3. With a shared text encoder across the two tasks, our iCLIP (w/o Desc.) outperforms the naive multi-task on Kornblith 12-dataset zero-shot classification by \\( +2.6\\% \\) in average, while they are comparable on ImageNet-related datasets classification (see #3 v.s. #4). Our iCLIP deeply unifies two tasks, thus better gathering the merits of the two learning protocols. When compared with the supervised softmax classifier baseline, i.e., Eq. (1) Sup-only, and the contrastive image-text pre-training baseline, i.e., Eq. (2) VL-only, our method is slightly worse than Sup-only on IN-1K by \\( 0.4\\% \\), while achieves superior performance on other evaluation settings, \\( +6.3\\% \\) better than VL-only method on 12-dataset zero-shot testing and \\( +9.2\\% \\)" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.168 + ], + "angle": 0, + "content": "better than Sup-only method on IN-S (see #4 v.s. #1). Moreover, the dictionary enhancement on class names (#5) can further bring an average of \\(+1.4\\%\\) improvements on Kornblith 12-dataset, revealing the increased discriminative representation for ambiguous concepts." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.174, + 0.892, + 0.191 + ], + "angle": 0, + "content": "4.3. Experiments on IN-22K [8] and YFCC14M [53]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.198, + 0.892, + 0.349 + ], + "angle": 0, + "content": "Effects of the unified framework. Here, we further ablate the effect of the unified formulation for deep fusion of the two tasks. In #2, #4 and #6 of Tab. 2, we show the results of our unified framework under three different dataset combination setups. Compared with the CLIP baseline (#1), our iCLIP (#2) earns \\(+8.3\\%\\) gains on IN-1K zero-shot classification and also \\(+9.1\\%\\) improvements when evaluated on the 14-dataset. In addition, our iCLIP is better than the CLIP baseline on most cross-modal retrieval benchmarks, while only using half of visual-language data in pre-training." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.349, + 0.893, + 0.619 + ], + "angle": 0, + "content": "Effects of dictionary enhancement. 
Furthermore, we dissect the model to study the contributions of dictionary-enhanced category description. From Tab. 2, we can see that enhancing each class names with informative description from the dictionary brings consistent improvements on both zero-shot classification and zero-shot retrieval under three dataset combination setups (see #3, #5 and #7). In particular, when pre-trained with half images of YFCC-14M and IN-21K (#3), the integrated knowledge contributes \\(+6.5\\%\\) improvements on IN-1K zero-shot classification, which makes our iCLIP reach \\(45.9\\%\\), being \\(+5.4\\%\\) better than UniCL method [61] with full images of YFCC-14M and IN-21K (see #3 in Tab. 4). More importantly, the enhanced class names is beneficial to cross-modal retrieval. For example, for image-to-text search, the dictionary-enhanced description can bring \\(10.7\\%\\) and \\(6.8\\%\\) top-1 recall gains on Flickr30K [64] and MSCOCO [31] respectively, as reported in row 3 of Tab. 2." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.621, + 0.892, + 0.725 + ], + "angle": 0, + "content": "Comparison with UniCL [61]. Tab. 4 summaries our comparison to UniCL. The same as UniCL, we evaluate our models on IN-1K and 14 datasets. Under three different dataset combination setups, our iCLIP surpasses UniCL by at least \\(+5\\%\\) on IN-1K image classification, while reaching \\(55.5\\%\\) averaged accuracy on 14 datasets (#6), being \\(+3.1\\%\\) better than UniCL (#5)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.727, + 0.887, + 0.743 + ], + "angle": 0, + "content": "4.4. Experiments on IN-22K and Laion-400M [47]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Zero-shot and in-domain classification. Tab. 5 presents a large scale experiment using the publicly accessible largescale data: Laion-400M [47] and IN-22K [8]. For Sup-only, i.e. Eq. (1), we use the released version from Swin [33], which is trained on IN-22K for 90 epochs. For VL-only, i.e. Eq. (2), we pre-train it on Laion-400M with a similar image numbers (#im). Our method is comparable to Sup only on IN-1K, while it gets \\(+17.8\\%\\) and \\(+8.3\\%\\) better results than the two baselines on IN-S, demonstrating its robustness to natural distribution shifts. Our iCLIP surpasses" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2782" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.089, + 0.895, + 0.133 + ], + "angle": 0, + "content": "Table 5. Ablation study on IN-22K [8] and Laion-400M [47]. We evaluate the models on ImageNet datasets (IN-1K [8] and IN-S [56]) and zero-shot evaluation on the Kornblith 12-dataset benchmark [27]. Few-shot learning on Kornblith 12-dataset and the fine-tuning on three downstream tasks are conducted to evaluate the transfer capability of iCLIP. \\(\\ddagger\\) denotes for our reproduction using released checkpoints." + }, + { + "type": "table", + "bbox": [ + 0.088, + 0.143, + 0.887, + 0.27 + ], + "angle": 0, + "content": "
Visual encoderPre-trainImageNet-related12-dataset avg.downstream tasks
MethodArch.length (#im.)IN-1KIN-S0-shot4-shotADE20KLVISKinetics400
CLIP [44]ViT-B/16400M×32 eps68.646.6‡68.866.4‡---
OpenCLIP [22]ViT-B/16400M×32 eps67.152.4‡70.9‡----
Sup-onlySwin-Base14M×90 eps82.642.0-67.652.135.982.7
VL-onlySwin-Base400M×3 eps61.151.567.273.352.036.682.3
iCLIPSwin-Base400M×2 eps+14M×30 eps82.959.870.678.152.637.983.1
" + }, + { + "type": "image", + "bbox": [ + 0.119, + 0.288, + 0.424, + 0.436 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.442, + 0.472, + 0.514 + ], + "angle": 0, + "content": "Figure 4. Major comparison with the CLIP-ViT-B/16 of few-shot classification (top-1 accuracy) on the Kornblith 12-dataset. \\(\\star\\) denotes the zero-shot performances. Results of CLIP on few-shot classification are reproduced using released model. We run every experiments three times and the averaged results are reported." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.536, + 0.47, + 0.673 + ], + "angle": 0, + "content": "OpenCLIP [22], which also uses Laion-400M data for pretraining [47], by more than \\(+15\\%\\) on IN-1K, mainly due to the pre-training data IN-22K covers the visual concepts in IN-1K. Moreover, when performing zero-shot evaluation on 12 datasets [27], our iCLIP model also achieves non-trivial improvements, e.g., an average of over \\(+3\\%\\) gains (VL-only in Tab. 5). In addition, our iCLIP is comparable to OpenCLIP on 12 datasets in average with fewer training time. More details are elaborated in the supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.675, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Few-shot classification We also conduct experiments in few-shot settings. Following CLIP [44], we freeze the visual encoder and append a linear probe layer for few-shot fine-tuning. We notice that the performance of CLIP [44] in few-shot classification cannot catch up with that of zero-shot classification, unless more than 4 examples per class are given, as presented in Fig. 4 (\\(\\star\\) v.s. \\(-\\bullet\\)). We conjecture the underlying reason is that the number of training samples is too limited to train a randomly initialized classifier. This situation can be alleviated by fine-tuning the pretrained text encoder, instead of the linear probe layer. In this way, text encoder is able to serve as a good initialization for few-shot classification, closing the gap between pretraining and fine-tuning. We evaluate such method on Kornblith 12-dataset benchmark [27] and report the results in Fig. 4." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.289, + 0.895, + 0.472 + ], + "angle": 0, + "content": "When only given one example per class, by utilizing text encoder as the classifier, our iCLIP achieve \\(73.9\\%\\) on 12-dataset in average, surpassing the original CLIP model by \\(+29.5\\%\\). Such one-shot recognition gets \\(+3.3\\%\\) gains over the zero-shot baseline (\\(\\star\\) v.s. \\(-\\bullet\\)), demonstrating good few-shot transfer ability. When using 16 examples per class, our model still performs superior to CLIP by \\(4.1\\%\\). Compared to supervised-only model and visual-linguistic only model, our unified contrastive learning pretrained model obtains \\(+24.6\\%\\) and \\(+6.1\\%\\) better accuracy under one-shot learning setting. Such advantages are kept to 16-shot with \\(+2.7\\%\\) and \\(+5.0\\%\\) gains \\((- \\bullet -\\) and \\(- \\bullet -)\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.479, + 0.895, + 0.616 + ], + "angle": 0, + "content": "Fine-tuning on Downstream Tasks We also study the generalization capability of our pre-trained models on downstream tasks, including semantic segmentation, object detection and video recognition. As shown in Tab. 
5, compared to Sup-only, our iCLIP surpasses it by \\(+0.5\\%\\), \\(+2.0\\%\\), \\(+0.4\\%\\) on the three downstream tasks, respectively. We also earn \\(+0.6\\%\\), \\(+1.3\\%\\), \\(+0.8\\%\\) gains over VL-only baseline. These results reveal that our unified method could learn general visual representations." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.647, + 0.62, + 0.663 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.681, + 0.894, + 0.803 + ], + "angle": 0, + "content": "In this paper, we propose a unified framework dubbed iCLIP to bridge image classification and language-image pre-training. It naturally forces the cross-modal feature learning in a unified space, where the two tasks share the same visual and textual encoders. Extensive experiments demonstrate that iCLIP is effective, and can be generalized to different visual recognition scenarios, including zero-shot, few-shot, and fully-supervised fine-tuning." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.894, + 0.903 + ], + "angle": 0, + "content": "Limitations. One limitation of iCLIP is that, despite its competitive performance, the model still relies on human labeled classification data that is not scalable. Besides, our model currently only adopts median-size parameters, which can not fully validate the generation ability to large-scale models. We are interested in exploring this in future work." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.958 + ], + "angle": 0, + "content": "2783" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.171 + ], + "angle": 0, + "content": "[1] Soren Auer, Christian Bizer, Georgi Kobilarov, Jens Lehmann, Richard Cyganiak, and Zachary Ives. Dbpedia: A nucleus for a web of open data. In The semantic web, pages 722-735. Springer, 2007. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.172, + 0.47, + 0.213 + ], + "angle": 0, + "content": "[2] Yue Cao, Zhenda Xie, Bin Liu, Yutong Lin, Zheng Zhang, and Han Hu. Parametric instance classification for unsupervised visual feature learning. 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.214, + 0.47, + 0.282 + ], + "angle": 0, + "content": "[3] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European Conference on Computer Vision, pages 213-229. Springer, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.284, + 0.471, + 0.34 + ], + "angle": 0, + "content": "[4] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6299-6308, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.341, + 0.47, + 0.395 + ], + "angle": 0, + "content": "[5] Liang-Chieh Chen, George Papandreou, Florian Schroff, and Hartwig Adam. Rethinking atrous convolution for semantic image segmentation. arXiv preprint arXiv:1706.05587, 2017.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.397, + 0.471, + 0.438 + ], + "angle": 0, + "content": "[6] Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. pages 1597-1607, 2020. 
2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.439, + 0.47, + 0.508 + ], + "angle": 0, + "content": "[7] Ekin D Cubuk, Barret Zoph, Jonathon Shlens, and Quoc V Le. Randaugment: Practical automated data augmentation with a reduced search space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 702-703, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.509, + 0.471, + 0.577 + ], + "angle": 0, + "content": "[8] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 1, 3, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.579, + 0.47, + 0.647 + ], + "angle": 0, + "content": "[9] Jiankang Deng, Jia Guo, Jing Yang, Niannan Xue, Irene Cotcia, and Stefanos P Zafeiriou. ArcFace: Additive angular margin loss for deep face recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, pages 1-1, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.649, + 0.471, + 0.704 + ], + "angle": 0, + "content": "[10] Karan Desai and Justin Johnson. Virtex: Learning visual representations from textual annotations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11162-11173, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.705, + 0.47, + 0.773 + ], + "angle": 0, + "content": "[11] Xiaoyi Dong, Yinglin Zheng, Jianmin Bao, Ting Zhang, Dongdong Chen, Hao Yang, Ming Zeng, Weiming Zhang, Lu Yuan, Dong Chen, et al. Maskclip: Masked self-distillation advances contrastive language-image pretraining. arXiv preprint arXiv:2208.12262, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.776, + 0.47, + 0.871 + ], + "angle": 0, + "content": "[12] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[13] Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. Slowfast networks for video recognition. In" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.471, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.121 + ], + "angle": 0, + "content": "Proceedings of the IEEE international conference on computer vision, pages 6202-6211, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.122, + 0.892, + 0.217 + ], + "angle": 0, + "content": "[14] Andrea Frome, Greg S Corrado, Jon Shlens, Samy Bengio, Jeff Dean, Marc' Aurelio Ranzato, and Tomas Mikolov. Devise: A deep visual-semantic embedding model. In C. J. C. Burges, L. Bottou, M. Welling, Z. Ghahramani, and K. Q. Weinberger, editors, Advances in Neural Information Processing Systems, volume 26. Curran Associates, Inc., 2013. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.22, + 0.892, + 0.289 + ], + "angle": 0, + "content": "[15] Ross Girshick, Jeff Donahue, Trevor Darrell, and Jitendra Malik. Rich feature hierarchies for accurate object detection and semantic segmentation. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2014. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.291, + 0.892, + 0.36 + ], + "angle": 0, + "content": "[16] Lluis Gomez, Yash Patel, Marçal Rusinol, Dimosthenis Karatzas, and CV Jawahar. Self-supervised learning of visual features through embedding images into text topic spaces. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4230-4239, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.362, + 0.892, + 0.417 + ], + "angle": 0, + "content": "[17] Agrim Gupta, Piotr Dollar, and Ross Girshick. Lvis: A dataset for large vocabulary instance segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5356-5364, 2019. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.419, + 0.892, + 0.46 + ], + "angle": 0, + "content": "[18] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. pages 9729-9738, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.461, + 0.892, + 0.503 + ], + "angle": 0, + "content": "[19] Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE international conference on computer vision, pages 2961-2969, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.504, + 0.892, + 0.56 + ], + "angle": 0, + "content": "[20] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.561, + 0.892, + 0.617 + ], + "angle": 0, + "content": "[21] Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. Deep networks with stochastic depth. In European conference on computer vision, pages 646-661. Springer, 2016. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.618, + 0.892, + 0.7 + ], + "angle": 0, + "content": "[22] Gabriel Ilharco, Mitchell Wortsman, Ross Wightman, Cade Gordon, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, Hongseok Namkoong, John Miller, Hannaneh Hajishirzi, Ali Farhadi, and Ludwig Schmidt. Openclip, July 2021. If you use this software, please cite it as below. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.702, + 0.85, + 0.717 + ], + "angle": 0, + "content": "[23] P Jackson. Introduction to expert systems. 1 1986. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.718, + 0.892, + 0.8 + ], + "angle": 0, + "content": "[24] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In International Conference on Machine Learning, pages 4904-4916. PMLR, 2021. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.802, + 0.892, + 0.858 + ], + "angle": 0, + "content": "[25] Armand Joulin, Laurens Van Der Maaten, Allan Jabri, and Nicolas Vasilache. Learning visual features from large weakly supervised data. In European Conference on Computer Vision, pages 67-84. Springer, 2016. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.86, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[26] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, et al. The kinetics hu" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2784" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.109, + 0.092, + 0.469, + 0.12 + ], + "angle": 0, + "content": "man action video dataset. arXiv preprint arXiv:1705.06950, 2017.2,6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.47, + 0.177 + ], + "angle": 0, + "content": "[27] Simon Kornblith, Jonathon Shlens, and Quoc V. Le. Do better imagenet models transfer better? In 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2656-2666, 2019. 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.179, + 0.469, + 0.234 + ], + "angle": 0, + "content": "[28] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. In Advances in neural information processing systems, pages 1097-1105, 2012. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.236, + 0.469, + 0.304 + ], + "angle": 0, + "content": "[29] Alina Kuznetsova, Hassan Rom, Neil Alldrin, Jasper Uijlings, Ivan Krasin, Jordi Pont-Tuset, Shahab Kamali, Stefan Popov, Matteo Malloci, Alexander Kolesnikov, et al. The open images dataset v4. International Journal of Computer Vision, 128(7):1956-1981, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.305, + 0.469, + 0.375 + ], + "angle": 0, + "content": "[30] Yangguang Li, Feng Liang, Lichen Zhao, Yufeng Cui, Wanli Ouyang, Jing Shao, Fengwei Yu, and Junjie Yan. Supervision exists everywhere: A data efficient contrastive language-image pre-training paradigm. In International Conference on Learning Representations, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.377, + 0.469, + 0.446 + ], + "angle": 0, + "content": "[31] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.448, + 0.469, + 0.515 + ], + "angle": 0, + "content": "[32] Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.518, + 0.469, + 0.573 + ], + "angle": 0, + "content": "[33] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. pages 10012-10022, 2021. 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.575, + 0.469, + 0.643 + ], + "angle": 0, + "content": "[34] Ze Liu, Jia Ning, Yue Cao, Yixuan Wei, Zheng Zhang, Stephen Lin, and Han Hu. Video swim transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3202-3211, June 2022. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.646, + 0.469, + 0.701 + ], + "angle": 0, + "content": "[35] Jonathan Long, Evan Shelhamer, and Trevor Darrell. Fully convolutional networks for semantic segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3431-3440, 2015. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.702, + 0.469, + 0.757 + ], + "angle": 0, + "content": "[36] George A. Miller. WordNet: A lexical database for English. In Human Language Technology: Proceedings of a Workshop held at Plainsboro, New Jersey, March 8-11, 1994, 1994. 1, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.759, + 0.469, + 0.815 + ], + "angle": 0, + "content": "[37] Norman Mu, Alexander Kirillov, David Wagner, and Saining Xie. Slip: Self-supervision meets language-image pretraining. In European Conference on Computer Vision, pages 529-544. Springer, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.817, + 0.469, + 0.844 + ], + "angle": 0, + "content": "[38] Kevin Musgrave, Serge Belongie, and Ser-Nam Lim. A metric learning reality check, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[39] Basil Mustafa, Carlos Riquelme, Joan Puigcerver, Rodolphe Jenatton, and Neil Houlsby. Multimodal contrastive learning with limoe: the language-image mixture of experts. arXiv preprint arXiv:2206.02770, 2022. 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.135 + ], + "angle": 0, + "content": "[40] Hieu V Nguyen and Li Bai. Cosine similarity metric learning for face verification. In Asian conference on computer vision, pages 709-720. Springer, 2010. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.204 + ], + "angle": 0, + "content": "[41] Mohammad Norouzi, Tomas Mikolov, Samy Bengio, Yoram Singer, Jonathon Shlens, Andrea Frome, Greg S Corrado, and Jeffrey Dean. Zero-shot learning by convex combination of semantic embeddings. arXiv preprint arXiv:1312.5650, 2013. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.206, + 0.892, + 0.261 + ], + "angle": 0, + "content": "[42] Hieu Pham, Zihang Dai, Golnaz Ghiasi, Hanxiao Liu, Adams Wei Yu, Minh-Thang Luong, Mingxing Tan, and Quoc V. Le. Combined scaling for zero-shot transfer learning. CoRR, abs/2111.10050, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.263, + 0.892, + 0.319 + ], + "angle": 0, + "content": "[43] Zhaofan Qiu, Ting Yao, and Tao Mei. Learning spatiotemporal representation with pseudo-3d residual networks. In proceedings of the IEEE International Conference on Computer Vision, pages 5533–5541, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.321, + 0.892, + 0.403 + ], + "angle": 0, + "content": "[44] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pages 8748-8763. PMLR, 2021. 1, 3, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.405, + 0.892, + 0.461 + ], + "angle": 0, + "content": "[45] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. 
In Advances in neural information processing systems, pages 91-99, 2015. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.462, + 0.892, + 0.503 + ], + "angle": 0, + "content": "[46] Mert Bulent Sariyildiz, Julien Perez, and Diane Larlus. Learning visual representations with caption annotations. In European Conference on Computer Vision (ECCV), 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.504, + 0.892, + 0.572 + ], + "angle": 0, + "content": "[47] Christoph Schuhmann, Richard Vencu, Romain Beaumont, Robert Kaczmarczyk, Clayton Mullis, Aarush Katta, Theo Coombes, Jenia Jitsev, and Aran Komatsuzaki. Laion-400m: Open dataset of clip-filtered 400 million image-text pairs, 2021. 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.575, + 0.892, + 0.63 + ], + "angle": 0, + "content": "[48] Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In Proceedings of ACL, 2018. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.632, + 0.892, + 0.673 + ], + "angle": 0, + "content": "[49] K. Simonyan and A. Zisserman. Very deep convolutional networks for large-scale image recognition. In International Conference on Learning Representations, May 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.675, + 0.892, + 0.731 + ], + "angle": 0, + "content": "[50] Robyn Speer, Joshua Chin, and Catherine Havasi. Conceptnet 5.5: An open multilingual graph of general knowledge. In Thirty-first AAAI conference on artificial intelligence, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.732, + 0.892, + 0.787 + ], + "angle": 0, + "content": "[51] Chen Sun, Abhinav Shrivastava, Saurabh Singh, and Abhinav Gupta. Revisiting unreasonable effectiveness of data in deep learning era. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), Oct 2017. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.789, + 0.892, + 0.869 + ], + "angle": 0, + "content": "[52] Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed, Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, and Andrew Rabinovich. Going deeper with convolutions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1-9, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.873, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[53] Bart Thomee, David A. Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.515, + 0.957 + ], + "angle": 0, + "content": "2785" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.092, + 0.468, + 0.12 + ], + "angle": 0, + "content": "Li-Jia Li. Yfcc100m: The new data in multimedia research. Commun. ACM, 59(2):64-73, jan 2016. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.469, + 0.189 + ], + "angle": 0, + "content": "[54] Du Tran, Lubomir Bourdev, Rob Fergus, Lorenzo Torresani, and Manohar Paluri. Learning spatiotemporal features with 3d convolutional networks. In Proceedings of the IEEE international conference on computer vision, pages 4489-4497, 2015. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.192, + 0.469, + 0.234 + ], + "angle": 0, + "content": "[55] Denny Vrandecic. 
Wikidata: A new platform for collaborative data collection. In Proceedings of the 21st international conference on world wide web, pages 1063-1064, 2012. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.236, + 0.469, + 0.291 + ], + "angle": 0, + "content": "[56] Haohan Wang, Songwei Ge, Zachary Lipton, and Eric P Xing. Learning robust global representations by penalizing local predictive power. In Advances in Neural Information Processing Systems, pages 10506-10518, 2019. 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.293, + 0.469, + 0.334 + ], + "angle": 0, + "content": "[57] Hao Wang, Yitong Wang, Zheng Zhou, Xing Ji, Dihong Gong, Jingchao Zhou, Zhifeng Li, and Wei Liu. Cosface: Large margin cosine loss for deep face recognition, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.336, + 0.469, + 0.403 + ], + "angle": 0, + "content": "[58] Saining Xie, Ross Girshick, Piotr Dólár, Zhuowen Tu, and Kaiming He. Aggregated residual transformations for deep neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1492-1500, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.406, + 0.469, + 0.46 + ], + "angle": 0, + "content": "[59] Zhenda Xie, Yutong Lin, Zhuliang Yao, Zheng Zhang, Qi Dai, Yue Cao, and Han Hu. Self-supervised learning with swim transformers. arXiv preprint arXiv:2105.04553, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.463, + 0.469, + 0.545 + ], + "angle": 0, + "content": "[60] Jinyu Yang, Jiali Duan, Son Tran, Yi Xu, Sampath Chanda, Liquun Chen, Belinda Zeng, Trishul Chilimbi, and Junzhou Huang. Vision-language pre-training with triple contrastive learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15671-15680, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.548, + 0.469, + 0.616 + ], + "angle": 0, + "content": "[61] Jianwei Yang, Chunyuan Li, Pengchuan Zhang, Bin Xiao, Ce Liu, Lu Yuan, and Jianfeng Gao. Unified contrastive learning in image-text-label space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19163-19173, June 2022. 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.619, + 0.469, + 0.686 + ], + "angle": 0, + "content": "[62] Lewei Yao, Runhui Huang, Lu Hou, Guansong Lu, Minzhe Niu, Hang Xu, Xiaodan Liang, Zhenguo Li, Xin Jiang, and Chunjing Xu. FILIP: Fine-grained interactive language-image pre-training. In International Conference on Learning Representations, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.689, + 0.469, + 0.743 + ], + "angle": 0, + "content": "[63] Minghao Yin, Zhuliang Yao, Yue Cao, Xiu Li, Zheng Zhang, Stephen Lin, and Han Hu. Disentangled non-local neural networks. In Proceedings of the European conference on computer vision (ECCV), 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.746, + 0.469, + 0.814 + ], + "angle": 0, + "content": "[64] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions. Transactions of the Association for Computational Linguistics, 2:67-78, 2014. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.817, + 0.469, + 0.871 + ], + "angle": 0, + "content": "[65] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive captioners are image-text foundation models. 
Transactions on Machine Learning Research, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.874, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[66] Xiaohua Zhai, Xiao Wang, Basil Mustafa, Andreas Steiner, Daniel Keysers, Alexander Kolesnikov, and Lucas Beyer." + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.135 + ], + "angle": 0, + "content": "Lit: Zero-shot transfer with locked-image text tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18123-18133, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.19 + ], + "angle": 0, + "content": "[67] Yuhao Zhang, Hang Jiang, Yasuhide Miura, Christopher D Manning, and Curtis P Langlotz. Contrastive learning of medical visual representations from paired images and text. arXiv preprint arXiv:2010.00747, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.192, + 0.892, + 0.247 + ], + "angle": 0, + "content": "[68] Bolei Zhou, Hang Zhao, Xavier Puig, Tete Xiao, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Semantic understanding of scenes through the ade20k dataset. International Journal on Computer Vision, 2018. 2, 3, 6" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.247 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.956 + ], + "angle": 0, + "content": "2786" + } + ] +] \ No newline at end of file diff --git a/2023/iCLIP_ Bridging Image Classification and Contrastive Language-Image Pre-Training for Visual Recognition/3b34aaa2-686f-4539-b795-03e3193c1a2b_origin.pdf b/2023/iCLIP_ Bridging Image Classification and Contrastive Language-Image Pre-Training for Visual Recognition/3b34aaa2-686f-4539-b795-03e3193c1a2b_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..9ed640adab73914c060755c61d6737e07266f608 --- /dev/null +++ b/2023/iCLIP_ Bridging Image Classification and Contrastive Language-Image Pre-Training for Visual Recognition/3b34aaa2-686f-4539-b795-03e3193c1a2b_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34cacd1216c8e4413310feafd9268958214d6efa8465ea8e02cdc424ffedb6fa +size 1158157 diff --git a/2023/iCLIP_ Bridging Image Classification and Contrastive Language-Image Pre-Training for Visual Recognition/full.md b/2023/iCLIP_ Bridging Image Classification and Contrastive Language-Image Pre-Training for Visual Recognition/full.md new file mode 100644 index 0000000000000000000000000000000000000000..ea30238330e1b4716259803a6ef2728d8d5130e0 --- /dev/null +++ b/2023/iCLIP_ Bridging Image Classification and Contrastive Language-Image Pre-Training for Visual Recognition/full.md @@ -0,0 +1,289 @@ +# iCLIP: Bridging Image Classification and Contrastive Language-Image Pre-training for Visual Recognition + +Yixuan Wei $^{1,2}$ , Yue Cao $^{2*}$ , Zheng Zhang $^{2}$ , Houwen Peng $^{2}$ , Zhuliang Yao $^{1,2}$ , Zhenda Xie $^{1,2}$ , Han Hu $^{2}$ , Baining Guo $^{2}$ $^{1}$ Tsinghua University $^{2}$ Microsoft Research Asia + +# Abstract + +This paper presents a method that effectively combines two prevalent visual recognition methods, i.e., image classification and contrastive language-image pre-training, dubbed iCLIP. 
Instead of naïve multi-task learning that use two separate heads for each task, we fuse the two tasks in a deep fashion that adapts the image classification to share the same formula and the same model weights with the language-image pre-training. To further bridge these two tasks, we propose to enhance the category names in image classification tasks using external knowledge, such as their descriptions in dictionaries. Extensive experiments show that the proposed method combines the advantages of two tasks well: the strong discrimination ability in image classification tasks due to the clean category labels, and the good zero-shot ability in CLIP tasks ascribed to the richer semantics in the text descriptions. In particular, it reaches $82.9\%$ top-1 accuracy on IN-1K, and meanwhile surpasses CLIP by $1.8\%$ , with similar model size, on zero-shot recognition of Kornblith 12-dataset benchmark. The code and models are publicly available at https://github.com/weiyx16/iCLIP. + +# 1. Introduction + +Image classification is a classic visual problem whose goal is to classify images into a fixed set of pre-defined categories. For example, the widely used ImageNet dataset [8] carefully annotated 14 million images and categorize them into 21,841 categories chosen from the WordNet [36]. For image classification, each category provides a clear taxonomy that groups images of the same category together and separates images from different categories, and thus endows the learnt representation with strong discriminant ability. However, this classification ability is limited to a fixed set of categories [8, 29, 51]. + +![](images/af0f5f82fdbfa99cb6fc29b27f21da79ad9876dd462996b500e6c87e28d447a9.jpg) +Figure 1. An illustration of the proposed iCLIP framework. The iCLIP framework can take two types of annotations for training: classes and alt-texts. It converts the conventional image classification formula to share the same text encoder and the same cosine classifier as that used in the contrastive language-image pretraining (CLIP). It also uses a dictionary-enhanced approach to enrich the original class names in the image classification problem with external information involved in dictionaries. The deep fusion and knowledge-enriched classes both greatly improve the performance compared to naive multi-task learning or performing one of the two tasks alone. + +Recently, the method that learns to contrast image-text pairs, known as contrastive language-image pre-training (abbr. CLIP), has well made up such shortage of the conventional image classification methods to achieve strong zero-shot recognition ability [24, 44]. These methods employ a contrastive learning framework, where images and their corresponding alt-texts are treated as positive pairs, while images with all other alt-texts are treated as negative pairs. Thanks to the rich semantics involved in the alt-texts, the images can be weakly connected to almost arbitrary categories that already appear in the alt-texts, resulting in its zero-shot ability. A drawback is that the image-text pairs are usually crawled from the internet without human labeling, leading to their noisy and ambiguous nature. Thus the learnt representations are often not conceptual compact, and may lack certain discriminative ability. + +This paper explores how to effectively combine these two powerful visual recognition and representation learning methods, to take advantages of both methods and data sources while relieving their shortages. 
We first try a naive multi-task learning framework that applies the original head networks of the two tasks on top of a shared visual encoder, and jointly learn the network with separate losses of the two tasks. This naive multi-task learning approach has been able to benefit each individual tasks, but the effect is marginal. We thus seek to fuse the two tasks more deeply, so that the advantages of the two tasks can be more effectively joined for better visual recognition, as well as for better transferable representations. + +To this end, our first technique is to deeply unify the formulations of image classification and CLIP learning. By examining their formulations, we found there are two main differences: 1) Different classification losses. Image classification tasks typically use a linear classification loss which has better fitting ability due to the non-normalized nature, while the CLIP-based methods adopt a cosine classifier which has better transferability for new domains and categories [2, 6, 9, 18, 38, 57]. 2) Different parameterization methods for classifier weights. Image classification tasks usually directly optimize the parametric classification weights without a need to process text semantics in class names. The CLIP method can be regarded as generating classifier weights through a text encoder and learns the text encoder instead. The text-encoder-based classifier allows sharing between alt-texts as well as modeling their relationships, which enables the ability to tackle any classes. + +Although the linear classifier and direct classifier weight parameterization have been common practice in image classification for many years, it is interesting to find that changing the old formulation as that in the CLIP approach has almost no performance degradation for pure image classification problems. This indicates that we can directly adapt the image classification formulation to the cosine classifier and the text encoder parameterization used by CLIP, with almost no loss. This also allows us to further share the text encoder for both class names and alt-texts. Our experiments show that this deep fusion approach performs much better than the naive multi-task method for both in-domain/zero-shot classification and multi-modal retrieval tasks learning (see 3). + +Another gap between the image classification and CLIP lies in the different text richness. Class names are usually in short, i.e., one or a few words, and sometimes are even ambiguous and polysemous in referring to specific semantics, for example, "night bird" can represents either "owl" or "nightingale". On the contrary, alt-texts in CLIP are usually full sentences containing rich information. To further bridge the gap between the image classification and CLIP, we propose a second technique that leverages the knowledge + +base to enhance the original class names, such as the explanations in dictionaries. In our implementation, knowledge is simply encoded as a prefix/suffix prompt, as illustrated in Fig 1. Although simple, dictionary enhanced method shows to maintain the accuracy for pure image classification problem (see Table 1), while greatly improve the zero-shot and multi-modal retrieval performance as shown in Table 2 and 3. Note the process is just like human beings who learn new words or concepts through both real examples and explanations in dictionaries. 
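To make the prefix/suffix prompt idea concrete, the short sketch below shows one way a class name could be enriched with its dictionary gloss into a sentence like the examples in Fig. 1. It is not the authors' implementation; it assumes NLTK's WordNet corpus is available, and the name-based lookup is only for illustration (ImageNet categories are actually defined by WordNet synsets, so a real pipeline could index the gloss by synset ID directly).

```python
# Illustrative sketch (not the authors' code): build a dictionary-enhanced
# class description from a WordNet gloss.
# Assumes NLTK and its WordNet corpus are installed (nltk.download("wordnet")).
from nltk.corpus import wordnet as wn

def enhance_class_name(name: str) -> str:
    synsets = wn.synsets(name.replace(" ", "_"))
    if not synsets:
        # Fall back to the plain prompt if the class name is not found in WordNet.
        return f"A photo of a {name}."
    description = synsets[0].definition()  # gloss of the first sense
    return f"A photo of a {name}, {description}."

# e.g. an ambiguous label such as "night bird" expands into a full sentence
# spelling out what the class covers (owls, nightingales, ...).
```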
+ +By these techniques, we present a framework that deeply fuses the two important tasks of image classification and contrastive language-image pre-training, dubbed iCLIP. Extensive experiments using different combinations of image classification and image-text pair datasets show that the iCLIP method can take advantages of both the discriminative power of image classification tasks and the zero-shot ability in CLIP-like tasks, and perform significantly better than conducting each task alone or the naive multi-task learning in both the in-domain/zero-shot classification and multi-modal retrieval problems. The iCLIP method also shows that learning a stronger transferable representation than using each of the two tasks alone, verified on a variety of downstream tasks, including ADE20K semantic segmentation [68], LVIS long-tail detection [17], and video action recognition [26], as well as different evaluation settings of few-shot and fine-tuning. Our contributions are summarized as follows: + +- We combined two important vision tasks of image classification and contrastive language-image pretraining into a single framework. +- We found that the original image classification formulation can be adapted to CLIP approach with almost no performance degradation. With this finding, we present a deep fusion approach in which the two tasks share the same text encoder and the same classifier type, whose effectiveness is extensively verified on benchmarks. +- We proposed a simple yet effective method to introduce knowledge bases into image classification, addressing the ambiguous and polysemous issue of the originally short image names as well as further bridges the gap between classes and alt-texts. It also provides the first showcase of applying knowledge bases into computer vision problems. + +# 2. Related Work + +Supervised visual classification. Classification is almost ubiquitous for visual understanding tasks of various recognition granularity, e.g., image-level classification [12, 20, 28, 33, 49, 52, 58], object-level classification in + +object detection [3, 15, 19, 45], pixel-level classification in semantic/instance segmentation [5, 35, 63], and video-level action classification [4, 13, 34, 43, 54]. In these tasks, the data is manually annotated to a fixed set of classes, e.g., the 1,000-class ImageNet-1K dataset [8], the 80-class COCO detection dataset [31], the 150-class ADE20K segmentation dataset [68], etc. Among these classification tasks, the image-level classification is particularly important, which has greatly advances the success of deep learning in computer vision, thanks to its high quality and transferable discriminative representations. + +The supervised visual classification is generally performed as a $K$ -way classification problem without considering the text semantics of the class names. The most common classifier is the linear classifier, where the classifier vector of each category is parameterized as model weights and is directly learnt through optimization [28]. + +Contrastive language-image pre-training. Pioneered by CLIP [44] and Align [24], the contrastive language-image pre-training is now attracting more and more attention due to its strong zero-shot transfer capacity. These methods learn a network to pair an image and its associated alt-text, in which the image-text pairs are crawled from the Internet. With web-scale alt-text, it is possible to cover almost all classes, and these methods do show to perform very well for zero-shot recognition. 
In their frameworks, the images and texts are embedded using two separate encoders, and the output representations of the images and alt-texts are contrasted according to the positive and negative pairs. + +While prior to CLIP and Align, there have been a few early works leveraging alt-text or text encoders for image recognition [10,14,16,25,41,46,67]. More follow-up works appeared after CLIP and Align, including Filip [62], DeClip [30], BASIC [42], LiT [66], LiMoE [39], TCL [60], and so on. A drawback of these method is that the image-text pairs are usually noisy without human labeling, leading to the learned representations are not conceptual compact, lacking strong discrimination ability. + +Introducing knowledge into AI systems. Our approach is also related to the expert systems in 1980s which heavily rely on a knowledge base for reasoning [23]. Recently, in natural language process, there also emerges boosting large-scale pretrained models by making use of encyclopedic [1,55] and commonsense knowledge [50]. However, in computer vision, the knowledge bases is not well explored. We hope our findings can encourage more attention to incorporate human knowledge into current vision systems. + +Combination of representation learning. Regarding individual strengths of different representation learning approaches, there have been several works trying to combine different representation learning approaches so as to take advantages of individuals' strength. For example, SLIP [37] combines CLIP learning with a self-supervised contrastive + +learning approach. CoCa [65] combines the CLIP target with an image caption task, in hope to perform well for both understanding and generation problems. MaskCLIP [11] combines CLIP with masked image modeling based self-supervised learning. In contrast, our work also aims to effectively combine different representation learning approaches so as to take both advantages, specifically, the image classification and CLIP. + +Relationship to UniCL [61] Concurrent to our work, there is another work named UniCL [61] which also combines image classification with language-image pretraining. We hope the consistent knowledge will help the community in learning more powerful representations. Also note that there are two main differences comparing our framework to the UniCL framework [61]: 1) We involve all negative classifiers in training the supervised classification, while UniCL only involve negatives in a same batch. To make feasible all negative classifiers, we propose a GPU-distributed implementation that distributes the classifiers evenly into different GPUs. Our implementations show to have better in-domain accuracy compared to UniCL when the category number is as large as tens of thousands (76.3% vs. 70.5% as shown in Tab. 4). 2) We introduce a new dictionary enhanced approach to convert the class names with rich semantical text, which shows to be very beneficial for zero-shot image classification and multi-modal retrieval (see Tab. 2). + +# 3. Method + +In this section, we first review existing methods on image classification and contrastive language-image pre-training tasks. Then, we propose a unified framework to bridge the two tasks in a deep fusion fashion. Finally, we introduce dictionary-enhanced category descriptions to further align the two taks on input label space. + +# 3.1. Preliminaries + +Image Classification. 
Given a set of $<$ image, category label> pairs, i.e., $\mathcal{D}^c = \{(I_i,C_i)\}_{i=1}^{|\mathcal{D}^c|}$ , image classification task targets to predict the category label of a given image, through a visual encoder $f_v$ , and a parametric category classifier $h_c$ , illustrated in Fig. 2 (b). The parameters of $h_c$ is a matrix $W \in \mathcal{R}^{N \times H}$ , where $N$ is the number of categories and $H$ is the dimension of visual embeddings. The visual encoder $f_v$ transforms each raw image $I_i$ to an embedding $v_i = f_v(I_i)$ , while the classifier $h_c$ predicts the distribution $P_i \in \mathcal{R}^N$ over all pre-defined categories via an inner product between $W$ and $v_i$ , i.e., $P_i = W \cdot v_i$ (bias term is omitted for simplicity). Finally, a cross entropy is applied on $P_i$ and $C_i$ to calculate training loss, which is formulated as: + +$$ +\mathcal {L} = \frac {- 1}{| \mathcal {D} ^ {c} |} \sum_ {(I _ {i}, C _ {i}) \in \mathcal {D} ^ {c}} \log \frac {\exp (W _ {C _ {i}} \cdot v _ {i})}{\sum_ {j = 1} ^ {N} \exp (W _ {j} \cdot v _ {i})}, \qquad (1) +$$ + +![](images/c8774f0ac673c5836a497e48d2b1456d23aac8254209126679141c31ed7b6276.jpg) +Figure 2. An illustration of iCLIP framework. $\mathcal{B}$ is the batch size, $N$ is the number of categories and $G$ is the number of gpus. iCLIP unifies both contrastive language-image pre-training and classification tasks with shared text and visual encoder, taking alt-texts or dictionary enhanced class names as annotations. To reduce the computation, iCLIP distributes the enhanced class names over all gpus in forward, and gathers the embeddings for similarity calculation. + +where $W_{j}$ is the parametric weight of $j$ -th category. + +Contrastive Language-Image Pre-training. Given a set of $<$ image, alt-text> pairs, i.e., $\mathcal{D}^a = \{(I_i,T_i^a)\}_{i=1}^{|\mathcal{D}^a|}$ , contrastive language-image pre-training targets to close the distances between paired image and text while enlarging those of unpaired ones, through a visual encoder $f_v$ and a text encoder $f_t$ , shown in Fig. 2 (a). They transform the image $I_i$ and the alt-text $T_i^a$ to feature embeddings $v_i$ and $s_i$ , respectively. A contrastive loss function is applied to shrink the cosine distance of $v_i$ and $s_i$ , which is defined as: + +$$ +\mathcal {L} = \frac {- 1}{| \mathcal {D} ^ {a} |} \sum_ {\substack {(I _ {i}, T _ {i} ^ {a}) \\ \in \mathcal {D} ^ {a}}} \log \frac {\exp \left(\cos \left(f _ {t} \left(T _ {i} ^ {a}\right) , v _ {i}\right) / \tau\right)}{\sum_ {T _ {j} ^ {a} \in \mathcal {T} ^ {a}} \exp \left(\cos \left(f _ {t} \left(T _ {j} ^ {a}\right) , v _ {i}\right) / \tau\right)}, \tag{2} +$$ + +where $\cos (\cdot ,\cdot)$ represents the cosine similarity between two embeddings, $\mathcal{T}^a$ is all the alt-texts in a batch including one positive paired alt-text and $|\mathcal{T}^a| - 1$ negative ones, and $\tau$ is a temperature hyper-parameter to scale the similarities. + +Task differences. Comparing the formations of image classification and language-image pre-training, we can draw three main differences between them. 1) Training loss functions. Classification commonly adopts a cross-entropy loss on inner-product similarity, while image-text learning uses InfoNCE loss on cosine similarity. 2) Classifier types. Classification adopts a parametric category classifier, while image-text learning uses a text encoder. 3) Label granularity. 
Category names in classification are usually very short, i.e., one or few words, while the captions in image-text pretraining are full sentences containing rich semantics. + +# 3.2. Bridge Image Classification and Contrastive Language-Image Pre-training + +To bridge image classification and image-text alignment, we introduce three adaptations to align their training losses, unify the classifier types, and close the label granularity gap. The overall adaption is visualized in Fig. 3. + +Classification with Text Encoder. As formulated in Eq. (1), image classification commonly adopts a cross- + +![](images/c2bed6768d0cd53928f163cd4f3c903942f1447b74e996c37a773bcc413d3570.jpg) +Figure 3. An illustration of our approach to bring image classification (a) to CLIP (b), from the perspective of loss function, classifier types and label granularity. We reformulate the linear classifier (a.1) with a text-encoder-based classifier (a.2), and enhance the class names with a text description from the dictionary (a.3). + +entropy loss on top of the inner-product similarity between the visual embedding $v_{i}$ and the parametric classifier $h_c$ . This formulation is not in line with the InfoNCE loss in Eq. (2), leading to a misalignment between the two paradigms. To address this issue, we adopt a cosine similarity for image classification, instead of the original inner-product similarity in Eq. (1), which formulates a cosine classifier as: + +$$ +\mathcal {L} = \frac {- 1}{| \mathcal {D} ^ {c} |} \sum_ {\left(I _ {i}, C _ {i}\right) \in \mathcal {D} ^ {c}} \log \frac {\exp \left(\cos \left(W _ {C _ {i}} , v _ {i}\right) / \tau\right)}{\sum_ {j = 1} ^ {N} \exp \left(\cos \left(W _ {j} , v _ {i}\right) / \tau\right)}. \tag {3} +$$ + +Cosine similarity is a common practice in metric learning [40]. It can smoothly align the supervised image classification with the cross-modal contrastive pre-training in terms of learning objective function, i.e., Eq. (2). Moreover, our experiments demonstrate that this cosine classifier performs on par with the traditional linear classifier (see Tab. 1). + +The cosine classifier aligns the training losses of two tasks. However, the annotations, i.e., category labels and captions, are modeled separately by the parametric category classifier $h_c$ and the text encoder $f_t$ . As analyzed in Sec. 4.3, shallowly combining the two tasks with a shared + +visual encoder $f_{v}$ and two separate task heads does not fully take advantage of the gold annotations in image classification and rich concepts in textual captions, resulting in a suboptimal solution with limited transferring capacity. + +To tackle this issue, we take label semantics into consideration and propose to utilize the text encoder $f_{t}$ as a meta classifier for image classification. Formally, we replace the label index $C_i$ with its class name $M_{i}$ , and generate the classifier weight $W$ on-the-fly through the text encoder $f_{t}$ which is shared with image-text pre-training. The new formulation is represented as: + +$$ +\mathcal{L} = \frac{-1}{|\mathcal{D}^{c}|}\sum_{\substack{(I_{i},M_{i})\\ \in \mathcal{D}^{c}}}\log \frac{\exp\left(\cos\left(f_{t}\left(M_{i}\right),v_{i}\right) / \tau\right)}{\sum_{j = 1}^{N}\exp\left(\cos\left(f_{t}\left(M_{j}\right),v_{i}\right) / \tau\right)}. \tag{4} +$$ + +In this way, the text encoder $f_{t}$ is not only used to extract semantics from gold category labels, but also capture textual information from image captions. 
Both the visual and textual encoders are shared across the two tasks, leading to a deep fusion of the two tasks. + +Classification with Dictionary Enhancement. The cosine classifier with text encoder as a meta network has largely unify the two tasks in model training. In this step, we further align them on input label granularity, reducing the disparity between label names (one or few words) and image captions (a complete sentence). Our proposal is to integrate external knowledge into label names. More specifically, for each label names, we introduce detailed descriptions from its corresponding synset in the dictionary WordNet [36] as the external knowledge and create a pseudo sentence as label for each categories. We combine the original class names and their dictionary descriptions to form the enhanced texts as the input to the text encoder. Also, we add a prompt to make the sentence more fluent. The final dictionary-enhanced description for each category is formed as: + +$$ +\mathcal {T} ^ {c} = \mathrm {A p h o t o o f a} \left\{\mathrm {N A M E} \right\} _ {C _ {i}}, \left\{\mathrm {D E S C R I P T I O N} \right\} _ {C _ {i}}. \tag {5} +$$ + +Such dictionary-enhanced descriptions have similar label granularity to alt-text, and thus further bring image classification closer to image-text alignment. Moreover, the description introduces more details of each category, being capable of reducing potential misconception. For example, the class "night bird" actually includes several kinds of birds, like owl, nightingale, etc. Such a category name cannot allow the model to learn precise representations due to the blurry concepts. If we augment the category with more external knowledge, such as "a photo of a night bird, any bird associated with night: owl, nightingale, nighthawk", it will help the model learn discriminative representation on distinguishing different concepts (e.g., bird species). + +A Unified Framework. The above three steps adapt image classification to image-text alignment from the perspec + +tive of training loss, classifier type and annotation granularity, respectively. Towards the final unification, we propose a new framework dubbed iCLIP, as presented in Fig. 2 (c), which bridges Image Classification and Image-Text Alignment with a unified contrastive learning loss formulated as: + +$$ +\mathcal {L} = \frac {- 1}{| \mathcal {D} |} \sum_ {\left(I _ {i}, T _ {i}\right) \in \mathcal {D}} \log \frac {\exp \left(\cos \left(f _ {t} \left(T _ {i}\right) , v _ {i}\right) / \tau\right)}{\sum_ {T _ {j} \in \tau} \exp \left(\cos \left(f _ {t} \left(T _ {j}\right) , v _ {i}\right) / \tau\right)}, \tag {6} +$$ + +where $\mathcal{D}$ is a set consisting of the image classification data $D^c$ and the image-text alignment data $D^a$ , i.e., $\mathcal{D} = \{\mathcal{D}^c, D^a\}$ , while $\mathcal{T}$ indicates a combination of $T^c$ and $T^a$ , i.e., $\mathcal{T} = \{\mathcal{T}^c, T^a\}$ . Text label $T_i$ is either an image caption $T_i^a$ sampled from $T^a$ or a dictionary-enhanced description $T_i^c$ sampled from $T^c$ . It is worth noting that, with this unified framework, both the text encoder $f_t$ and the visual encoder $f_v$ are shared across the two tasks, achieving a deep fusion. The proposed unified framework is able to leverage any combination of tag-labeled and caption-labeled image datasets for pre-training. This combination allows the model to learn more discriminative representation, while capturing more visual concepts from the textual description. 
On the other hand, our iCLIP method is efficient.

Distributed Implementation. In our iCLIP framework, the text embedding of each category is generated by the shared text encoder on-the-fly. This computation is affordable when the number of categories $N$ is not large. However, it becomes infeasible when the category number scales up, such as the 22k categories in ImageNet-22K [8]. To make the iCLIP framework feasible for large-category classification data in practice, we adopt a distributed implementation strategy [6]. Specifically, we distribute all the enhanced class names evenly over $G$ GPUs in the forward pass, and gather the embeddings from each GPU for the similarity calculation, which reduces the computation cost and the memory consumption of the text encoder to $1/G$.

Table 1. Ablation on formulation adaptations for image classification task. Models are trained with 100 epochs.
| # | Cosine Loss | Text-enc. as Classifier | Enhanced classes | IN-1K |
|---|---|---|---|---|
| 1 | | | | 80.9 |
| 2 | ✓ | | | 81.5 |
| 3 | ✓ | ✓ | | 81.2 |
| 4 | ✓ | ✓ | ✓ | 81.4 |
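The formulation ablated in Table 1 amounts to a cosine classifier whose weights are generated by the shared text encoder from the (enhanced) class texts, trained with softmax cross-entropy as in Eq. (4). The PyTorch-style sketch below is purely illustrative: the encoder modules `f_v` and `f_t`, the `tokenize` function, and the temperature value are placeholders, not the released implementation.

```python
# Illustrative sketch (assumed module names, not the authors' code):
# text-encoder-based cosine classifier for the classification branch (cf. Eq. (4)).
import torch.nn.functional as F

def text_encoder_classification_loss(images, labels, class_prompts,
                                      f_v, f_t, tokenize, tau=0.07):
    v = F.normalize(f_v(images), dim=-1)                   # (B, H) image embeddings
    w = F.normalize(f_t(tokenize(class_prompts)), dim=-1)  # (N, H) class embeddings, generated on-the-fly
    # For tens of thousands of classes, the class prompts can be sharded across
    # GPUs and the embeddings all-gathered, as in the distributed implementation above.
    logits = v @ w.t() / tau                                # cosine similarities scaled by temperature
    return F.cross_entropy(logits, labels)                  # softmax over all N classes
```

Feeding in-batch alt-texts as `class_prompts` and the diagonal indices as `labels` recovers the image-to-text contrastive term of Eq. (2), which is what lets the two data sources share a single loss of the form in Eq. (6).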
+ +# 4. Experiment + +We verify the effectiveness of the proposed iCLIP framework through the comparisons to single-task baselines and a naive multi-task learning baseline. The comparisons are conducted in three settings covering different scales of pretraining data. In evaluation, we assess the models on different tasks, including in-domain classification, zero-shot classification, multi-modal retrieval, and downstream tasks. + +Table 2. Ablation study conducted on IN-22K [8] and YFCC-14M [53]. Models are pre-trained from scratched with 32 epochs following UniCL [61]. COCO and Flickr stand for MSCOCO [31] and Flickr30K [64]. IR and TR stand for image retrieval and text retrieval. + +
| # | Training Data | Method | IN-1K (zero-shot cls.) | 14-dataset avg. (zero-shot cls.) | Flickr-IR | Flickr-TR | COCO-IR | COCO-TR |
|---|---|---|---|---|---|---|---|---|
| 1 | YFCC-14M | CLIP [44] | 30.1 | 36.3 | 21.5 | 37.9 | 12.5 | 21.2 |
| 2 | YFCC-14M (half) + IN-21K (half) | iCLIP (w/o Desc.) | 39.4 | 45.4 | 27.6 | 39.1 | 13.0 | 20.4 |
| 3 | YFCC-14M (half) + IN-21K (half) | iCLIP | 45.9 | 49.9 | 31.9 | 49.8 | 15.5 | 27.2 |
| 4 | YFCC-14M + IN-21K | iCLIP (w/o Desc.) | 41.1 | 49.4 | 33.4 | 51.2 | 16.3 | 26.5 |
| 5 | YFCC-14M + IN-21K | iCLIP | 50.9 | 54.4 | 37.1 | 55.7 | 18.5 | 30.7 |
| 6 | YFCC-14M + IN-22K | iCLIP (w/o Desc.) | 76.2 | 51.6 | 33.2 | 48.2 | 14.4 | 23.8 |
| 7 | YFCC-14M + IN-22K | iCLIP | 76.3 | 55.5 | 36.2 | 55.3 | 18.0 | 29.7 |
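The zero-shot classification entries above follow the CLIP-style protocol: an image is assigned to the class whose prompt embedding has the highest cosine similarity with the image embedding. The sketch below is a hypothetical illustration only; the encoders and tokenizer are placeholders, and the exact prompt text (plain or dictionary-enhanced) follows the evaluation setup rather than anything fixed here.

```python
# Hypothetical zero-shot inference sketch (placeholder encoders/tokenizer):
# score images against the text embeddings of all class prompts and take the argmax.
import torch
import torch.nn.functional as F

@torch.no_grad()
def zero_shot_predict(images, class_prompts, f_v, f_t, tokenize):
    v = F.normalize(f_v(images), dim=-1)                   # (B, H) image embeddings
    w = F.normalize(f_t(tokenize(class_prompts)), dim=-1)  # (N, H) class-prompt embeddings
    return (v @ w.t()).argmax(dim=-1)                      # predicted class index per image
```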
+ +# 4.1. Experimental Setup + +Pre-training data and settings. We consider three different scales of dataset combination for model pre-training. + +- ImageNet-1K [8] and GCC-3M [48]. In this setting, we use ImageNet-1K as the classification data while GCC-3M as the image-text data. We adopt a Swin-T [33] initialized with MoBY [59] as the visual encoder, while for the textual encoder, we use a pretrained RoBERTa-B [32]. We sample half number of images from each dataset in a mini-batch and train the models with a batch size of $128 \times 8$ V100 GPUs for 100 epochs. The highest learning rate is 2e-4 with a cosine learning rate schedule and 5 epochs warm-up. Weight decay is set to be 0.01. RandAugment [7] and stochastic depth [21] with a rate of 0.1 are used for visual encoder only. +- ImageNet-22K [8] and YFCC-14M [53]. We follow UniCL [61] to train all models from scratch with 32 epochs for a fair comparison with it. Swin-T [33] is used as the visual encoder, and a 12-layer transformer with a hidden dimension of 512 same as CLIP [44] is used as the text encoder. A batch size of $512 \times 16$ GPUs is adopted. The highest learning rate is selected from 2e-4 and 8e-4. Other regularization is the same as previous, except for a larger weight decay of 0.05. We also conduct experiments using two variants of this setup for a fair and clean comparison with the methods that use one task alone (IC or CLIP): 1) Excluding the 1,000 ImageNet-1K classes in ImageNet-22K dataset (dubbed IN-21K). This setup variant allows us to evaluate the zero-shot accuracy on ImageNet-1K for different methods; 2) Half images of the ImageNet-21K and YFCC-14M are used, such that the dataset size and training iterations are the same as that used in one single task. +- ImageNet-22K [8] and Laion-400M [47]. For this large-scale pre-training setting, we adopt a Swin-B initialized with MoBY as the visual encoder and a pre-trained RoBERTa-B as the text encoder. We train iCLIP for 100K iters, with a batch size of $192 \times 64$ V100 GPUs. In each mini batch, we sample 64 images from IN-22K and 128 images from Laion-400M. The model is trained on + +classification data for around 30 epochs and on image-text data for around 2 epochs equivalently. The highest learning rate is 1e-3 with a cosine learning rate schedule and a warm-up for $16.7\mathrm{K}$ liters. Weight decay is set to 0.05 and drop depth rate is set to 0.2. + +Evaluation datasets and settings. During evaluation, we assess the models considering five different settings. + +- Zero-shot classification. We evaluate the concept coverage and generalization ability of the models on three datasets: 1) ImageNet-1K variants, including IN-1K [8], and IN-Sketch (IN-S) [56]. Top-1 accuracy is reported; 2) the widely-used Kornblith 12-dataset benchmark [27]; 3) 14 datasets used in UniCL [61]. For 2) and 3), averaged accuracy is reported. +- Zero-shot multi-modal retrieval. Flickr30K [64] (1K test set) and MSCOCO [31] (5K test set) are used to evaluate the alignment between image and text modalities. We report the Top-1 recall on both image retrieval (IR) and text retrieval (TR). +- In-domain classification. ImageNet-1K data is included in some of our pre-training setups, so we conduct indomain evaluation on ImageNet-1K in these cases. The Top-1 accuracy is reported. +- Few-shot classification. Following CLIP [44], we also evaluate the models on few-shot classification task using Kornblith 12-dataset with a frozen visual encoder. Averaged accuracy is reported. 
+- Fine-tuning on downstream tasks. To validate the generalization ability of iCLIP, the models are fine-tuned and compared on semantic segmentation [68], long-tail detection [17], and video action recognition [26]. We report val mIoU,_bbox mAP and Top-1 accuracy, respectively. The detailed settings can be found in the supplementary material. + +# 4.2. Experiments on IN-1K [8] and CC3M [48] + +Formulation adaptations for image classification. Tab. 1 ablates the effect of adapting the common image classification to that used in iCLIP, including both cosine loss, the + +Table 3. Ablation conducted on IN-1K [8] and GCC-3M [48] combined data. For the models only using IN-1K, we train them for 100 epochs. For the models only using GCC-3M, we train them with the same iterations and batch size as the ones used in IN-1K. + +
| # | Method | 12-dataset avg. | IN-1K | IN-S |
|---|---|---|---|---|
| 1 | Sup-only | - | 80.9 | 29.4 |
| 2 | VL-only | 31.4 | 32.4 | 18.3 |
| 3 | Naïve multi-task | 35.1 | 80.6 | 38.3 |
| 4 | iCLIP (w/o Desc.) | 37.7 | 80.5 | 38.6 |
| 5 | iCLIP | 39.1 | 80.4 | 38.7 |
+ +Table 4. Comparison with UniCL. Models are pre-trained from scratched with 32 epochs, following UniCL [61]. + +
| # | Training Data | Method | IN-1K | 14-dataset avg. |
|---|---|---|---|---|
| 1 | YFCC + IN-21K (half) | UniCL [61] | 36.4 | 45.5 |
| 2 | YFCC + IN-21K (half) | iCLIP | 45.9 | 49.9 |
| 3 | YFCC + IN-21K | UniCL [61] | 40.5 | 49.1 |
| 4 | YFCC + IN-21K | iCLIP | 50.9 | 54.4 |
| 5 | YFCC + IN-22K | UniCL [61] | 70.5 | 52.4 |
| 6 | YFCC + IN-22K | iCLIP | 76.3 | 55.5 |
+ +text-encoder-based classifier and enhanced class names using ImageNet-1K dataset. It can be seen that the cosine classification loss gets slightly better performance than the linear one, with a $+0.6\%$ gain on IN-1K (see #1 v.s. #2). When further adapting the text-encoder-based classifier (#3) and enhancing class names from dictionaries (#4), it has almost no performance degradation ( $+0.3\%$ and $+0.5\%$ on IN-1K compared to the linear classifier), which allows us to further sharing the text encoder with CLIP for tasks unification. + +Zero-shot and in-domain classification. With previous adaptations on the image classification formulation, we can further share the text encoder between the two tasks. To ablate the effect of sharing the text encoder, we set a naive multi-task baseline, that combines image classification and CLIP in a shallow fusion, i.e., simply averaging the loss Eq. (1) and Eq. (2). Each has its own head network, i.e., the fully-connected layer $W$ for Eq. (1) and the text encoder $f_{t}$ for Eq. (2). The best performances of the two heads are reported in Tab. 3. With a shared text encoder across the two tasks, our iCLIP (w/o Desc.) outperforms the naive multi-task on Kornblith 12-dataset zero-shot classification by $+2.6\%$ in average, while they are comparable on ImageNet-related datasets classification (see #3 v.s. #4). Our iCLIP deeply unifies two tasks, thus better gathering the merits of the two learning protocols. When compared with the supervised softmax classifier baseline, i.e., Eq. (1) Sup-only, and the contrastive image-text pre-training baseline, i.e., Eq. (2) VL-only, our method is slightly worse than Sup-only on IN-1K by $0.4\%$ , while achieves superior performance on other evaluation settings, $+6.3\%$ better than VL-only method on 12-dataset zero-shot testing and $+9.2\%$ + +better than Sup-only method on IN-S (see #4 v.s. #1). Moreover, the dictionary enhancement on class names (#5) can further bring an average of $+1.4\%$ improvements on Kornblith 12-dataset, revealing the increased discriminative representation for ambiguous concepts. + +# 4.3. Experiments on IN-22K [8] and YFCC14M [53] + +Effects of the unified framework. Here, we further ablate the effect of the unified formulation for deep fusion of the two tasks. In #2, #4 and #6 of Tab. 2, we show the results of our unified framework under three different dataset combination setups. Compared with the CLIP baseline (#1), our iCLIP (#2) earns $+8.3\%$ gains on IN-1K zero-shot classification and also $+9.1\%$ improvements when evaluated on the 14-dataset. In addition, our iCLIP is better than the CLIP baseline on most cross-modal retrieval benchmarks, while only using half of visual-language data in pre-training. + +Effects of dictionary enhancement. Furthermore, we dissect the model to study the contributions of dictionary-enhanced category description. From Tab. 2, we can see that enhancing each class names with informative description from the dictionary brings consistent improvements on both zero-shot classification and zero-shot retrieval under three dataset combination setups (see #3, #5 and #7). In particular, when pre-trained with half images of YFCC-14M and IN-21K (#3), the integrated knowledge contributes $+6.5\%$ improvements on IN-1K zero-shot classification, which makes our iCLIP reach $45.9\%$ , being $+5.4\%$ better than UniCL method [61] with full images of YFCC-14M and IN-21K (see #3 in Tab. 4). More importantly, the enhanced class names is beneficial to cross-modal retrieval. 
For example, for image-to-text search, the dictionary-enhanced description can bring $10.7\%$ and $6.8\%$ top-1 recall gains on Flickr30K [64] and MSCOCO [31] respectively, as reported in row 3 of Tab. 2. + +Comparison with UniCL [61]. Tab. 4 summaries our comparison to UniCL. The same as UniCL, we evaluate our models on IN-1K and 14 datasets. Under three different dataset combination setups, our iCLIP surpasses UniCL by at least $+5\%$ on IN-1K image classification, while reaching $55.5\%$ averaged accuracy on 14 datasets (#6), being $+3.1\%$ better than UniCL (#5). + +# 4.4. Experiments on IN-22K and Laion-400M [47] + +Zero-shot and in-domain classification. Tab. 5 presents a large scale experiment using the publicly accessible largescale data: Laion-400M [47] and IN-22K [8]. For Sup-only, i.e. Eq. (1), we use the released version from Swin [33], which is trained on IN-22K for 90 epochs. For VL-only, i.e. Eq. (2), we pre-train it on Laion-400M with a similar image numbers (#im). Our method is comparable to Sup only on IN-1K, while it gets $+17.8\%$ and $+8.3\%$ better results than the two baselines on IN-S, demonstrating its robustness to natural distribution shifts. Our iCLIP surpasses + +Table 5. Ablation study on IN-22K [8] and Laion-400M [47]. We evaluate the models on ImageNet datasets (IN-1K [8] and IN-S [56]) and zero-shot evaluation on the Kornblith 12-dataset benchmark [27]. Few-shot learning on Kornblith 12-dataset and the fine-tuning on three downstream tasks are conducted to evaluate the transfer capability of iCLIP. $\ddagger$ denotes for our reproduction using released checkpoints. + +
| Method | Visual encoder (Arch.) | Pre-train length (#im.) | IN-1K | IN-S | 12-dataset 0-shot | 12-dataset 4-shot | ADE20K | LVIS | Kinetics400 |
|---|---|---|---|---|---|---|---|---|---|
| CLIP [44] | ViT-B/16 | 400M×32 eps | 68.6 | 46.6‡ | 68.8 | 66.4‡ | - | - | - |
| OpenCLIP [22] | ViT-B/16 | 400M×32 eps | 67.1 | 52.4‡ | 70.9‡ | - | - | - | - |
| Sup-only | Swin-Base | 14M×90 eps | 82.6 | 42.0 | - | 67.6 | 52.1 | 35.9 | 82.7 |
| VL-only | Swin-Base | 400M×3 eps | 61.1 | 51.5 | 67.2 | 73.3 | 52.0 | 36.6 | 82.3 |
| iCLIP | Swin-Base | 400M×2 eps + 14M×30 eps | 82.9 | 59.8 | 70.6 | 78.1 | 52.6 | 37.9 | 83.1 |
+ +![](images/f086c3b3a0ac2a1c5a5f821886e5d46ca51c0858ce47e88287b36d29c6b69d1b.jpg) +Figure 4. Major comparison with the CLIP-ViT-B/16 of few-shot classification (top-1 accuracy) on the Kornblith 12-dataset. $\star$ denotes the zero-shot performances. Results of CLIP on few-shot classification are reproduced using released model. We run every experiments three times and the averaged results are reported. + +OpenCLIP [22], which also uses Laion-400M data for pretraining [47], by more than $+15\%$ on IN-1K, mainly due to the pre-training data IN-22K covers the visual concepts in IN-1K. Moreover, when performing zero-shot evaluation on 12 datasets [27], our iCLIP model also achieves non-trivial improvements, e.g., an average of over $+3\%$ gains (VL-only in Tab. 5). In addition, our iCLIP is comparable to OpenCLIP on 12 datasets in average with fewer training time. More details are elaborated in the supplementary material. + +Few-shot classification We also conduct experiments in few-shot settings. Following CLIP [44], we freeze the visual encoder and append a linear probe layer for few-shot fine-tuning. We notice that the performance of CLIP [44] in few-shot classification cannot catch up with that of zero-shot classification, unless more than 4 examples per class are given, as presented in Fig. 4 ( $\star$ v.s. $-\bullet$ ). We conjecture the underlying reason is that the number of training samples is too limited to train a randomly initialized classifier. This situation can be alleviated by fine-tuning the pretrained text encoder, instead of the linear probe layer. In this way, text encoder is able to serve as a good initialization for few-shot classification, closing the gap between pretraining and fine-tuning. We evaluate such method on Kornblith 12-dataset benchmark [27] and report the results in Fig. 4. + +When only given one example per class, by utilizing text encoder as the classifier, our iCLIP achieve $73.9\%$ on 12-dataset in average, surpassing the original CLIP model by $+29.5\%$ . Such one-shot recognition gets $+3.3\%$ gains over the zero-shot baseline ( $\star$ v.s. $-\bullet$ ), demonstrating good few-shot transfer ability. When using 16 examples per class, our model still performs superior to CLIP by $4.1\%$ . Compared to supervised-only model and visual-linguistic only model, our unified contrastive learning pretrained model obtains $+24.6\%$ and $+6.1\%$ better accuracy under one-shot learning setting. Such advantages are kept to 16-shot with $+2.7\%$ and $+5.0\%$ gains $(- \bullet -$ and $- \bullet -)$ . + +Fine-tuning on Downstream Tasks We also study the generalization capability of our pre-trained models on downstream tasks, including semantic segmentation, object detection and video recognition. As shown in Tab. 5, compared to Sup-only, our iCLIP surpasses it by $+0.5\%$ , $+2.0\%$ , $+0.4\%$ on the three downstream tasks, respectively. We also earn $+0.6\%$ , $+1.3\%$ , $+0.8\%$ gains over VL-only baseline. These results reveal that our unified method could learn general visual representations. + +# 5. Conclusion + +In this paper, we propose a unified framework dubbed iCLIP to bridge image classification and language-image pre-training. It naturally forces the cross-modal feature learning in a unified space, where the two tasks share the same visual and textual encoders. Extensive experiments demonstrate that iCLIP is effective, and can be generalized to different visual recognition scenarios, including zero-shot, few-shot, and fully-supervised fine-tuning. 
+ +Limitations. One limitation of iCLIP is that, despite its competitive performance, the model still relies on human labeled classification data that is not scalable. Besides, our model currently only adopts median-size parameters, which can not fully validate the generation ability to large-scale models. We are interested in exploring this in future work. + +# References + +[1] Soren Auer, Christian Bizer, Georgi Kobilarov, Jens Lehmann, Richard Cyganiak, and Zachary Ives. Dbpedia: A nucleus for a web of open data. In The semantic web, pages 722-735. Springer, 2007. 3 +[2] Yue Cao, Zhenda Xie, Bin Liu, Yutong Lin, Zheng Zhang, and Han Hu. Parametric instance classification for unsupervised visual feature learning. 2020. 2 +[3] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European Conference on Computer Vision, pages 213-229. Springer, 2020. 3 +[4] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6299-6308, 2017. 3 +[5] Liang-Chieh Chen, George Papandreou, Florian Schroff, and Hartwig Adam. Rethinking atrous convolution for semantic image segmentation. arXiv preprint arXiv:1706.05587, 2017.3 +[6] Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. pages 1597-1607, 2020. 2, 5 +[7] Ekin D Cubuk, Barret Zoph, Jonathon Shlens, and Quoc V Le. Randaugment: Practical automated data augmentation with a reduced search space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 702-703, 2020. 6 +[8] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 1, 3, 5, 6, 7, 8 +[9] Jiankang Deng, Jia Guo, Jing Yang, Niannan Xue, Irene Cotcia, and Stefanos P Zafeiriou. ArcFace: Additive angular margin loss for deep face recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, pages 1-1, 2021. 2 +[10] Karan Desai and Justin Johnson. Virtex: Learning visual representations from textual annotations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11162-11173, 2021. 3 +[11] Xiaoyi Dong, Yinglin Zheng, Jianmin Bao, Ting Zhang, Dongdong Chen, Hao Yang, Ming Zeng, Weiming Zhang, Lu Yuan, Dong Chen, et al. Maskclip: Masked self-distillation advances contrastive language-image pretraining. arXiv preprint arXiv:2208.12262, 2022. 3 +[12] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2021. 2 +[13] Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. Slowfast networks for video recognition. In + +Proceedings of the IEEE international conference on computer vision, pages 6202-6211, 2019. 3 +[14] Andrea Frome, Greg S Corrado, Jon Shlens, Samy Bengio, Jeff Dean, Marc' Aurelio Ranzato, and Tomas Mikolov. Devise: A deep visual-semantic embedding model. In C. J. C. Burges, L. Bottou, M. Welling, Z. 
Ghahramani, and K. Q. Weinberger, editors, Advances in Neural Information Processing Systems, volume 26. Curran Associates, Inc., 2013. 3 +[15] Ross Girshick, Jeff Donahue, Trevor Darrell, and Jitendra Malik. Rich feature hierarchies for accurate object detection and semantic segmentation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2014. 3 +[16] Lluis Gomez, Yash Patel, Marçal Rusinol, Dimosthenis Karatzas, and CV Jawahar. Self-supervised learning of visual features through embedding images into text topic spaces. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4230-4239, 2017. 3 +[17] Agrim Gupta, Piotr Dollar, and Ross Girshick. Lvis: A dataset for large vocabulary instance segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5356-5364, 2019. 2, 6 +[18] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. pages 9729-9738, 2020. 2 +[19] Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE international conference on computer vision, pages 2961-2969, 2017. 3 +[20] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 2 +[21] Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. Deep networks with stochastic depth. In European conference on computer vision, pages 646-661. Springer, 2016. 6 +[22] Gabriel Ilharco, Mitchell Wortsman, Ross Wightman, Cade Gordon, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, Hongseok Namkoong, John Miller, Hannaneh Hajishirzi, Ali Farhadi, and Ludwig Schmidt. Openclip, July 2021. If you use this software, please cite it as below. 8 +[23] P Jackson. Introduction to expert systems. 1 1986. 3 +[24] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In International Conference on Machine Learning, pages 4904-4916. PMLR, 2021. 1, 3 +[25] Armand Joulin, Laurens Van Der Maaten, Allan Jabri, and Nicolas Vasilache. Learning visual features from large weakly supervised data. In European Conference on Computer Vision, pages 67-84. Springer, 2016. 3 +[26] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, et al. The kinetics hu + +man action video dataset. arXiv preprint arXiv:1705.06950, 2017.2,6 +[27] Simon Kornblith, Jonathon Shlens, and Quoc V. Le. Do better imagenet models transfer better? In 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2656-2666, 2019. 6, 8 +[28] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. In Advances in neural information processing systems, pages 1097-1105, 2012. 2, 3 +[29] Alina Kuznetsova, Hassan Rom, Neil Alldrin, Jasper Uijlings, Ivan Krasin, Jordi Pont-Tuset, Shahab Kamali, Stefan Popov, Matteo Malloci, Alexander Kolesnikov, et al. The open images dataset v4. International Journal of Computer Vision, 128(7):1956-1981, 2020. 1 +[30] Yangguang Li, Feng Liang, Lichen Zhao, Yufeng Cui, Wanli Ouyang, Jing Shao, Fengwei Yu, and Junjie Yan. 
Supervision exists everywhere: A data efficient contrastive language-image pre-training paradigm. In International Conference on Learning Representations, 2022. 3 +[31] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 3, 6, 7 +[32] Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692, 2019. 6 +[33] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. pages 10012-10022, 2021. 2, 6, 7 +[34] Ze Liu, Jia Ning, Yue Cao, Yixuan Wei, Zheng Zhang, Stephen Lin, and Han Hu. Video swim transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3202-3211, June 2022. 3 +[35] Jonathan Long, Evan Shelhamer, and Trevor Darrell. Fully convolutional networks for semantic segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3431-3440, 2015. 3 +[36] George A. Miller. WordNet: A lexical database for English. In Human Language Technology: Proceedings of a Workshop held at Plainsboro, New Jersey, March 8-11, 1994, 1994. 1, 5 +[37] Norman Mu, Alexander Kirillov, David Wagner, and Saining Xie. Slip: Self-supervision meets language-image pretraining. In European Conference on Computer Vision, pages 529-544. Springer, 2022. 3 +[38] Kevin Musgrave, Serge Belongie, and Ser-Nam Lim. A metric learning reality check, 2020. 2 +[39] Basil Mustafa, Carlos Riquelme, Joan Puigcerver, Rodolphe Jenatton, and Neil Houlsby. Multimodal contrastive learning with limoe: the language-image mixture of experts. arXiv preprint arXiv:2206.02770, 2022. 3 + +[40] Hieu V Nguyen and Li Bai. Cosine similarity metric learning for face verification. In Asian conference on computer vision, pages 709-720. Springer, 2010. 4 +[41] Mohammad Norouzi, Tomas Mikolov, Samy Bengio, Yoram Singer, Jonathon Shlens, Andrea Frome, Greg S Corrado, and Jeffrey Dean. Zero-shot learning by convex combination of semantic embeddings. arXiv preprint arXiv:1312.5650, 2013. 3 +[42] Hieu Pham, Zihang Dai, Golnaz Ghiasi, Hanxiao Liu, Adams Wei Yu, Minh-Thang Luong, Mingxing Tan, and Quoc V. Le. Combined scaling for zero-shot transfer learning. CoRR, abs/2111.10050, 2021. 3 +[43] Zhaofan Qiu, Ting Yao, and Tao Mei. Learning spatiotemporal representation with pseudo-3d residual networks. In proceedings of the IEEE International Conference on Computer Vision, pages 5533–5541, 2017. 3 +[44] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pages 8748-8763. PMLR, 2021. 1, 3, 6, 8 +[45] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. In Advances in neural information processing systems, pages 91-99, 2015. 3 +[46] Mert Bulent Sariyildiz, Julien Perez, and Diane Larlus. Learning visual representations with caption annotations. In European Conference on Computer Vision (ECCV), 2020. 
3 +[47] Christoph Schuhmann, Richard Vencu, Romain Beaumont, Robert Kaczmarczyk, Clayton Mullis, Aarush Katta, Theo Coombes, Jenia Jitsev, and Aran Komatsuzaki. Laion-400m: Open dataset of clip-filtered 400 million image-text pairs, 2021. 6, 7, 8 +[48] Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In Proceedings of ACL, 2018. 6, 7 +[49] K. Simonyan and A. Zisserman. Very deep convolutional networks for large-scale image recognition. In International Conference on Learning Representations, May 2015. 2 +[50] Robyn Speer, Joshua Chin, and Catherine Havasi. Conceptnet 5.5: An open multilingual graph of general knowledge. In Thirty-first AAAI conference on artificial intelligence, 2017. 3 +[51] Chen Sun, Abhinav Shrivastava, Saurabh Singh, and Abhinav Gupta. Revisiting unreasonable effectiveness of data in deep learning era. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), Oct 2017. 1 +[52] Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed, Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, and Andrew Rabinovich. Going deeper with convolutions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1-9, 2015. 2 +[53] Bart Thomee, David A. Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and + +Li-Jia Li. Yfcc100m: The new data in multimedia research. Commun. ACM, 59(2):64-73, jan 2016. 6, 7 +[54] Du Tran, Lubomir Bourdev, Rob Fergus, Lorenzo Torresani, and Manohar Paluri. Learning spatiotemporal features with 3d convolutional networks. In Proceedings of the IEEE international conference on computer vision, pages 4489-4497, 2015. 3 +[55] Denny Vrandecic. Wikidata: A new platform for collaborative data collection. In Proceedings of the 21st international conference on world wide web, pages 1063-1064, 2012. 3 +[56] Haohan Wang, Songwei Ge, Zachary Lipton, and Eric P Xing. Learning robust global representations by penalizing local predictive power. In Advances in Neural Information Processing Systems, pages 10506-10518, 2019. 6, 8 +[57] Hao Wang, Yitong Wang, Zheng Zhou, Xing Ji, Dihong Gong, Jingchao Zhou, Zhifeng Li, and Wei Liu. Cosface: Large margin cosine loss for deep face recognition, 2018. 2 +[58] Saining Xie, Ross Girshick, Piotr Dólár, Zhuowen Tu, and Kaiming He. Aggregated residual transformations for deep neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1492-1500, 2017. 2 +[59] Zhenda Xie, Yutong Lin, Zhuliang Yao, Zheng Zhang, Qi Dai, Yue Cao, and Han Hu. Self-supervised learning with swim transformers. arXiv preprint arXiv:2105.04553, 2021. 6 +[60] Jinyu Yang, Jiali Duan, Son Tran, Yi Xu, Sampath Chanda, Liquun Chen, Belinda Zeng, Trishul Chilimbi, and Junzhou Huang. Vision-language pre-training with triple contrastive learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15671-15680, 2022. 3 +[61] Jianwei Yang, Chunyuan Li, Pengchuan Zhang, Bin Xiao, Ce Liu, Lu Yuan, and Jianfeng Gao. Unified contrastive learning in image-text-label space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19163-19173, June 2022. 3, 6, 7 +[62] Lewei Yao, Runhui Huang, Lu Hou, Guansong Lu, Minzhe Niu, Hang Xu, Xiaodan Liang, Zhenguo Li, Xin Jiang, and Chunjing Xu. 
FILIP: Fine-grained interactive language-image pre-training. In International Conference on Learning Representations, 2022. 3 +[63] Minghao Yin, Zhuliang Yao, Yue Cao, Xiu Li, Zheng Zhang, Stephen Lin, and Han Hu. Disentangled non-local neural networks. In Proceedings of the European conference on computer vision (ECCV), 2020. 3 +[64] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions. Transactions of the Association for Computational Linguistics, 2:67-78, 2014. 6, 7 +[65] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive captioners are image-text foundation models. Transactions on Machine Learning Research, 2022. 3 +[66] Xiaohua Zhai, Xiao Wang, Basil Mustafa, Andreas Steiner, Daniel Keysers, Alexander Kolesnikov, and Lucas Beyer. + +Lit: Zero-shot transfer with locked-image text tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18123-18133, 2022. 3 +[67] Yuhao Zhang, Hang Jiang, Yasuhide Miura, Christopher D Manning, and Curtis P Langlotz. Contrastive learning of medical visual representations from paired images and text. arXiv preprint arXiv:2010.00747, 2020. 3 +[68] Bolei Zhou, Hang Zhao, Xavier Puig, Tete Xiao, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Semantic understanding of scenes through the ade20k dataset. International Journal on Computer Vision, 2018. 2, 3, 6 \ No newline at end of file diff --git a/2023/iCLIP_ Bridging Image Classification and Contrastive Language-Image Pre-Training for Visual Recognition/images.zip b/2023/iCLIP_ Bridging Image Classification and Contrastive Language-Image Pre-Training for Visual Recognition/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..cc05ee9ad3c59670c3851aad9d5df3b97a9906c0 --- /dev/null +++ b/2023/iCLIP_ Bridging Image Classification and Contrastive Language-Image Pre-Training for Visual Recognition/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0442b0d62d5b88d5d164d8feb8f3f038fa5630b43e6441c8664a7c59eabc681 +size 325015 diff --git a/2023/iCLIP_ Bridging Image Classification and Contrastive Language-Image Pre-Training for Visual Recognition/layout.json b/2023/iCLIP_ Bridging Image Classification and Contrastive Language-Image Pre-Training for Visual Recognition/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..e13c11b777288b949c65b3457c941227fc7c6fe9 --- /dev/null +++ b/2023/iCLIP_ Bridging Image Classification and Contrastive Language-Image Pre-Training for Visual Recognition/layout.json @@ -0,0 +1,9268 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 77, + 102, + 516, + 139 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 102, + 516, + 139 + ], + "spans": [ + { + "bbox": [ + 77, + 102, + 516, + 139 + ], + "type": "text", + "content": "iCLIP: Bridging Image Classification and Contrastive Language-Image Pre-training for Visual Recognition" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 78, + 160, + 515, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 160, + 515, + 204 + ], + "spans": [ + { + "bbox": [ + 78, + 160, + 515, + 204 + ], + "type": "text", + "content": "Yixuan Wei" + }, + { + "bbox": [ + 78, + 160, + 515, + 204 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 78, + 160, + 515, + 
204 + ], + "type": "text", + "content": ", Yue Cao" + }, + { + "bbox": [ + 78, + 160, + 515, + 204 + ], + "type": "inline_equation", + "content": "^{2*}" + }, + { + "bbox": [ + 78, + 160, + 515, + 204 + ], + "type": "text", + "content": ", Zheng Zhang" + }, + { + "bbox": [ + 78, + 160, + 515, + 204 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 78, + 160, + 515, + 204 + ], + "type": "text", + "content": ", Houwen Peng" + }, + { + "bbox": [ + 78, + 160, + 515, + 204 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 78, + 160, + 515, + 204 + ], + "type": "text", + "content": ", Zhuliang Yao" + }, + { + "bbox": [ + 78, + 160, + 515, + 204 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 78, + 160, + 515, + 204 + ], + "type": "text", + "content": ", Zhenda Xie" + }, + { + "bbox": [ + 78, + 160, + 515, + 204 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 78, + 160, + 515, + 204 + ], + "type": "text", + "content": ", Han Hu" + }, + { + "bbox": [ + 78, + 160, + 515, + 204 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 78, + 160, + 515, + 204 + ], + "type": "text", + "content": ", Baining Guo" + }, + { + "bbox": [ + 78, + 160, + 515, + 204 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 78, + 160, + 515, + 204 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 78, + 160, + 515, + 204 + ], + "type": "text", + "content": "Tsinghua University " + }, + { + "bbox": [ + 78, + 160, + 515, + 204 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 78, + 160, + 515, + 204 + ], + "type": "text", + "content": "Microsoft Research Asia" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "spans": [ + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 256, + 290, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 256, + 290, + 509 + ], + "spans": [ + { + "bbox": [ + 47, + 256, + 290, + 509 + ], + "type": "text", + "content": "This paper presents a method that effectively combines two prevalent visual recognition methods, i.e., image classification and contrastive language-image pre-training, dubbed iCLIP. Instead of naïve multi-task learning that use two separate heads for each task, we fuse the two tasks in a deep fashion that adapts the image classification to share the same formula and the same model weights with the language-image pre-training. To further bridge these two tasks, we propose to enhance the category names in image classification tasks using external knowledge, such as their descriptions in dictionaries. Extensive experiments show that the proposed method combines the advantages of two tasks well: the strong discrimination ability in image classification tasks due to the clean category labels, and the good zero-shot ability in CLIP tasks ascribed to the richer semantics in the text descriptions. 
In particular, it reaches " + }, + { + "bbox": [ + 47, + 256, + 290, + 509 + ], + "type": "inline_equation", + "content": "82.9\\%" + }, + { + "bbox": [ + 47, + 256, + 290, + 509 + ], + "type": "text", + "content": " top-1 accuracy on IN-1K, and meanwhile surpasses CLIP by " + }, + { + "bbox": [ + 47, + 256, + 290, + 509 + ], + "type": "inline_equation", + "content": "1.8\\%" + }, + { + "bbox": [ + 47, + 256, + 290, + 509 + ], + "type": "text", + "content": ", with similar model size, on zero-shot recognition of Kornblith 12-dataset benchmark. The code and models are publicly available at https://github.com/weiyx16/iCLIP." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 532, + 128, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 532, + 128, + 544 + ], + "spans": [ + { + "bbox": [ + 47, + 532, + 128, + 544 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 552, + 287, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 552, + 287, + 684 + ], + "spans": [ + { + "bbox": [ + 46, + 552, + 287, + 684 + ], + "type": "text", + "content": "Image classification is a classic visual problem whose goal is to classify images into a fixed set of pre-defined categories. For example, the widely used ImageNet dataset [8] carefully annotated 14 million images and categorize them into 21,841 categories chosen from the WordNet [36]. For image classification, each category provides a clear taxonomy that groups images of the same category together and separates images from different categories, and thus endows the learnt representation with strong discriminant ability. However, this classification ability is limited to a fixed set of categories [8, 29, 51]." + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 337, + 233, + 508, + 376 + ], + "blocks": [ + { + "bbox": [ + 337, + 233, + 508, + 376 + ], + "lines": [ + { + "bbox": [ + 337, + 233, + 508, + 376 + ], + "spans": [ + { + "bbox": [ + 337, + 233, + 508, + 376 + ], + "type": "image", + "image_path": "af0f5f82fdbfa99cb6fc29b27f21da79ad9876dd462996b500e6c87e28d447a9.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 385, + 547, + 506 + ], + "lines": [ + { + "bbox": [ + 304, + 385, + 547, + 506 + ], + "spans": [ + { + "bbox": [ + 304, + 385, + 547, + 506 + ], + "type": "text", + "content": "Figure 1. An illustration of the proposed iCLIP framework. The iCLIP framework can take two types of annotations for training: classes and alt-texts. It converts the conventional image classification formula to share the same text encoder and the same cosine classifier as that used in the contrastive language-image pretraining (CLIP). It also uses a dictionary-enhanced approach to enrich the original class names in the image classification problem with external information involved in dictionaries. The deep fusion and knowledge-enriched classes both greatly improve the performance compared to naive multi-task learning or performing one of the two tasks alone." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 522, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 522, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 522, + 547, + 715 + ], + "type": "text", + "content": "Recently, the method that learns to contrast image-text pairs, known as contrastive language-image pre-training (abbr. CLIP), has well made up such shortage of the conventional image classification methods to achieve strong zero-shot recognition ability [24, 44]. These methods employ a contrastive learning framework, where images and their corresponding alt-texts are treated as positive pairs, while images with all other alt-texts are treated as negative pairs. Thanks to the rich semantics involved in the alt-texts, the images can be weakly connected to almost arbitrary categories that already appear in the alt-texts, resulting in its zero-shot ability. A drawback is that the image-text pairs are usually crawled from the internet without human labeling, leading to their noisy and ambiguous nature. Thus the learnt representations are often not conceptual compact, and may lack certain discriminative ability." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 693, + 287, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 693, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 47, + 693, + 287, + 712 + ], + "type": "text", + "content": "*Corresponding Author. The work is done when Yixuan Wei, Zhuliang Yao, and Zhenda Xie are interns at Microsoft Research Asia." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2776" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "content": "This paper explores how to effectively combine these two powerful visual recognition and representation learning methods, to take advantages of both methods and data sources while relieving their shortages. We first try a naive multi-task learning framework that applies the original head networks of the two tasks on top of a shared visual encoder, and jointly learn the network with separate losses of the two tasks. 
This naive multi-task learning approach has been able to benefit each individual tasks, but the effect is marginal. We thus seek to fuse the two tasks more deeply, so that the advantages of the two tasks can be more effectively joined for better visual recognition, as well as for better transferable representations." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 229, + 289, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 229, + 289, + 434 + ], + "spans": [ + { + "bbox": [ + 46, + 229, + 289, + 434 + ], + "type": "text", + "content": "To this end, our first technique is to deeply unify the formulations of image classification and CLIP learning. By examining their formulations, we found there are two main differences: 1) Different classification losses. Image classification tasks typically use a linear classification loss which has better fitting ability due to the non-normalized nature, while the CLIP-based methods adopt a cosine classifier which has better transferability for new domains and categories [2, 6, 9, 18, 38, 57]. 2) Different parameterization methods for classifier weights. Image classification tasks usually directly optimize the parametric classification weights without a need to process text semantics in class names. The CLIP method can be regarded as generating classifier weights through a text encoder and learns the text encoder instead. The text-encoder-based classifier allows sharing between alt-texts as well as modeling their relationships, which enables the ability to tackle any classes." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 436, + 289, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 436, + 289, + 604 + ], + "spans": [ + { + "bbox": [ + 46, + 436, + 289, + 604 + ], + "type": "text", + "content": "Although the linear classifier and direct classifier weight parameterization have been common practice in image classification for many years, it is interesting to find that changing the old formulation as that in the CLIP approach has almost no performance degradation for pure image classification problems. This indicates that we can directly adapt the image classification formulation to the cosine classifier and the text encoder parameterization used by CLIP, with almost no loss. This also allows us to further share the text encoder for both class names and alt-texts. Our experiments show that this deep fusion approach performs much better than the naive multi-task method for both in-domain/zero-shot classification and multi-modal retrieval tasks learning (see 3)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 605, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 289, + 715 + ], + "type": "text", + "content": "Another gap between the image classification and CLIP lies in the different text richness. Class names are usually in short, i.e., one or a few words, and sometimes are even ambiguous and polysemous in referring to specific semantics, for example, \"night bird\" can represents either \"owl\" or \"nightingale\". On the contrary, alt-texts in CLIP are usually full sentences containing rich information. 
To further bridge the gap between the image classification and CLIP, we propose a second technique that leverages the knowledge" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 72, + 545, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 191 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 191 + ], + "type": "text", + "content": "base to enhance the original class names, such as the explanations in dictionaries. In our implementation, knowledge is simply encoded as a prefix/suffix prompt, as illustrated in Fig 1. Although simple, dictionary enhanced method shows to maintain the accuracy for pure image classification problem (see Table 1), while greatly improve the zero-shot and multi-modal retrieval performance as shown in Table 2 and 3. Note the process is just like human beings who learn new words or concepts through both real examples and explanations in dictionaries." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 192, + 546, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 192, + 546, + 407 + ], + "spans": [ + { + "bbox": [ + 304, + 192, + 546, + 407 + ], + "type": "text", + "content": "By these techniques, we present a framework that deeply fuses the two important tasks of image classification and contrastive language-image pre-training, dubbed iCLIP. Extensive experiments using different combinations of image classification and image-text pair datasets show that the iCLIP method can take advantages of both the discriminative power of image classification tasks and the zero-shot ability in CLIP-like tasks, and perform significantly better than conducting each task alone or the naive multi-task learning in both the in-domain/zero-shot classification and multi-modal retrieval problems. The iCLIP method also shows that learning a stronger transferable representation than using each of the two tasks alone, verified on a variety of downstream tasks, including ADE20K semantic segmentation [68], LVIS long-tail detection [17], and video action recognition [26], as well as different evaluation settings of few-shot and fine-tuning. Our contributions are summarized as follows:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 317, + 415, + 545, + 635 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 317, + 415, + 545, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 415, + 545, + 451 + ], + "spans": [ + { + "bbox": [ + 317, + 415, + 545, + 451 + ], + "type": "text", + "content": "- We combined two important vision tasks of image classification and contrastive language-image pretraining into a single framework." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 317, + 459, + 545, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 459, + 545, + 542 + ], + "spans": [ + { + "bbox": [ + 317, + 459, + 545, + 542 + ], + "type": "text", + "content": "- We found that the original image classification formulation can be adapted to CLIP approach with almost no performance degradation. With this finding, we present a deep fusion approach in which the two tasks share the same text encoder and the same classifier type, whose effectiveness is extensively verified on benchmarks." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 317, + 551, + 545, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 551, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 317, + 551, + 545, + 635 + ], + "type": "text", + "content": "- We proposed a simple yet effective method to introduce knowledge bases into image classification, addressing the ambiguous and polysemous issue of the originally short image names as well as further bridges the gap between classes and alt-texts. It also provides the first showcase of applying knowledge bases into computer vision problems." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 645, + 392, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 645, + 392, + 658 + ], + "spans": [ + { + "bbox": [ + 306, + 645, + 392, + 658 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "Supervised visual classification. Classification is almost ubiquitous for visual understanding tasks of various recognition granularity, e.g., image-level classification [12, 20, 28, 33, 49, 52, 58], object-level classification in" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "2777" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 204 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 204 + ], + "type": "text", + "content": "object detection [3, 15, 19, 45], pixel-level classification in semantic/instance segmentation [5, 35, 63], and video-level action classification [4, 13, 34, 43, 54]. In these tasks, the data is manually annotated to a fixed set of classes, e.g., the 1,000-class ImageNet-1K dataset [8], the 80-class COCO detection dataset [31], the 150-class ADE20K segmentation dataset [68], etc. Among these classification tasks, the image-level classification is particularly important, which has greatly advances the success of deep learning in computer vision, thanks to its high quality and transferable discriminative representations." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 205, + 288, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 205, + 288, + 277 + ], + "spans": [ + { + "bbox": [ + 46, + 205, + 288, + 277 + ], + "type": "text", + "content": "The supervised visual classification is generally performed as a " + }, + { + "bbox": [ + 46, + 205, + 288, + 277 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 46, + 205, + 288, + 277 + ], + "type": "text", + "content": "-way classification problem without considering the text semantics of the class names. The most common classifier is the linear classifier, where the classifier vector of each category is parameterized as model weights and is directly learnt through optimization [28]." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 278, + 289, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 278, + 289, + 422 + ], + "spans": [ + { + "bbox": [ + 46, + 278, + 289, + 422 + ], + "type": "text", + "content": "Contrastive language-image pre-training. Pioneered by CLIP [44] and Align [24], contrastive language-image pre-training is now attracting more and more attention due to its strong zero-shot transfer capacity. These methods learn a network to pair an image and its associated alt-text, in which the image-text pairs are crawled from the Internet. With web-scale alt-text, it is possible to cover almost all classes, and these methods do perform very well for zero-shot recognition. In their frameworks, the images and texts are embedded using two separate encoders, and the output representations of the images and alt-texts are contrasted according to the positive and negative pairs." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 423, + 289, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 423, + 289, + 532 + ], + "spans": [ + { + "bbox": [ + 46, + 423, + 289, + 532 + ], + "type": "text", + "content": "While prior to CLIP and Align, there have been a few early works leveraging alt-text or text encoders for image recognition [10,14,16,25,41,46,67]. More follow-up works appeared after CLIP and Align, including Filip [62], DeClip [30], BASIC [42], LiT [66], LiMoE [39], TCL [60], and so on. A drawback of these methods is that the image-text pairs are usually noisy without human labeling, so the learned representations are often not conceptually compact and lack strong discrimination ability." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 533, + 289, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 533, + 289, + 640 + ], + "spans": [ + { + "bbox": [ + 46, + 533, + 289, + 640 + ], + "type": "text", + "content": "Introducing knowledge into AI systems. Our approach is also related to the expert systems of the 1980s, which heavily rely on a knowledge base for reasoning [23]. Recently, in natural language processing, there have also been efforts to boost large-scale pretrained models by making use of encyclopedic [1,55] and commonsense knowledge [50]. However, in computer vision, knowledge bases are not well explored. We hope our findings can encourage more attention to incorporating human knowledge into current vision systems." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 642, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 289, + 715 + ], + "type": "text", + "content": "Combination of representation learning. Regarding the individual strengths of different representation learning approaches, there have been several works trying to combine them so as to take advantage of each approach's strength. For example, SLIP [37] combines CLIP learning with a self-supervised contrastive" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 547, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 167 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 167 + ], + "type": "text", + "content": "learning approach. CoCa [65] combines the CLIP target with an image caption task, in the hope of performing well on both understanding and generation problems. 
MaskCLIP [11] combines CLIP with masked image modeling-based self-supervised learning. In contrast, our work also aims to effectively combine different representation learning approaches so as to take advantage of both, specifically image classification and CLIP." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 167, + 547, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 167, + 547, + 396 + ], + "spans": [ + { + "bbox": [ + 304, + 167, + 547, + 396 + ], + "type": "text", + "content": "Relationship to UniCL [61]. Concurrent with our work, there is another work named UniCL [61] which also combines image classification with language-image pretraining. We hope the consistent findings will help the community in learning more powerful representations. Also note that there are two main differences between our framework and the UniCL framework [61]: 1) We involve all negative classifiers in training the supervised classification, while UniCL only involves negatives within the same batch. To make training with all negative classifiers feasible, we propose a GPU-distributed implementation that distributes the classifiers evenly across different GPUs. Our implementation shows better in-domain accuracy than UniCL when the number of categories is as large as tens of thousands (76.3% vs. 70.5%, as shown in Tab. 4). 2) We introduce a new dictionary-enhanced approach that converts the class names into semantically rich text, which proves very beneficial for zero-shot image classification and multi-modal retrieval (see Tab. 2)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 404, + 362, + 417 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 404, + 362, + 417 + ], + "spans": [ + { + "bbox": [ + 306, + 404, + 362, + 417 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 425, + 547, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 425, + 547, + 498 + ], + "spans": [ + { + "bbox": [ + 304, + 425, + 547, + 498 + ], + "type": "text", + "content": "In this section, we first review existing methods on image classification and contrastive language-image pre-training tasks. Then, we propose a unified framework to bridge the two tasks in a deep fusion fashion. Finally, we introduce dictionary-enhanced category descriptions to further align the two tasks on the input label space." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 503, + 392, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 503, + 392, + 514 + ], + "spans": [ + { + "bbox": [ + 306, + 503, + 392, + 514 + ], + "type": "text", + "content": "3.1. Preliminaries" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "text", + "content": "Image Classification. 
Given a set of " + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "inline_equation", + "content": "<" + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "text", + "content": " image, category label> pairs, i.e., " + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "inline_equation", + "content": "\\mathcal{D}^c = \\{(I_i,C_i)\\}_{i=1}^{|\\mathcal{D}^c|}" + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "text", + "content": ", image classification task targets to predict the category label of a given image, through a visual encoder " + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "inline_equation", + "content": "f_v" + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "text", + "content": ", and a parametric category classifier " + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "inline_equation", + "content": "h_c" + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "text", + "content": ", illustrated in Fig. 2 (b). The parameters of " + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "inline_equation", + "content": "h_c" + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "text", + "content": " is a matrix " + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "inline_equation", + "content": "W \\in \\mathcal{R}^{N \\times H}" + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "text", + "content": " is the number of categories and " + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "text", + "content": " is the dimension of visual embeddings. 
The visual encoder " + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "inline_equation", + "content": "f_v" + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "text", + "content": " transforms each raw image " + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "inline_equation", + "content": "I_i" + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "text", + "content": " to an embedding " + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "inline_equation", + "content": "v_i = f_v(I_i)" + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "text", + "content": ", while the classifier " + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "inline_equation", + "content": "h_c" + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "text", + "content": " predicts the distribution " + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "inline_equation", + "content": "P_i \\in \\mathcal{R}^N" + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "text", + "content": " over all pre-defined categories via an inner product between " + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "inline_equation", + "content": "v_i" + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "inline_equation", + "content": "P_i = W \\cdot v_i" + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "text", + "content": " (bias term is omitted for simplicity). Finally, a cross entropy is applied on " + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "inline_equation", + "content": "P_i" + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "inline_equation", + "content": "C_i" + }, + { + "bbox": [ + 304, + 521, + 547, + 679 + ], + "type": "text", + "content": " to calculate training loss, which is formulated as:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 332, + 685, + 547, + 717 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 332, + 685, + 547, + 717 + ], + "spans": [ + { + "bbox": [ + 332, + 685, + 547, + 717 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\frac {- 1}{| \\mathcal {D} ^ {c} |} \\sum_ {(I _ {i}, C _ {i}) \\in \\mathcal {D} ^ {c}} \\log \\frac {\\exp (W _ {C _ {i}} \\cdot v _ {i})}{\\sum_ {j = 1} ^ {N} \\exp (W _ {j} \\cdot v _ {i})}, \\qquad (1)", + "image_path": "6e01d5eed7963a6819e1be535971cb5ca0a93cd19e4d41c4fa2afadde7c286c0.jpg" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "2778" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 62, + 512, + 194 + ], + "blocks": [ + { + "bbox": [ + 72, + 62, + 512, + 194 + ], + "lines": [ + { + "bbox": [ + 72, + 62, + 512, + 194 + ], + "spans": [ + { + "bbox": [ + 72, + 62, + 512, + 194 + ], + "type": "image", 
+ "image_path": "c8774f0ac673c5836a497e48d2b1456d23aac8254209126679141c31ed7b6276.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 198, + 547, + 243 + ], + "lines": [ + { + "bbox": [ + 45, + 198, + 547, + 243 + ], + "spans": [ + { + "bbox": [ + 45, + 198, + 547, + 243 + ], + "type": "text", + "content": "Figure 2. An illustration of iCLIP framework. " + }, + { + "bbox": [ + 45, + 198, + 547, + 243 + ], + "type": "inline_equation", + "content": "\\mathcal{B}" + }, + { + "bbox": [ + 45, + 198, + 547, + 243 + ], + "type": "text", + "content": " is the batch size, " + }, + { + "bbox": [ + 45, + 198, + 547, + 243 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 45, + 198, + 547, + 243 + ], + "type": "text", + "content": " is the number of categories and " + }, + { + "bbox": [ + 45, + 198, + 547, + 243 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 45, + 198, + 547, + 243 + ], + "type": "text", + "content": " is the number of gpus. iCLIP unifies both contrastive language-image pre-training and classification tasks with shared text and visual encoder, taking alt-texts or dictionary enhanced class names as annotations. To reduce the computation, iCLIP distributes the enhanced class names over all gpus in forward, and gathers the embeddings for similarity calculation." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 255, + 257, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 255, + 257, + 266 + ], + "spans": [ + { + "bbox": [ + 46, + 255, + 257, + 266 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 255, + 257, + 266 + ], + "type": "inline_equation", + "content": "W_{j}" + }, + { + "bbox": [ + 46, + 255, + 257, + 266 + ], + "type": "text", + "content": " is the parametric weight of " + }, + { + "bbox": [ + 46, + 255, + 257, + 266 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 46, + 255, + 257, + 266 + ], + "type": "text", + "content": "-th category." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 266, + 287, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 266, + 287, + 375 + ], + "spans": [ + { + "bbox": [ + 46, + 266, + 287, + 375 + ], + "type": "text", + "content": "Contrastive Language-Image Pre-training. Given a set of " + }, + { + "bbox": [ + 46, + 266, + 287, + 375 + ], + "type": "inline_equation", + "content": "<" + }, + { + "bbox": [ + 46, + 266, + 287, + 375 + ], + "type": "text", + "content": " image, alt-text> pairs, i.e., " + }, + { + "bbox": [ + 46, + 266, + 287, + 375 + ], + "type": "inline_equation", + "content": "\\mathcal{D}^a = \\{(I_i,T_i^a)\\}_{i=1}^{|\\mathcal{D}^a|}" + }, + { + "bbox": [ + 46, + 266, + 287, + 375 + ], + "type": "text", + "content": ", contrastive language-image pre-training targets to close the distances between paired image and text while enlarging those of unpaired ones, through a visual encoder " + }, + { + "bbox": [ + 46, + 266, + 287, + 375 + ], + "type": "inline_equation", + "content": "f_v" + }, + { + "bbox": [ + 46, + 266, + 287, + 375 + ], + "type": "text", + "content": " and a text encoder " + }, + { + "bbox": [ + 46, + 266, + 287, + 375 + ], + "type": "inline_equation", + "content": "f_t" + }, + { + "bbox": [ + 46, + 266, + 287, + 375 + ], + "type": "text", + "content": ", shown in Fig. 2 (a). 
They transform the image " + }, + { + "bbox": [ + 46, + 266, + 287, + 375 + ], + "type": "inline_equation", + "content": "I_i" + }, + { + "bbox": [ + 46, + 266, + 287, + 375 + ], + "type": "text", + "content": " and the alt-text " + }, + { + "bbox": [ + 46, + 266, + 287, + 375 + ], + "type": "inline_equation", + "content": "T_i^a" + }, + { + "bbox": [ + 46, + 266, + 287, + 375 + ], + "type": "text", + "content": " to feature embeddings " + }, + { + "bbox": [ + 46, + 266, + 287, + 375 + ], + "type": "inline_equation", + "content": "v_i" + }, + { + "bbox": [ + 46, + 266, + 287, + 375 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 266, + 287, + 375 + ], + "type": "inline_equation", + "content": "s_i" + }, + { + "bbox": [ + 46, + 266, + 287, + 375 + ], + "type": "text", + "content": ", respectively. A contrastive loss function is applied to shrink the cosine distance of " + }, + { + "bbox": [ + 46, + 266, + 287, + 375 + ], + "type": "inline_equation", + "content": "v_i" + }, + { + "bbox": [ + 46, + 266, + 287, + 375 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 266, + 287, + 375 + ], + "type": "inline_equation", + "content": "s_i" + }, + { + "bbox": [ + 46, + 266, + 287, + 375 + ], + "type": "text", + "content": ", which is defined as:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 381, + 287, + 416 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 381, + 287, + 416 + ], + "spans": [ + { + "bbox": [ + 55, + 381, + 287, + 416 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\frac {- 1}{| \\mathcal {D} ^ {a} |} \\sum_ {\\substack {(I _ {i}, T _ {i} ^ {a}) \\\\ \\in \\mathcal {D} ^ {a}}} \\log \\frac {\\exp \\left(\\cos \\left(f _ {t} \\left(T _ {i} ^ {a}\\right) , v _ {i}\\right) / \\tau\\right)}{\\sum_ {T _ {j} ^ {a} \\in \\mathcal {T} ^ {a}} \\exp \\left(\\cos \\left(f _ {t} \\left(T _ {j} ^ {a}\\right) , v _ {i}\\right) / \\tau\\right)}, \\tag{2}", + "image_path": "212c26ee97638be79b29f1927c45810a8cfa7832370ca18122bc9b6d3a89493b.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 424, + 287, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 424, + 287, + 472 + ], + "spans": [ + { + "bbox": [ + 46, + 424, + 287, + 472 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 424, + 287, + 472 + ], + "type": "inline_equation", + "content": "\\cos (\\cdot ,\\cdot)" + }, + { + "bbox": [ + 46, + 424, + 287, + 472 + ], + "type": "text", + "content": " represents the cosine similarity between two embeddings, " + }, + { + "bbox": [ + 46, + 424, + 287, + 472 + ], + "type": "inline_equation", + "content": "\\mathcal{T}^a" + }, + { + "bbox": [ + 46, + 424, + 287, + 472 + ], + "type": "text", + "content": " is all the alt-texts in a batch including one positive paired alt-text and " + }, + { + "bbox": [ + 46, + 424, + 287, + 472 + ], + "type": "inline_equation", + "content": "|\\mathcal{T}^a| - 1" + }, + { + "bbox": [ + 46, + 424, + 287, + 472 + ], + "type": "text", + "content": " negative ones, and " + }, + { + "bbox": [ + 46, + 424, + 287, + 472 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 46, + 424, + 287, + 472 + ], + "type": "text", + "content": " is a temperature hyper-parameter to scale the similarities." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 472, + 288, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 472, + 288, + 604 + ], + "spans": [ + { + "bbox": [ + 45, + 472, + 288, + 604 + ], + "type": "text", + "content": "Task differences. Comparing the formations of image classification and language-image pre-training, we can draw three main differences between them. 1) Training loss functions. Classification commonly adopts a cross-entropy loss on inner-product similarity, while image-text learning uses InfoNCE loss on cosine similarity. 2) Classifier types. Classification adopts a parametric category classifier, while image-text learning uses a text encoder. 3) Label granularity. Category names in classification are usually very short, i.e., one or few words, while the captions in image-text pretraining are full sentences containing rich semantics." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 611, + 287, + 636 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 611, + 287, + 636 + ], + "spans": [ + { + "bbox": [ + 47, + 611, + 287, + 636 + ], + "type": "text", + "content": "3.2. Bridge Image Classification and Contrastive Language-Image Pre-training" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 641, + 287, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 287, + 689 + ], + "type": "text", + "content": "To bridge image classification and image-text alignment, we introduce three adaptations to align their training losses, unify the classifier types, and close the label granularity gap. The overall adaption is visualized in Fig. 3." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "type": "text", + "content": "Classification with Text Encoder. As formulated in Eq. (1), image classification commonly adopts a cross-" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 310, + 251, + 547, + 371 + ], + "blocks": [ + { + "bbox": [ + 310, + 251, + 547, + 371 + ], + "lines": [ + { + "bbox": [ + 310, + 251, + 547, + 371 + ], + "spans": [ + { + "bbox": [ + 310, + 251, + 547, + 371 + ], + "type": "image", + "image_path": "c2bed6768d0cd53928f163cd4f3c903942f1447b74e996c37a773bcc413d3570.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 378, + 546, + 434 + ], + "lines": [ + { + "bbox": [ + 304, + 378, + 546, + 434 + ], + "spans": [ + { + "bbox": [ + 304, + 378, + 546, + 434 + ], + "type": "text", + "content": "Figure 3. An illustration of our approach to bring image classification (a) to CLIP (b), from the perspective of loss function, classifier types and label granularity. We reformulate the linear classifier (a.1) with a text-encoder-based classifier (a.2), and enhance the class names with a text description from the dictionary (a.3)." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 445, + 546, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 445, + 546, + 540 + ], + "spans": [ + { + "bbox": [ + 304, + 445, + 546, + 540 + ], + "type": "text", + "content": "entropy loss on top of the inner-product similarity between the visual embedding " + }, + { + "bbox": [ + 304, + 445, + 546, + 540 + ], + "type": "inline_equation", + "content": "v_{i}" + }, + { + "bbox": [ + 304, + 445, + 546, + 540 + ], + "type": "text", + "content": " and the parametric classifier " + }, + { + "bbox": [ + 304, + 445, + 546, + 540 + ], + "type": "inline_equation", + "content": "h_c" + }, + { + "bbox": [ + 304, + 445, + 546, + 540 + ], + "type": "text", + "content": ". This formulation is not in line with the InfoNCE loss in Eq. (2), leading to a misalignment between the two paradigms. To address this issue, we adopt a cosine similarity for image classification, instead of the original inner-product similarity in Eq. (1), which formulates a cosine classifier as:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 318, + 545, + 545, + 574 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 545, + 545, + 574 + ], + "spans": [ + { + "bbox": [ + 318, + 545, + 545, + 574 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\frac {- 1}{| \\mathcal {D} ^ {c} |} \\sum_ {\\left(I _ {i}, C _ {i}\\right) \\in \\mathcal {D} ^ {c}} \\log \\frac {\\exp \\left(\\cos \\left(W _ {C _ {i}} , v _ {i}\\right) / \\tau\\right)}{\\sum_ {j = 1} ^ {N} \\exp \\left(\\cos \\left(W _ {j} , v _ {i}\\right) / \\tau\\right)}. \\tag {3}", + "image_path": "04d796ff132995805fdf6b99df27eba33f201e7b24bb8e89c231f3ffe8e38172.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 581, + 547, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 581, + 547, + 653 + ], + "spans": [ + { + "bbox": [ + 304, + 581, + 547, + 653 + ], + "type": "text", + "content": "Cosine similarity is a common practice in metric learning [40]. It can smoothly align the supervised image classification with the cross-modal contrastive pre-training in terms of learning objective function, i.e., Eq. (2). Moreover, our experiments demonstrate that this cosine classifier performs on par with the traditional linear classifier (see Tab. 1)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "type": "text", + "content": "The cosine classifier aligns the training losses of two tasks. However, the annotations, i.e., category labels and captions, are modeled separately by the parametric category classifier " + }, + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "type": "inline_equation", + "content": "h_c" + }, + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "type": "text", + "content": " and the text encoder " + }, + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "type": "inline_equation", + "content": "f_t" + }, + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "type": "text", + "content": ". As analyzed in Sec. 
4.3, shallowly combining the two tasks with a shared" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2779" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": "visual encoder " + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "f_{v}" + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": " and two separate task heads does not fully take advantage of the gold annotations in image classification and rich concepts in textual captions, resulting in a suboptimal solution with limited transferring capacity." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 120, + 288, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 120, + 288, + 204 + ], + "spans": [ + { + "bbox": [ + 46, + 120, + 288, + 204 + ], + "type": "text", + "content": "To tackle this issue, we take label semantics into consideration and propose to utilize the text encoder " + }, + { + "bbox": [ + 46, + 120, + 288, + 204 + ], + "type": "inline_equation", + "content": "f_{t}" + }, + { + "bbox": [ + 46, + 120, + 288, + 204 + ], + "type": "text", + "content": " as a meta classifier for image classification. Formally, we replace the label index " + }, + { + "bbox": [ + 46, + 120, + 288, + 204 + ], + "type": "inline_equation", + "content": "C_i" + }, + { + "bbox": [ + 46, + 120, + 288, + 204 + ], + "type": "text", + "content": " with its class name " + }, + { + "bbox": [ + 46, + 120, + 288, + 204 + ], + "type": "inline_equation", + "content": "M_{i}" + }, + { + "bbox": [ + 46, + 120, + 288, + 204 + ], + "type": "text", + "content": ", and generate the classifier weight " + }, + { + "bbox": [ + 46, + 120, + 288, + 204 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 46, + 120, + 288, + 204 + ], + "type": "text", + "content": " on-the-fly through the text encoder " + }, + { + "bbox": [ + 46, + 120, + 288, + 204 + ], + "type": "inline_equation", + "content": "f_{t}" + }, + { + "bbox": [ + 46, + 120, + 288, + 204 + ], + "type": "text", + "content": " which is shared with image-text pre-training. The new formulation is represented as:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 211, + 287, + 246 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 211, + 287, + 246 + ], + "spans": [ + { + "bbox": [ + 58, + 211, + 287, + 246 + ], + "type": "interline_equation", + "content": "\\mathcal{L} = \\frac{-1}{|\\mathcal{D}^{c}|}\\sum_{\\substack{(I_{i},M_{i})\\\\ \\in \\mathcal{D}^{c}}}\\log \\frac{\\exp\\left(\\cos\\left(f_{t}\\left(M_{i}\\right),v_{i}\\right) / \\tau\\right)}{\\sum_{j = 1}^{N}\\exp\\left(\\cos\\left(f_{t}\\left(M_{j}\\right),v_{i}\\right) / \\tau\\right)}. 
\\tag{4}", + "image_path": "05a1f956d0b47eb5826a6fa3a20298cca339934af01c4e9118d9c2f158e086aa.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 253, + 287, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 253, + 287, + 312 + ], + "spans": [ + { + "bbox": [ + 46, + 253, + 287, + 312 + ], + "type": "text", + "content": "In this way, the text encoder " + }, + { + "bbox": [ + 46, + 253, + 287, + 312 + ], + "type": "inline_equation", + "content": "f_{t}" + }, + { + "bbox": [ + 46, + 253, + 287, + 312 + ], + "type": "text", + "content": " is not only used to extract semantics from gold category labels, but also capture textual information from image captions. Both the visual and textual encoders are shared across the two tasks, leading to a deep fusion of the two tasks." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 314, + 287, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 314, + 287, + 503 + ], + "spans": [ + { + "bbox": [ + 46, + 314, + 287, + 503 + ], + "type": "text", + "content": "Classification with Dictionary Enhancement. The cosine classifier with text encoder as a meta network has largely unify the two tasks in model training. In this step, we further align them on input label granularity, reducing the disparity between label names (one or few words) and image captions (a complete sentence). Our proposal is to integrate external knowledge into label names. More specifically, for each label names, we introduce detailed descriptions from its corresponding synset in the dictionary WordNet [36] as the external knowledge and create a pseudo sentence as label for each categories. We combine the original class names and their dictionary descriptions to form the enhanced texts as the input to the text encoder. Also, we add a prompt to make the sentence more fluent. The final dictionary-enhanced description for each category is formed as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 61, + 513, + 287, + 527 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 513, + 287, + 527 + ], + "spans": [ + { + "bbox": [ + 61, + 513, + 287, + 527 + ], + "type": "interline_equation", + "content": "\\mathcal {T} ^ {c} = \\mathrm {A p h o t o o f a} \\left\\{\\mathrm {N A M E} \\right\\} _ {C _ {i}}, \\left\\{\\mathrm {D E S C R I P T I O N} \\right\\} _ {C _ {i}}. \\tag {5}", + "image_path": "4da5ec1f99cec3d7f8a982552134d6634d84133ee904539cf80c91ed4ae5d52e.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 534, + 287, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 534, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 46, + 534, + 287, + 689 + ], + "type": "text", + "content": "Such dictionary-enhanced descriptions have similar label granularity to alt-text, and thus further bring image classification closer to image-text alignment. Moreover, the description introduces more details of each category, being capable of reducing potential misconception. For example, the class \"night bird\" actually includes several kinds of birds, like owl, nightingale, etc. Such a category name cannot allow the model to learn precise representations due to the blurry concepts. If we augment the category with more external knowledge, such as \"a photo of a night bird, any bird associated with night: owl, nightingale, nighthawk\", it will help the model learn discriminative representation on distinguishing different concepts (e.g., bird species)." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "content": "A Unified Framework. The above three steps adapt image classification to image-text alignment from the perspec" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "type": "text", + "content": "tive of training loss, classifier type and annotation granularity, respectively. Towards the final unification, we propose a new framework dubbed iCLIP, as presented in Fig. 2 (c), which bridges Image Classification and Image-Text Alignment with a unified contrastive learning loss formulated as:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 317, + 146, + 545, + 178 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 146, + 545, + 178 + ], + "spans": [ + { + "bbox": [ + 317, + 146, + 545, + 178 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\frac {- 1}{| \\mathcal {D} |} \\sum_ {\\left(I _ {i}, T _ {i}\\right) \\in \\mathcal {D}} \\log \\frac {\\exp \\left(\\cos \\left(f _ {t} \\left(T _ {i}\\right) , v _ {i}\\right) / \\tau\\right)}{\\sum_ {T _ {j} \\in \\tau} \\exp \\left(\\cos \\left(f _ {t} \\left(T _ {j}\\right) , v _ {i}\\right) / \\tau\\right)}, \\tag {6}", + "image_path": "f9cac02c08dbce387807742f25f6fb6cf5e6a23d7115d4bef932696016eecb65.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "spans": [ + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "text", + "content": " is a set consisting of the image classification data " + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "inline_equation", + "content": "D^c" + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "text", + "content": " and the image-text alignment data " + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "inline_equation", + "content": "D^a" + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "inline_equation", + "content": "\\mathcal{D} = \\{\\mathcal{D}^c, D^a\\}" + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "text", + "content": ", while " + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "text", + "content": " indicates a combination of " + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "inline_equation", + "content": "T^c" + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "inline_equation", + "content": "T^a" + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "inline_equation", + 
"content": "\\mathcal{T} = \\{\\mathcal{T}^c, T^a\\}" + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "text", + "content": ". Text label " + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "inline_equation", + "content": "T_i" + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "text", + "content": " is either an image caption " + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "inline_equation", + "content": "T_i^a" + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "text", + "content": " sampled from " + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "inline_equation", + "content": "T^a" + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "text", + "content": " or a dictionary-enhanced description " + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "inline_equation", + "content": "T_i^c" + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "text", + "content": " sampled from " + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "inline_equation", + "content": "T^c" + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "text", + "content": ". It is worth noting that, with this unified framework, both the text encoder " + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "inline_equation", + "content": "f_t" + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "text", + "content": " and the visual encoder " + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "inline_equation", + "content": "f_v" + }, + { + "bbox": [ + 304, + 182, + 545, + 350 + ], + "type": "text", + "content": " are shared across the two tasks, achieving a deep fusion. The proposed unified framework is able to leverage any combination of tag-labeled and caption-labeled image datasets for pre-training. This combination allows the model to learn more discriminative representation, while capturing more visual concepts from the textual description. On the other hand, our iCLIP method is efficient." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 350, + 545, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 350, + 545, + 506 + ], + "spans": [ + { + "bbox": [ + 304, + 350, + 545, + 506 + ], + "type": "text", + "content": "Distributed Implementation. In our iCLIP framework, the text embedding of each category is generated by the shared text encoder on-the-fly. This computation is affordable when the number of categories " + }, + { + "bbox": [ + 304, + 350, + 545, + 506 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 350, + 545, + 506 + ], + "type": "text", + "content": " is not large. However, it will become infeasible if category number scales up to be large, such as 22k categories in ImageNet-22K [8]. To make the iCLIP framework feasible for large-category classification data in practice, we adopt a distribution implementation strategy [6]. 
Specifically, we distribute all the enhanced class names evenly over " + }, + { + "bbox": [ + 304, + 350, + 545, + 506 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 304, + 350, + 545, + 506 + ], + "type": "text", + "content": " GPUs in the forward pass, and gather the embeddings from each GPU for similarity calculation, reducing both the computation cost and the memory consumption of the text encoder to " + }, + { + "bbox": [ + 304, + 350, + 545, + 506 + ], + "type": "inline_equation", + "content": "1 / G" + }, + { + "bbox": [ + 304, + 350, + 545, + 506 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 337, + 541, + 512, + 609 + ], + "blocks": [ + { + "bbox": [ + 304, + 515, + 545, + 537 + ], + "lines": [ + { + "bbox": [ + 304, + 515, + 545, + 537 + ], + "spans": [ + { + "bbox": [ + 304, + 515, + 545, + 537 + ], + "type": "text", + "content": "Table 1. Ablation on formulation adaptations for the image classification task. Models are trained with 100 epochs." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 337, + 541, + 512, + 609 + ], + "lines": [ + { + "bbox": [ + 337, + 541, + 512, + 609 + ], + "spans": [ + { + "bbox": [ + 337, + 541, + 512, + 609 + ], + "type": "table", + "html": "
#Cosine LossText-enc. as ClassifierEnhanced classesIN-1K
180.9
281.5
381.2
481.4
", + "image_path": "b954c033fd52506bdd67c6133077a625763f59a6f4d87136c8735e99f315ea21.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 615, + 382, + 628 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 615, + 382, + 628 + ], + "spans": [ + { + "bbox": [ + 306, + 615, + 382, + 628 + ], + "type": "text", + "content": "4. Experiment" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 633, + 545, + 716 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 633, + 545, + 716 + ], + "spans": [ + { + "bbox": [ + 304, + 633, + 545, + 716 + ], + "type": "text", + "content": "We verify the effectiveness of the proposed iCLIP framework through the comparisons to single-task baselines and a naive multi-task learning baseline. The comparisons are conducted in three settings covering different scales of pretraining data. In evaluation, we assess the models on different tasks, including in-domain classification, zero-shot classification, multi-modal retrieval, and downstream tasks." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2780" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 53, + 92, + 536, + 206 + ], + "blocks": [ + { + "bbox": [ + 47, + 64, + 547, + 89 + ], + "lines": [ + { + "bbox": [ + 47, + 64, + 547, + 89 + ], + "spans": [ + { + "bbox": [ + 47, + 64, + 547, + 89 + ], + "type": "text", + "content": "Table 2. Ablation study conducted on IN-22K [8] and YFCC-14M [53]. Models are pre-trained from scratched with 32 epochs following UniCL [61]. COCO and Flickr stand for MSCOCO [31] and Flickr30K [64]. IR and TR stand for image retrieval and text retrieval." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 53, + 92, + 536, + 206 + ], + "lines": [ + { + "bbox": [ + 53, + 92, + 536, + 206 + ], + "spans": [ + { + "bbox": [ + 53, + 92, + 536, + 206 + ], + "type": "table", + "html": "
Zero-shot classificationZero-shot retrieval
#Training DataMethodIN-1K14-dataset avg.Flickr-IRFlickr-TRCOCO-IRCOCO-TR
1YFCC-14MCLIP [44]30.136.321.537.912.521.2
2YFCC-14M (half) + IN-21K (half)iCLIP (w/o Desc.)39.445.427.639.113.020.4
3YFCC-14M (half) + IN-21K (half)iCLIP45.949.931.949.815.527.2
4YFCC-14M + IN-21KiCLIP (w/o Desc.)41.149.433.451.216.326.5
5YFCC-14M + IN-21KiCLIP50.954.437.155.718.530.7
6YFCC-14M + IN-22KiCLIP (w/o Desc.)76.251.633.248.214.423.8
7YFCC-14M + IN-22KiCLIP76.355.536.255.318.029.7
", + "image_path": "5ebf13e05a96325c267fe24c466a6f36c4f49d14f2e1a2927171824e23809fff.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 217, + 164, + 231 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 217, + 164, + 231 + ], + "spans": [ + { + "bbox": [ + 47, + 217, + 164, + 231 + ], + "type": "text", + "content": "4.1. Experimental Setup" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 236, + 288, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 236, + 288, + 261 + ], + "spans": [ + { + "bbox": [ + 47, + 236, + 288, + 261 + ], + "type": "text", + "content": "Pre-training data and settings. We consider three different scales of dataset combination for model pre-training." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 264, + 289, + 714 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 51, + 264, + 289, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 264, + 289, + 408 + ], + "spans": [ + { + "bbox": [ + 51, + 264, + 289, + 408 + ], + "type": "text", + "content": "- ImageNet-1K [8] and GCC-3M [48]. In this setting, we use ImageNet-1K as the classification data while GCC-3M as the image-text data. We adopt a Swin-T [33] initialized with MoBY [59] as the visual encoder, while for the textual encoder, we use a pretrained RoBERTa-B [32]. We sample half number of images from each dataset in a mini-batch and train the models with a batch size of " + }, + { + "bbox": [ + 51, + 264, + 289, + 408 + ], + "type": "inline_equation", + "content": "128 \\times 8" + }, + { + "bbox": [ + 51, + 264, + 289, + 408 + ], + "type": "text", + "content": " V100 GPUs for 100 epochs. The highest learning rate is 2e-4 with a cosine learning rate schedule and 5 epochs warm-up. Weight decay is set to be 0.01. RandAugment [7] and stochastic depth [21] with a rate of 0.1 are used for visual encoder only." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 411, + 289, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 411, + 289, + 628 + ], + "spans": [ + { + "bbox": [ + 51, + 411, + 289, + 628 + ], + "type": "text", + "content": "- ImageNet-22K [8] and YFCC-14M [53]. We follow UniCL [61] to train all models from scratch with 32 epochs for a fair comparison with it. Swin-T [33] is used as the visual encoder, and a 12-layer transformer with a hidden dimension of 512 same as CLIP [44] is used as the text encoder. A batch size of " + }, + { + "bbox": [ + 51, + 411, + 289, + 628 + ], + "type": "inline_equation", + "content": "512 \\times 16" + }, + { + "bbox": [ + 51, + 411, + 289, + 628 + ], + "type": "text", + "content": " GPUs is adopted. The highest learning rate is selected from 2e-4 and 8e-4. Other regularization is the same as previous, except for a larger weight decay of 0.05. We also conduct experiments using two variants of this setup for a fair and clean comparison with the methods that use one task alone (IC or CLIP): 1) Excluding the 1,000 ImageNet-1K classes in ImageNet-22K dataset (dubbed IN-21K). This setup variant allows us to evaluate the zero-shot accuracy on ImageNet-1K for different methods; 2) Half images of the ImageNet-21K and YFCC-14M are used, such that the dataset size and training iterations are the same as that used in one single task." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 630, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 630, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 52, + 630, + 288, + 714 + ], + "type": "text", + "content": "- ImageNet-22K [8] and Laion-400M [47]. For this large-scale pre-training setting, we adopt a Swin-B initialized with MoBY as the visual encoder and a pre-trained RoBERTa-B as the text encoder. We train iCLIP for 100K iters, with a batch size of " + }, + { + "bbox": [ + 52, + 630, + 288, + 714 + ], + "type": "inline_equation", + "content": "192 \\times 64" + }, + { + "bbox": [ + 52, + 630, + 288, + 714 + ], + "type": "text", + "content": " V100 GPUs. In each mini batch, we sample 64 images from IN-22K and 128 images from Laion-400M. The model is trained on" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 318, + 217, + 545, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 217, + 545, + 277 + ], + "spans": [ + { + "bbox": [ + 318, + 217, + 545, + 277 + ], + "type": "text", + "content": "classification data for around 30 epochs and on image-text data for around 2 epochs equivalently. The highest learning rate is 1e-3 with a cosine learning rate schedule and a warm-up for " + }, + { + "bbox": [ + 318, + 217, + 545, + 277 + ], + "type": "inline_equation", + "content": "16.7\\mathrm{K}" + }, + { + "bbox": [ + 318, + 217, + 545, + 277 + ], + "type": "text", + "content": " liters. Weight decay is set to 0.05 and drop depth rate is set to 0.2." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 286, + 545, + 311 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 286, + 545, + 311 + ], + "spans": [ + { + "bbox": [ + 306, + 286, + 545, + 311 + ], + "type": "text", + "content": "Evaluation datasets and settings. During evaluation, we assess the models considering five different settings." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 310, + 314, + 545, + 644 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 310, + 314, + 545, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 314, + 545, + 398 + ], + "spans": [ + { + "bbox": [ + 310, + 314, + 545, + 398 + ], + "type": "text", + "content": "- Zero-shot classification. We evaluate the concept coverage and generalization ability of the models on three datasets: 1) ImageNet-1K variants, including IN-1K [8], and IN-Sketch (IN-S) [56]. Top-1 accuracy is reported; 2) the widely-used Kornblith 12-dataset benchmark [27]; 3) 14 datasets used in UniCL [61]. For 2) and 3), averaged accuracy is reported." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 310, + 399, + 545, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 399, + 545, + 459 + ], + "spans": [ + { + "bbox": [ + 310, + 399, + 545, + 459 + ], + "type": "text", + "content": "- Zero-shot multi-modal retrieval. Flickr30K [64] (1K test set) and MSCOCO [31] (5K test set) are used to evaluate the alignment between image and text modalities. We report the Top-1 recall on both image retrieval (IR) and text retrieval (TR)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 310, + 461, + 545, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 461, + 545, + 510 + ], + "spans": [ + { + "bbox": [ + 310, + 461, + 545, + 510 + ], + "type": "text", + "content": "- In-domain classification. 
ImageNet-1K data is included in some of our pre-training setups, so we conduct indomain evaluation on ImageNet-1K in these cases. The Top-1 accuracy is reported." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 310, + 511, + 545, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 511, + 545, + 559 + ], + "spans": [ + { + "bbox": [ + 310, + 511, + 545, + 559 + ], + "type": "text", + "content": "- Few-shot classification. Following CLIP [44], we also evaluate the models on few-shot classification task using Kornblith 12-dataset with a frozen visual encoder. Averaged accuracy is reported." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 310, + 561, + 545, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 561, + 545, + 644 + ], + "spans": [ + { + "bbox": [ + 310, + 561, + 545, + 644 + ], + "type": "text", + "content": "- Fine-tuning on downstream tasks. To validate the generalization ability of iCLIP, the models are fine-tuned and compared on semantic segmentation [68], long-tail detection [17], and video action recognition [26]. We report val mIoU,_bbox mAP and Top-1 accuracy, respectively. The detailed settings can be found in the supplementary material." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 658, + 527, + 673 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 658, + 527, + 673 + ], + "spans": [ + { + "bbox": [ + 306, + 658, + 527, + 673 + ], + "type": "text", + "content": "4.2. Experiments on IN-1K [8] and CC3M [48]" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "type": "text", + "content": "Formulation adaptations for image classification. Tab. 1 ablates the effect of adapting the common image classification to that used in iCLIP, including both cosine loss, the" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "text", + "content": "2781" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 65, + 123, + 271, + 203 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 288, + 115 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 288, + 115 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 288, + 115 + ], + "type": "text", + "content": "Table 3. Ablation conducted on IN-1K [8] and GCC-3M [48] combined data. For the models only using IN-1K, we train them for 100 epochs. For the models only using GCC-3M, we train them with the same iterations and batch size as the ones used in IN-1K." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 65, + 123, + 271, + 203 + ], + "lines": [ + { + "bbox": [ + 65, + 123, + 271, + 203 + ], + "spans": [ + { + "bbox": [ + 65, + 123, + 271, + 203 + ], + "type": "table", + "html": "
12-datasetImageNet-related
#Methodavg.IN-1KIN-S
1Sup-only-80.929.4
2VL-only31.432.418.3
3Naïve multi-task35.180.638.3
4iCLIP (w/o Desc.)37.780.538.6
5iCLIP39.180.438.7
", + "image_path": "30619a3d1a609564c42d4f80068a74647e1cfe905ef647122d4dc96f1cc6c23d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 61, + 245, + 274, + 327 + ], + "blocks": [ + { + "bbox": [ + 47, + 214, + 287, + 237 + ], + "lines": [ + { + "bbox": [ + 47, + 214, + 287, + 237 + ], + "spans": [ + { + "bbox": [ + 47, + 214, + 287, + 237 + ], + "type": "text", + "content": "Table 4. Comparison with UniCL. Models are pre-trained from scratched with 32 epochs, following UniCL [61]." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 61, + 245, + 274, + 327 + ], + "lines": [ + { + "bbox": [ + 61, + 245, + 274, + 327 + ], + "spans": [ + { + "bbox": [ + 61, + 245, + 274, + 327 + ], + "type": "table", + "html": "
#Training DataMethodIN-1K14-dataset avg.
1YFCC + IN-21K (half)UniCL [61]36.445.5
2YFCC + IN-21K (half)iCLIP45.949.9
3YFCC + IN-21KUniCL [61]40.549.1
4YFCC + IN-21KiCLIP50.954.4
5YFCC + IN-22KUniCL [61]70.552.4
6YFCC + IN-22KiCLIP76.355.5
", + "image_path": "2523dbf155557c5cb05545c56b4c48e62805c05cb94b154b6ca35d4ea4d018e8.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 342, + 287, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 342, + 287, + 449 + ], + "spans": [ + { + "bbox": [ + 46, + 342, + 287, + 449 + ], + "type": "text", + "content": "text-encoder-based classifier and enhanced class names using ImageNet-1K dataset. It can be seen that the cosine classification loss gets slightly better performance than the linear one, with a " + }, + { + "bbox": [ + 46, + 342, + 287, + 449 + ], + "type": "inline_equation", + "content": "+0.6\\%" + }, + { + "bbox": [ + 46, + 342, + 287, + 449 + ], + "type": "text", + "content": " gain on IN-1K (see #1 v.s. #2). When further adapting the text-encoder-based classifier (#3) and enhancing class names from dictionaries (#4), it has almost no performance degradation (" + }, + { + "bbox": [ + 46, + 342, + 287, + 449 + ], + "type": "inline_equation", + "content": "+0.3\\%" + }, + { + "bbox": [ + 46, + 342, + 287, + 449 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 342, + 287, + 449 + ], + "type": "inline_equation", + "content": "+0.5\\%" + }, + { + "bbox": [ + 46, + 342, + 287, + 449 + ], + "type": "text", + "content": " on IN-1K compared to the linear classifier), which allows us to further sharing the text encoder with CLIP for tasks unification." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 450, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 450, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 450, + 288, + 714 + ], + "type": "text", + "content": "Zero-shot and in-domain classification. With previous adaptations on the image classification formulation, we can further share the text encoder between the two tasks. To ablate the effect of sharing the text encoder, we set a naive multi-task baseline, that combines image classification and CLIP in a shallow fusion, i.e., simply averaging the loss Eq. (1) and Eq. (2). Each has its own head network, i.e., the fully-connected layer " + }, + { + "bbox": [ + 46, + 450, + 288, + 714 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 46, + 450, + 288, + 714 + ], + "type": "text", + "content": " for Eq. (1) and the text encoder " + }, + { + "bbox": [ + 46, + 450, + 288, + 714 + ], + "type": "inline_equation", + "content": "f_{t}" + }, + { + "bbox": [ + 46, + 450, + 288, + 714 + ], + "type": "text", + "content": " for Eq. (2). The best performances of the two heads are reported in Tab. 3. With a shared text encoder across the two tasks, our iCLIP (w/o Desc.) outperforms the naive multi-task on Kornblith 12-dataset zero-shot classification by " + }, + { + "bbox": [ + 46, + 450, + 288, + 714 + ], + "type": "inline_equation", + "content": "+2.6\\%" + }, + { + "bbox": [ + 46, + 450, + 288, + 714 + ], + "type": "text", + "content": " in average, while they are comparable on ImageNet-related datasets classification (see #3 v.s. #4). Our iCLIP deeply unifies two tasks, thus better gathering the merits of the two learning protocols. When compared with the supervised softmax classifier baseline, i.e., Eq. (1) Sup-only, and the contrastive image-text pre-training baseline, i.e., Eq. 
(2) VL-only, our method is slightly worse than Sup-only on IN-1K by " + }, + { + "bbox": [ + 46, + 450, + 288, + 714 + ], + "type": "inline_equation", + "content": "0.4\\%" + }, + { + "bbox": [ + 46, + 450, + 288, + 714 + ], + "type": "text", + "content": ", while achieving superior performance on other evaluation settings, " + }, + { + "bbox": [ + 46, + 450, + 288, + 714 + ], + "type": "inline_equation", + "content": "+6.3\\%" + }, + { + "bbox": [ + 46, + 450, + 288, + 714 + ], + "type": "text", + "content": " better than VL-only method on 12-dataset zero-shot testing and " + }, + { + "bbox": [ + 46, + 450, + 288, + 714 + ], + "type": "inline_equation", + "content": "+9.2\\%" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "type": "text", + "content": "better than Sup-only method on IN-S (see #4 v.s. #1). Moreover, the dictionary enhancement on class names (#5) can further bring an average of " + }, + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "type": "inline_equation", + "content": "+1.4\\%" + }, + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "type": "text", + "content": " improvements on Kornblith 12-dataset, revealing the increased discriminative representation for ambiguous concepts." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 137, + 545, + 151 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 137, + 545, + 151 + ], + "spans": [ + { + "bbox": [ + 305, + 137, + 545, + 151 + ], + "type": "text", + "content": "4.3. Experiments on IN-22K [8] and YFCC-14M [53]" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 156, + 545, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 156, + 545, + 276 + ], + "spans": [ + { + "bbox": [ + 304, + 156, + 545, + 276 + ], + "type": "text", + "content": "Effects of the unified framework. Here, we further ablate the effect of the unified formulation for deep fusion of the two tasks. In #2, #4 and #6 of Tab. 2, we show the results of our unified framework under three different dataset combination setups. Compared with the CLIP baseline (#1), our iCLIP (#2) earns " + }, + { + "bbox": [ + 304, + 156, + 545, + 276 + ], + "type": "inline_equation", + "content": "+8.3\\%" + }, + { + "bbox": [ + 304, + 156, + 545, + 276 + ], + "type": "text", + "content": " gains on IN-1K zero-shot classification and also " + }, + { + "bbox": [ + 304, + 156, + 545, + 276 + ], + "type": "inline_equation", + "content": "+9.1\\%" + }, + { + "bbox": [ + 304, + 156, + 545, + 276 + ], + "type": "text", + "content": " improvements when evaluated on the 14-dataset. In addition, our iCLIP is better than the CLIP baseline on most cross-modal retrieval benchmarks, while only using half of the visual-language data in pre-training." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 276, + 546, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 276, + 546, + 490 + ], + "spans": [ + { + "bbox": [ + 304, + 276, + 546, + 490 + ], + "type": "text", + "content": "Effects of dictionary enhancement. Furthermore, we dissect the model to study the contributions of dictionary-enhanced category description. From Tab. 
2, we can see that enhancing each class name with an informative description from the dictionary brings consistent improvements on both zero-shot classification and zero-shot retrieval under three dataset combination setups (see #3, #5 and #7). In particular, when pre-trained with half images of YFCC-14M and IN-21K (#3), the integrated knowledge contributes " + }, + { + "bbox": [ + 304, + 276, + 546, + 490 + ], + "type": "inline_equation", + "content": "+6.5\\%" + }, + { + "bbox": [ + 304, + 276, + 546, + 490 + ], + "type": "text", + "content": " improvements on IN-1K zero-shot classification, which makes our iCLIP reach " + }, + { + "bbox": [ + 304, + 276, + 546, + 490 + ], + "type": "inline_equation", + "content": "45.9\\%" + }, + { + "bbox": [ + 304, + 276, + 546, + 490 + ], + "type": "text", + "content": ", being " + }, + { + "bbox": [ + 304, + 276, + 546, + 490 + ], + "type": "inline_equation", + "content": "+5.4\\%" + }, + { + "bbox": [ + 304, + 276, + 546, + 490 + ], + "type": "text", + "content": " better than the UniCL method [61] with full images of YFCC-14M and IN-21K (see #3 in Tab. 4). More importantly, the enhanced class names are beneficial to cross-modal retrieval. For example, for image-to-text search, the dictionary-enhanced description can bring " + }, + { + "bbox": [ + 304, + 276, + 546, + 490 + ], + "type": "inline_equation", + "content": "10.7\\%" + }, + { + "bbox": [ + 304, + 276, + 546, + 490 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 276, + 546, + 490 + ], + "type": "inline_equation", + "content": "6.8\\%" + }, + { + "bbox": [ + 304, + 276, + 546, + 490 + ], + "type": "text", + "content": " top-1 recall gains on Flickr30K [64] and MSCOCO [31] respectively, as reported in row 3 of Tab. 2." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 491, + 545, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 491, + 545, + 574 + ], + "spans": [ + { + "bbox": [ + 304, + 491, + 545, + 574 + ], + "type": "text", + "content": "Comparison with UniCL [61]. Tab. 4 summarizes our comparison to UniCL. As in UniCL, we evaluate our models on IN-1K and 14 datasets. Under three different dataset combination setups, our iCLIP surpasses UniCL by at least " + }, + { + "bbox": [ + 304, + 491, + 545, + 574 + ], + "type": "inline_equation", + "content": "+5\\%" + }, + { + "bbox": [ + 304, + 491, + 545, + 574 + ], + "type": "text", + "content": " on IN-1K image classification, while reaching " + }, + { + "bbox": [ + 304, + 491, + 545, + 574 + ], + "type": "inline_equation", + "content": "55.5\\%" + }, + { + "bbox": [ + 304, + 491, + 545, + 574 + ], + "type": "text", + "content": " averaged accuracy on 14 datasets (#6), being " + }, + { + "bbox": [ + 304, + 491, + 545, + 574 + ], + "type": "inline_equation", + "content": "+3.1\\%" + }, + { + "bbox": [ + 304, + 491, + 545, + 574 + ], + "type": "text", + "content": " better than UniCL (#5)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 575, + 542, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 575, + 542, + 588 + ], + "spans": [ + { + "bbox": [ + 306, + 575, + 542, + 588 + ], + "type": "text", + "content": "4.4. 
Experiments on IN-22K and Laion-400M [47]" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": "Zero-shot and in-domain classification. Tab. 5 presents a large scale experiment using the publicly accessible largescale data: Laion-400M [47] and IN-22K [8]. For Sup-only, i.e. Eq. (1), we use the released version from Swin [33], which is trained on IN-22K for 90 epochs. For VL-only, i.e. Eq. (2), we pre-train it on Laion-400M with a similar image numbers (#im). Our method is comparable to Sup only on IN-1K, while it gets " + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "inline_equation", + "content": "+17.8\\%" + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "inline_equation", + "content": "+8.3\\%" + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": " better results than the two baselines on IN-S, demonstrating its robustness to natural distribution shifts. Our iCLIP surpasses" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2782" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 53, + 113, + 542, + 213 + ], + "blocks": [ + { + "bbox": [ + 46, + 70, + 547, + 105 + ], + "lines": [ + { + "bbox": [ + 46, + 70, + 547, + 105 + ], + "spans": [ + { + "bbox": [ + 46, + 70, + 547, + 105 + ], + "type": "text", + "content": "Table 5. Ablation study on IN-22K [8] and Laion-400M [47]. We evaluate the models on ImageNet datasets (IN-1K [8] and IN-S [56]) and zero-shot evaluation on the Kornblith 12-dataset benchmark [27]. Few-shot learning on Kornblith 12-dataset and the fine-tuning on three downstream tasks are conducted to evaluate the transfer capability of iCLIP. " + }, + { + "bbox": [ + 46, + 70, + 547, + 105 + ], + "type": "inline_equation", + "content": "\\ddagger" + }, + { + "bbox": [ + 46, + 70, + 547, + 105 + ], + "type": "text", + "content": " denotes for our reproduction using released checkpoints." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 53, + 113, + 542, + 213 + ], + "lines": [ + { + "bbox": [ + 53, + 113, + 542, + 213 + ], + "spans": [ + { + "bbox": [ + 53, + 113, + 542, + 213 + ], + "type": "table", + "html": "
Visual encoderPre-trainImageNet-related12-dataset avg.downstream tasks
MethodArch.length (#im).IN-1KIN-S0-shot4-shotADE20KLVISKinetics400
CLIP [44]ViT-B/16400M×32 eps68.646.6‡68.866.4‡---
OpenCLIP [22]ViT-B/16400M×32 eps67.152.4‡70.9‡----
Sup-onlySwin-Base14M×90 eps82.642.0-67.652.135.982.7
VL-onlySwin-Base400M×3 eps61.151.567.273.352.036.682.3
iCLIPSwin-Base400M×2 eps+14M×30 eps82.959.870.678.152.637.983.1
", + "image_path": "3547be5ffab658d4af25c9276d306add2bcb44a390381b5e1592bdcd373d1a76.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 72, + 228, + 259, + 345 + ], + "blocks": [ + { + "bbox": [ + 72, + 228, + 259, + 345 + ], + "lines": [ + { + "bbox": [ + 72, + 228, + 259, + 345 + ], + "spans": [ + { + "bbox": [ + 72, + 228, + 259, + 345 + ], + "type": "image", + "image_path": "f086c3b3a0ac2a1c5a5f821886e5d46ca51c0858ce47e88287b36d29c6b69d1b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 350, + 288, + 407 + ], + "lines": [ + { + "bbox": [ + 46, + 350, + 288, + 407 + ], + "spans": [ + { + "bbox": [ + 46, + 350, + 288, + 407 + ], + "type": "text", + "content": "Figure 4. Major comparison with the CLIP-ViT-B/16 of few-shot classification (top-1 accuracy) on the Kornblith 12-dataset. " + }, + { + "bbox": [ + 46, + 350, + 288, + 407 + ], + "type": "inline_equation", + "content": "\\star" + }, + { + "bbox": [ + 46, + 350, + 288, + 407 + ], + "type": "text", + "content": " denotes the zero-shot performances. Results of CLIP on few-shot classification are reproduced using released model. We run every experiments three times and the averaged results are reported." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 424, + 287, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 424, + 287, + 533 + ], + "spans": [ + { + "bbox": [ + 46, + 424, + 287, + 533 + ], + "type": "text", + "content": "OpenCLIP [22], which also uses Laion-400M data for pretraining [47], by more than " + }, + { + "bbox": [ + 46, + 424, + 287, + 533 + ], + "type": "inline_equation", + "content": "+15\\%" + }, + { + "bbox": [ + 46, + 424, + 287, + 533 + ], + "type": "text", + "content": " on IN-1K, mainly due to the pre-training data IN-22K covers the visual concepts in IN-1K. Moreover, when performing zero-shot evaluation on 12 datasets [27], our iCLIP model also achieves non-trivial improvements, e.g., an average of over " + }, + { + "bbox": [ + 46, + 424, + 287, + 533 + ], + "type": "inline_equation", + "content": "+3\\%" + }, + { + "bbox": [ + 46, + 424, + 287, + 533 + ], + "type": "text", + "content": " gains (VL-only in Tab. 5). In addition, our iCLIP is comparable to OpenCLIP on 12 datasets in average with fewer training time. More details are elaborated in the supplementary material." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 534, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 534, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 534, + 288, + 715 + ], + "type": "text", + "content": "Few-shot classification We also conduct experiments in few-shot settings. Following CLIP [44], we freeze the visual encoder and append a linear probe layer for few-shot fine-tuning. We notice that the performance of CLIP [44] in few-shot classification cannot catch up with that of zero-shot classification, unless more than 4 examples per class are given, as presented in Fig. 4 (" + }, + { + "bbox": [ + 46, + 534, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\star" + }, + { + "bbox": [ + 46, + 534, + 288, + 715 + ], + "type": "text", + "content": " v.s. 
" + }, + { + "bbox": [ + 46, + 534, + 288, + 715 + ], + "type": "inline_equation", + "content": "-\\bullet" + }, + { + "bbox": [ + 46, + 534, + 288, + 715 + ], + "type": "text", + "content": "). We conjecture the underlying reason is that the number of training samples is too limited to train a randomly initialized classifier. This situation can be alleviated by fine-tuning the pretrained text encoder, instead of the linear probe layer. In this way, text encoder is able to serve as a good initialization for few-shot classification, closing the gap between pretraining and fine-tuning. We evaluate such method on Kornblith 12-dataset benchmark [27] and report the results in Fig. 4." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "spans": [ + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "text", + "content": "When only given one example per class, by utilizing text encoder as the classifier, our iCLIP achieve " + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "inline_equation", + "content": "73.9\\%" + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "text", + "content": " on 12-dataset in average, surpassing the original CLIP model by " + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "inline_equation", + "content": "+29.5\\%" + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "text", + "content": ". Such one-shot recognition gets " + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "inline_equation", + "content": "+3.3\\%" + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "text", + "content": " gains over the zero-shot baseline (" + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "inline_equation", + "content": "\\star" + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "text", + "content": " v.s. " + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "inline_equation", + "content": "-\\bullet" + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "text", + "content": "), demonstrating good few-shot transfer ability. When using 16 examples per class, our model still performs superior to CLIP by " + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "inline_equation", + "content": "4.1\\%" + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "text", + "content": ". Compared to supervised-only model and visual-linguistic only model, our unified contrastive learning pretrained model obtains " + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "inline_equation", + "content": "+24.6\\%" + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "inline_equation", + "content": "+6.1\\%" + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "text", + "content": " better accuracy under one-shot learning setting. 
Such advantages are kept to 16-shot with " + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "inline_equation", + "content": "+2.7\\%" + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "inline_equation", + "content": "+5.0\\%" + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "text", + "content": " gains " + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "inline_equation", + "content": "(- \\bullet -" + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "inline_equation", + "content": "- \\bullet -)" + }, + { + "bbox": [ + 304, + 228, + 547, + 373 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 379, + 547, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 379, + 547, + 487 + ], + "spans": [ + { + "bbox": [ + 304, + 379, + 547, + 487 + ], + "type": "text", + "content": "Fine-tuning on Downstream Tasks We also study the generalization capability of our pre-trained models on downstream tasks, including semantic segmentation, object detection and video recognition. As shown in Tab. 5, compared to Sup-only, our iCLIP surpasses it by " + }, + { + "bbox": [ + 304, + 379, + 547, + 487 + ], + "type": "inline_equation", + "content": "+0.5\\%" + }, + { + "bbox": [ + 304, + 379, + 547, + 487 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 379, + 547, + 487 + ], + "type": "inline_equation", + "content": "+2.0\\%" + }, + { + "bbox": [ + 304, + 379, + 547, + 487 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 379, + 547, + 487 + ], + "type": "inline_equation", + "content": "+0.4\\%" + }, + { + "bbox": [ + 304, + 379, + 547, + 487 + ], + "type": "text", + "content": " on the three downstream tasks, respectively. We also earn " + }, + { + "bbox": [ + 304, + 379, + 547, + 487 + ], + "type": "inline_equation", + "content": "+0.6\\%" + }, + { + "bbox": [ + 304, + 379, + 547, + 487 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 379, + 547, + 487 + ], + "type": "inline_equation", + "content": "+1.3\\%" + }, + { + "bbox": [ + 304, + 379, + 547, + 487 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 379, + 547, + 487 + ], + "type": "inline_equation", + "content": "+0.8\\%" + }, + { + "bbox": [ + 304, + 379, + 547, + 487 + ], + "type": "text", + "content": " gains over VL-only baseline. These results reveal that our unified method could learn general visual representations." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 512, + 379, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 512, + 379, + 525 + ], + "spans": [ + { + "bbox": [ + 306, + 512, + 379, + 525 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 539, + 547, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 539, + 547, + 635 + ], + "spans": [ + { + "bbox": [ + 304, + 539, + 547, + 635 + ], + "type": "text", + "content": "In this paper, we propose a unified framework dubbed iCLIP to bridge image classification and language-image pre-training. It naturally forces the cross-modal feature learning in a unified space, where the two tasks share the same visual and textual encoders. 
Extensive experiments demonstrate that iCLIP is effective, and can be generalized to different visual recognition scenarios, including zero-shot, few-shot, and fully-supervised fine-tuning." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 641, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 547, + 715 + ], + "type": "text", + "content": "Limitations. One limitation of iCLIP is that, despite its competitive performance, the model still relies on human labeled classification data that is not scalable. Besides, our model currently only adopts median-size parameters, which can not fully validate the generation ability to large-scale models. We are interested in exploring this in future work." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 758 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 758 + ], + "type": "text", + "content": "2783" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "text", + "content": "[1] Soren Auer, Christian Bizer, Georgi Kobilarov, Jens Lehmann, Richard Cyganiak, and Zachary Ives. Dbpedia: A nucleus for a web of open data. In The semantic web, pages 722-735. Springer, 2007. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 136, + 287, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 136, + 287, + 168 + ], + "spans": [ + { + "bbox": [ + 53, + 136, + 287, + 168 + ], + "type": "text", + "content": "[2] Yue Cao, Zhenda Xie, Bin Liu, Yutong Lin, Zheng Zhang, and Han Hu. Parametric instance classification for unsupervised visual feature learning. 2020. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 169, + 287, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 169, + 287, + 223 + ], + "spans": [ + { + "bbox": [ + 53, + 169, + 287, + 223 + ], + "type": "text", + "content": "[3] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European Conference on Computer Vision, pages 213-229. Springer, 2020. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 224, + 288, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 224, + 288, + 269 + ], + "spans": [ + { + "bbox": [ + 53, + 224, + 288, + 269 + ], + "type": "text", + "content": "[4] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6299-6308, 2017. 
3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 270, + 287, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 270, + 287, + 312 + ], + "spans": [ + { + "bbox": [ + 53, + 270, + 287, + 312 + ], + "type": "text", + "content": "[5] Liang-Chieh Chen, George Papandreou, Florian Schroff, and Hartwig Adam. Rethinking atrous convolution for semantic image segmentation. arXiv preprint arXiv:1706.05587, 2017.3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 314, + 288, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 314, + 288, + 346 + ], + "spans": [ + { + "bbox": [ + 53, + 314, + 288, + 346 + ], + "type": "text", + "content": "[6] Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. pages 1597-1607, 2020. 2, 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 347, + 287, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 347, + 287, + 402 + ], + "spans": [ + { + "bbox": [ + 53, + 347, + 287, + 402 + ], + "type": "text", + "content": "[7] Ekin D Cubuk, Barret Zoph, Jonathon Shlens, and Quoc V Le. Randaugment: Practical automated data augmentation with a reduced search space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 702-703, 2020. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 403, + 288, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 403, + 288, + 456 + ], + "spans": [ + { + "bbox": [ + 53, + 403, + 288, + 456 + ], + "type": "text", + "content": "[8] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 1, 3, 5, 6, 7, 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 458, + 287, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 458, + 287, + 512 + ], + "spans": [ + { + "bbox": [ + 53, + 458, + 287, + 512 + ], + "type": "text", + "content": "[9] Jiankang Deng, Jia Guo, Jing Yang, Niannan Xue, Irene Cotcia, and Stefanos P Zafeiriou. ArcFace: Additive angular margin loss for deep face recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, pages 1-1, 2021. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 514, + 288, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 514, + 288, + 557 + ], + "spans": [ + { + "bbox": [ + 48, + 514, + 288, + 557 + ], + "type": "text", + "content": "[10] Karan Desai and Justin Johnson. Virtex: Learning visual representations from textual annotations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11162-11173, 2021. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 558, + 287, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 558, + 287, + 612 + ], + "spans": [ + { + "bbox": [ + 48, + 558, + 287, + 612 + ], + "type": "text", + "content": "[11] Xiaoyi Dong, Yinglin Zheng, Jianmin Bao, Ting Zhang, Dongdong Chen, Hao Yang, Ming Zeng, Weiming Zhang, Lu Yuan, Dong Chen, et al. Maskclip: Masked self-distillation advances contrastive language-image pretraining. arXiv preprint arXiv:2208.12262, 2022. 
3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 614, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 614, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 614, + 287, + 689 + ], + "type": "text", + "content": "[12] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2021. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "text", + "content": "[13] Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. Slowfast networks for video recognition. In" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "Proceedings of the IEEE international conference on computer vision, pages 6202-6211, 2019. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 96, + 545, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 96, + 545, + 171 + ], + "spans": [ + { + "bbox": [ + 308, + 96, + 545, + 171 + ], + "type": "text", + "content": "[14] Andrea Frome, Greg S Corrado, Jon Shlens, Samy Bengio, Jeff Dean, Marc' Aurelio Ranzato, and Tomas Mikolov. Devise: A deep visual-semantic embedding model. In C. J. C. Burges, L. Bottou, M. Welling, Z. Ghahramani, and K. Q. Weinberger, editors, Advances in Neural Information Processing Systems, volume 26. Curran Associates, Inc., 2013. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 174, + 545, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 174, + 545, + 228 + ], + "spans": [ + { + "bbox": [ + 308, + 174, + 545, + 228 + ], + "type": "text", + "content": "[15] Ross Girshick, Jeff Donahue, Trevor Darrell, and Jitendra Malik. Rich feature hierarchies for accurate object detection and semantic segmentation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2014. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 230, + 545, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 230, + 545, + 285 + ], + "spans": [ + { + "bbox": [ + 308, + 230, + 545, + 285 + ], + "type": "text", + "content": "[16] Lluis Gomez, Yash Patel, Marçal Rusinol, Dimosthenis Karatzas, and CV Jawahar. Self-supervised learning of visual features through embedding images into text topic spaces. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4230-4239, 2017. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 286, + 545, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 286, + 545, + 330 + ], + "spans": [ + { + "bbox": [ + 308, + 286, + 545, + 330 + ], + "type": "text", + "content": "[17] Agrim Gupta, Piotr Dollar, and Ross Girshick. 
Lvis: A dataset for large vocabulary instance segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5356-5364, 2019. 2, 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 331, + 545, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 331, + 545, + 364 + ], + "spans": [ + { + "bbox": [ + 308, + 331, + 545, + 364 + ], + "type": "text", + "content": "[18] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. pages 9729-9738, 2020. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 365, + 545, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 365, + 545, + 398 + ], + "spans": [ + { + "bbox": [ + 308, + 365, + 545, + 398 + ], + "type": "text", + "content": "[19] Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE international conference on computer vision, pages 2961-2969, 2017. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 399, + 545, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 399, + 545, + 443 + ], + "spans": [ + { + "bbox": [ + 308, + 399, + 545, + 443 + ], + "type": "text", + "content": "[20] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 444, + 545, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 444, + 545, + 488 + ], + "spans": [ + { + "bbox": [ + 308, + 444, + 545, + 488 + ], + "type": "text", + "content": "[21] Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. Deep networks with stochastic depth. In European conference on computer vision, pages 646-661. Springer, 2016. 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 489, + 545, + 554 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 489, + 545, + 554 + ], + "spans": [ + { + "bbox": [ + 308, + 489, + 545, + 554 + ], + "type": "text", + "content": "[22] Gabriel Ilharco, Mitchell Wortsman, Ross Wightman, Cade Gordon, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, Hongseok Namkoong, John Miller, Hannaneh Hajishirzi, Ali Farhadi, and Ludwig Schmidt. Openclip, July 2021. If you use this software, please cite it as below. 8" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 555, + 520, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 555, + 520, + 567 + ], + "spans": [ + { + "bbox": [ + 308, + 555, + 520, + 567 + ], + "type": "text", + "content": "[23] P Jackson. Introduction to expert systems. 1 1986. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 568, + 545, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 568, + 545, + 633 + ], + "spans": [ + { + "bbox": [ + 308, + 568, + 545, + 633 + ], + "type": "text", + "content": "[24] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In International Conference on Machine Learning, pages 4904-4916. PMLR, 2021. 
1, 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 635, + 545, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 635, + 545, + 679 + ], + "spans": [ + { + "bbox": [ + 308, + 635, + 545, + 679 + ], + "type": "text", + "content": "[25] Armand Joulin, Laurens Van Der Maaten, Allan Jabri, and Nicolas Vasilache. Learning visual features from large weakly supervised data. In European Conference on Computer Vision, pages 67-84. Springer, 2016. 3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 681, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 681, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 681, + 545, + 713 + ], + "type": "text", + "content": "[26] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, et al. The kinetics hu" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "2784" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "type": "text", + "content": "man action video dataset. arXiv preprint arXiv:1705.06950, 2017.2,6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "type": "text", + "content": "[27] Simon Kornblith, Jonathon Shlens, and Quoc V. Le. Do better imagenet models transfer better? In 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2656-2666, 2019. 6, 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 141, + 287, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 141, + 287, + 185 + ], + "spans": [ + { + "bbox": [ + 48, + 141, + 287, + 185 + ], + "type": "text", + "content": "[28] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. In Advances in neural information processing systems, pages 1097-1105, 2012. 2, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 186, + 287, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 186, + 287, + 240 + ], + "spans": [ + { + "bbox": [ + 48, + 186, + 287, + 240 + ], + "type": "text", + "content": "[29] Alina Kuznetsova, Hassan Rom, Neil Alldrin, Jasper Uijlings, Ivan Krasin, Jordi Pont-Tuset, Shahab Kamali, Stefan Popov, Matteo Malloci, Alexander Kolesnikov, et al. The open images dataset v4. International Journal of Computer Vision, 128(7):1956-1981, 2020. 
1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 241, + 287, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 241, + 287, + 297 + ], + "spans": [ + { + "bbox": [ + 48, + 241, + 287, + 297 + ], + "type": "text", + "content": "[30] Yangguang Li, Feng Liang, Lichen Zhao, Yufeng Cui, Wanli Ouyang, Jing Shao, Fengwei Yu, and Junjie Yan. Supervision exists everywhere: A data efficient contrastive language-image pre-training paradigm. In International Conference on Learning Representations, 2022. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 298, + 287, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 298, + 287, + 353 + ], + "spans": [ + { + "bbox": [ + 48, + 298, + 287, + 353 + ], + "type": "text", + "content": "[31] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 3, 6, 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 354, + 287, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 354, + 287, + 407 + ], + "spans": [ + { + "bbox": [ + 48, + 354, + 287, + 407 + ], + "type": "text", + "content": "[32] Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692, 2019. 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 410, + 287, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 410, + 287, + 453 + ], + "spans": [ + { + "bbox": [ + 48, + 410, + 287, + 453 + ], + "type": "text", + "content": "[33] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. pages 10012-10022, 2021. 2, 6, 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 455, + 287, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 455, + 287, + 509 + ], + "spans": [ + { + "bbox": [ + 48, + 455, + 287, + 509 + ], + "type": "text", + "content": "[34] Ze Liu, Jia Ning, Yue Cao, Yixuan Wei, Zheng Zhang, Stephen Lin, and Han Hu. Video swin transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3202-3211, June 2022. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 511, + 287, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 511, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 48, + 511, + 287, + 555 + ], + "type": "text", + "content": "[35] Jonathan Long, Evan Shelhamer, and Trevor Darrell. Fully convolutional networks for semantic segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3431-3440, 2015. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 555, + 287, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 555, + 287, + 599 + ], + "spans": [ + { + "bbox": [ + 48, + 555, + 287, + 599 + ], + "type": "text", + "content": "[36] George A. Miller. WordNet: A lexical database for English. In Human Language Technology: Proceedings of a Workshop held at Plainsboro, New Jersey, March 8-11, 1994, 1994. 
1, 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 601, + 287, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 601, + 287, + 645 + ], + "spans": [ + { + "bbox": [ + 48, + 601, + 287, + 645 + ], + "type": "text", + "content": "[37] Norman Mu, Alexander Kirillov, David Wagner, and Saining Xie. Slip: Self-supervision meets language-image pretraining. In European Conference on Computer Vision, pages 529-544. Springer, 2022. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 647, + 287, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 287, + 668 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 287, + 668 + ], + "type": "text", + "content": "[38] Kevin Musgrave, Serge Belongie, and Ser-Nam Lim. A metric learning reality check, 2020. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "type": "text", + "content": "[39] Basil Mustafa, Carlos Riquelme, Joan Puigcerver, Rodolphe Jenatton, and Neil Houlsby. Multimodal contrastive learning with limoe: the language-image mixture of experts. arXiv preprint arXiv:2206.02770, 2022. 3" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "text", + "content": "[40] Hieu V Nguyen and Li Bai. Cosine similarity metric learning for face verification. In Asian conference on computer vision, pages 709-720. Springer, 2010. 4" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 107, + 545, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 161 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 161 + ], + "type": "text", + "content": "[41] Mohammad Norouzi, Tomas Mikolov, Samy Bengio, Yoram Singer, Jonathon Shlens, Andrea Frome, Greg S Corrado, and Jeffrey Dean. Zero-shot learning by convex combination of semantic embeddings. arXiv preprint arXiv:1312.5650, 2013. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 163, + 545, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 163, + 545, + 206 + ], + "spans": [ + { + "bbox": [ + 307, + 163, + 545, + 206 + ], + "type": "text", + "content": "[42] Hieu Pham, Zihang Dai, Golnaz Ghiasi, Hanxiao Liu, Adams Wei Yu, Minh-Thang Luong, Mingxing Tan, and Quoc V. Le. Combined scaling for zero-shot transfer learning. CoRR, abs/2111.10050, 2021. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 208, + 545, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 208, + 545, + 252 + ], + "spans": [ + { + "bbox": [ + 307, + 208, + 545, + 252 + ], + "type": "text", + "content": "[43] Zhaofan Qiu, Ting Yao, and Tao Mei. Learning spatiotemporal representation with pseudo-3d residual networks. In proceedings of the IEEE International Conference on Computer Vision, pages 5533–5541, 2017. 
3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 254, + 545, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 254, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 307, + 254, + 545, + 319 + ], + "type": "text", + "content": "[44] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pages 8748-8763. PMLR, 2021. 1, 3, 6, 8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 320, + 545, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 320, + 545, + 365 + ], + "spans": [ + { + "bbox": [ + 307, + 320, + 545, + 365 + ], + "type": "text", + "content": "[45] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. In Advances in neural information processing systems, pages 91-99, 2015. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 365, + 545, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 365, + 545, + 398 + ], + "spans": [ + { + "bbox": [ + 307, + 365, + 545, + 398 + ], + "type": "text", + "content": "[46] Mert Bulent Sariyildiz, Julien Perez, and Diane Larlus. Learning visual representations with caption annotations. In European Conference on Computer Vision (ECCV), 2020. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 399, + 545, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 399, + 545, + 453 + ], + "spans": [ + { + "bbox": [ + 307, + 399, + 545, + 453 + ], + "type": "text", + "content": "[47] Christoph Schuhmann, Richard Vencu, Romain Beaumont, Robert Kaczmarczyk, Clayton Mullis, Aarush Katta, Theo Coombes, Jenia Jitsev, and Aran Komatsuzaki. Laion-400m: Open dataset of clip-filtered 400 million image-text pairs, 2021. 6, 7, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 455, + 545, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 455, + 545, + 498 + ], + "spans": [ + { + "bbox": [ + 307, + 455, + 545, + 498 + ], + "type": "text", + "content": "[48] Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In Proceedings of ACL, 2018. 6, 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 500, + 545, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 500, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 307, + 500, + 545, + 533 + ], + "type": "text", + "content": "[49] K. Simonyan and A. Zisserman. Very deep convolutional networks for large-scale image recognition. In International Conference on Learning Representations, May 2015. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 534, + 545, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 534, + 545, + 578 + ], + "spans": [ + { + "bbox": [ + 307, + 534, + 545, + 578 + ], + "type": "text", + "content": "[50] Robyn Speer, Joshua Chin, and Catherine Havasi. Conceptnet 5.5: An open multilingual graph of general knowledge. In Thirty-first AAAI conference on artificial intelligence, 2017. 
3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 579, + 545, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 579, + 545, + 623 + ], + "spans": [ + { + "bbox": [ + 307, + 579, + 545, + 623 + ], + "type": "text", + "content": "[51] Chen Sun, Abhinav Shrivastava, Saurabh Singh, and Abhinav Gupta. Revisiting unreasonable effectiveness of data in deep learning era. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), Oct 2017. 1" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 624, + 545, + 688 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 624, + 545, + 688 + ], + "spans": [ + { + "bbox": [ + 307, + 624, + 545, + 688 + ], + "type": "text", + "content": "[52] Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed, Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, and Andrew Rabinovich. Going deeper with convolutions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1-9, 2015. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 691, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 691, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 691, + 545, + 713 + ], + "type": "text", + "content": "[53] Bart Thomee, David A. Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "2785" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 67, + 72, + 286, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 286, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 286, + 95 + ], + "type": "text", + "content": "Li-Jia Li. Yfcc100m: The new data in multimedia research. Commun. ACM, 59(2):64-73, jan 2016. 6, 7" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 149 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 149 + ], + "type": "text", + "content": "[54] Du Tran, Lubomir Bourdev, Rob Fergus, Lorenzo Torresani, and Manohar Paluri. Learning spatiotemporal features with 3d convolutional networks. In Proceedings of the IEEE international conference on computer vision, pages 4489-4497, 2015. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 152, + 287, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 152, + 287, + 185 + ], + "spans": [ + { + "bbox": [ + 48, + 152, + 287, + 185 + ], + "type": "text", + "content": "[55] Denny Vrandecic. Wikidata: A new platform for collaborative data collection. In Proceedings of the 21st international conference on world wide web, pages 1063-1064, 2012. 
3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 186, + 287, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 186, + 287, + 230 + ], + "spans": [ + { + "bbox": [ + 48, + 186, + 287, + 230 + ], + "type": "text", + "content": "[56] Haohan Wang, Songwei Ge, Zachary Lipton, and Eric P Xing. Learning robust global representations by penalizing local predictive power. In Advances in Neural Information Processing Systems, pages 10506-10518, 2019. 6, 8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 232, + 287, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 232, + 287, + 264 + ], + "spans": [ + { + "bbox": [ + 48, + 232, + 287, + 264 + ], + "type": "text", + "content": "[57] Hao Wang, Yitong Wang, Zheng Zhou, Xing Ji, Dihong Gong, Jingchao Zhou, Zhifeng Li, and Wei Liu. Cosface: Large margin cosine loss for deep face recognition, 2018. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 266, + 287, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 266, + 287, + 319 + ], + "spans": [ + { + "bbox": [ + 48, + 266, + 287, + 319 + ], + "type": "text", + "content": "[58] Saining Xie, Ross Girshick, Piotr Dólár, Zhuowen Tu, and Kaiming He. Aggregated residual transformations for deep neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1492-1500, 2017. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 321, + 287, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 321, + 287, + 364 + ], + "spans": [ + { + "bbox": [ + 48, + 321, + 287, + 364 + ], + "type": "text", + "content": "[59] Zhenda Xie, Yutong Lin, Zhuliang Yao, Zheng Zhang, Qi Dai, Yue Cao, and Han Hu. Self-supervised learning with swim transformers. arXiv preprint arXiv:2105.04553, 2021. 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 366, + 287, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 366, + 287, + 431 + ], + "spans": [ + { + "bbox": [ + 48, + 366, + 287, + 431 + ], + "type": "text", + "content": "[60] Jinyu Yang, Jiali Duan, Son Tran, Yi Xu, Sampath Chanda, Liquun Chen, Belinda Zeng, Trishul Chilimbi, and Junzhou Huang. Vision-language pre-training with triple contrastive learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15671-15680, 2022. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 434, + 287, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 434, + 287, + 487 + ], + "spans": [ + { + "bbox": [ + 48, + 434, + 287, + 487 + ], + "type": "text", + "content": "[61] Jianwei Yang, Chunyuan Li, Pengchuan Zhang, Bin Xiao, Ce Liu, Lu Yuan, and Jianfeng Gao. Unified contrastive learning in image-text-label space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19163-19173, June 2022. 3, 6, 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 490, + 287, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 490, + 287, + 543 + ], + "spans": [ + { + "bbox": [ + 48, + 490, + 287, + 543 + ], + "type": "text", + "content": "[62] Lewei Yao, Runhui Huang, Lu Hou, Guansong Lu, Minzhe Niu, Hang Xu, Xiaodan Liang, Zhenguo Li, Xin Jiang, and Chunjing Xu. FILIP: Fine-grained interactive language-image pre-training. In International Conference on Learning Representations, 2022. 
3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 545, + 287, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 545, + 287, + 588 + ], + "spans": [ + { + "bbox": [ + 48, + 545, + 287, + 588 + ], + "type": "text", + "content": "[63] Minghao Yin, Zhuliang Yao, Yue Cao, Xiu Li, Zheng Zhang, Stephen Lin, and Han Hu. Disentangled non-local neural networks. In Proceedings of the European conference on computer vision (ECCV), 2020. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 590, + 287, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 287, + 644 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 287, + 644 + ], + "type": "text", + "content": "[64] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions. Transactions of the Association for Computational Linguistics, 2:67-78, 2014. 6, 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 647, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 287, + 689 + ], + "type": "text", + "content": "[65] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive captioners are image-text foundation models. Transactions on Machine Learning Research, 2022. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 692, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 692, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 692, + 287, + 713 + ], + "type": "text", + "content": "[66] Xiaohua Zhai, Xiao Wang, Basil Mustafa, Andreas Steiner, Daniel Keysers, Alexander Kolesnikov, and Lucas Beyer." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 195 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "type": "text", + "content": "Lit: Zero-shot transfer with locked-image text tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18123-18133, 2022. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 107, + 545, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 150 + ], + "type": "text", + "content": "[67] Yuhao Zhang, Hang Jiang, Yasuhide Miura, Christopher D Manning, and Curtis P Langlotz. Contrastive learning of medical visual representations from paired images and text. arXiv preprint arXiv:2010.00747, 2020. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 152, + 545, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 545, + 195 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 545, + 195 + ], + "type": "text", + "content": "[68] Bolei Zhou, Hang Zhao, Xavier Puig, Tete Xiao, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Semantic understanding of scenes through the ade20k dataset. International Journal on Computer Vision, 2018. 
2, 3, 6" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "2786" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/iDisc_ Internal Discretization for Monocular Depth Estimation/f111aba4-6ed5-4778-a051-8ab6247508d3_content_list.json b/2023/iDisc_ Internal Discretization for Monocular Depth Estimation/f111aba4-6ed5-4778-a051-8ab6247508d3_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..4f2673d5cd28e8c81693f93e88456e44cc3ffd2b --- /dev/null +++ b/2023/iDisc_ Internal Discretization for Monocular Depth Estimation/f111aba4-6ed5-4778-a051-8ab6247508d3_content_list.json @@ -0,0 +1,1544 @@ +[ + { + "type": "text", + "text": "iDisc: Internal Discretization for Monocular Depth Estimation", + "text_level": 1, + "bbox": [ + 169, + 130, + 800, + 151 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Luigi Piccinelli Christos Sakaridis Fisher Yu", + "bbox": [ + 272, + 180, + 696, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Computer Vision Lab, ETH Zürich", + "bbox": [ + 341, + 205, + 622, + 223 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 258, + 313, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Monocular depth estimation is fundamental for 3D scene understanding and downstream applications. However, even under the supervised setup, it is still challenging and ill-posed due to the lack of full geometric constraints. Although a scene can consist of millions of pixels, there are fewer high-level patterns. We propose iDisc to learn those patterns with internal discretized representations. The method implicitly partitions the scene into a set of high-level patterns. In particular, our new module, Internal Discretization (ID), implements a continuous-discrete-continuous bottleneck to learn those concepts without supervision. In contrast to state-of-the-art methods, the proposed model does not enforce any explicit constraints or priors on the depth output. The whole network with the ID module can be trained end-to-end, thanks to the bottleneck module based on attention. Our method sets the new state of the art with significant improvements on NYU-Depth v2 and KITTI, outperforming all published methods on the official KITTI benchmark. iDisc can also achieve state-of-the-art results on surface normal estimation. Further, we explore the model generalization capability via zero-shot testing. We observe the compelling need to promote diversification in the outdoor scenario. Hence, we introduce splits of two autonomous driving datasets, DDAD and Argoverse. Code is available at http://vis.xyz/pub/idisc.", + "bbox": [ + 75, + 292, + 473, + 670 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 691, + 209, + 707 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Depth estimation is essential in computer vision, especially for understanding geometric relations in a scene. 
This task consists in predicting the distance between the projection center and the 3D point corresponding to each pixel. Depth estimation finds direct significance in downstream applications such as 3D modeling, robotics, and autonomous cars. Some research [62] shows that depth estimation is a crucial prompt to be leveraged for action reasoning and execution. In particular, we tackle the task of monocular depth estimation (MDE). MDE is an ill-posed problem due to its inherent scale ambiguity: the same 2D input image can correspond to an infinite number of 3D scenes.", + "bbox": [ + 75, + 719, + 470, + 900 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8ba22e74df8a546a7c53dddaaca8625992d9f6ed5692c5fbba528c9c016e51d4.jpg", + "image_caption": [ + "(a) Input image" + ], + "image_footnote": [], + "bbox": [ + 501, + 257, + 696, + 364 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/f005ed225df087a2be0bc84bfe03875ccb90c154d40ec389de3b6f70c18d5bb0.jpg", + "image_caption": [ + "(b) Output depth" + ], + "image_footnote": [], + "bbox": [ + 697, + 257, + 890, + 364 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/2e8c07acd4f3533dd988334195875e5b8b0914a829e433cb67d2c0d162911607.jpg", + "image_caption": [ + "(c) Intermediate representations" + ], + "image_footnote": [], + "bbox": [ + 501, + 383, + 696, + 491 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/870d50ba2271e434f8e85ba6f034fb1178463bcbcb085e72560087d90b8584c5.jpg", + "image_caption": [ + "(d) Internal discretization", + "Figure 1. We propose iDisc which implicitly enforces an internal discretization of the scene via a continuous-discrete-continuous bottleneck. Supervision is applied to the output depth only, i.e., the fused intermediate representations in (c), while the internal discrete representations are implicitly learned by the model. (d) displays some actual internal discretization patterns captured from the input, e.g., foreground, object relationships, and 3D planes. Our iDisc model is able to predict high-quality depth maps by capturing scene interactions and structure." + ], + "image_footnote": [], + "bbox": [ + 697, + 383, + 890, + 491 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "State-of-the-art (SotA) methods typically involve convolutional networks [12, 13, 24] or, since the advent of vision Transformer [11], transformer architectures [3, 41, 54, 59]. Most methods either impose geometric constraints on the image [22, 33, 38, 55], namely, planarity priors or explicitly discretize the continuous depth range [3, 4, 13]. The latter can be viewed as learning frontoparallel planes. These imposed priors inherently limit the expressiveness of the respective models, as they cannot model arbitrary depth patterns, ubiquitous in real-world scenes.", + "bbox": [ + 496, + 654, + 893, + 803 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We instead propose a more general depth estimation model, called iDisc, which does not explicitly impose any constraint on the final prediction. We design an Internal Discretization (ID) of the scene which is in principle depth-agnostic. 
Our assumption behind this ID is that each scene can be implicitly described by a set of concepts or patterns,", + "bbox": [ + 496, + 810, + 895, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "21477", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "such as objects, planes, edges, and perspectivity relationships. The specific training signal determines which patterns to learn (see Fig. 1).", + "bbox": [ + 75, + 90, + 470, + 136 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We design a continuous-to-discrete bottleneck through which the information is passed in order to obtain such internal scene discretization, namely the underlying patterns. In the bottleneck, the scene feature space is partitioned via learnable and input-dependent quantizers, which in turn transfer the information onto the continuous output space. The ID bottleneck introduced in this work is a general concept and can be implemented in several ways. Our particular ID implementation employs attention-based operators, leading to an end-to-end trainable architecture and input-dependent framework. More specifically, we implement the continuous-to-discrete operation via \"transposed\" cross-attention, where transposed refers to applying softmax on the output dimension. This softmax formulation enforces the input features to be routed to the internal discrete representations (IDRs) in an exclusive fashion, thus defining an input-dependent soft clustering of the feature space. The discrete-to-continuous transformation is implemented via cross-attention. Supervision is only applied to the final output, without any assumptions or regularization on the IDRs.", + "bbox": [ + 75, + 136, + 470, + 438 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We test iDisc on multiple indoor and outdoor datasets and probe its robustness via zero-shot testing. As of today, there is too little variety in MDE benchmarks for the outdoor scenario, since the only established benchmark is KITTI [17]. Moreover, we observe that all methods fail on outdoor zero-shot testing, suggesting that the KITTI dataset is not diverse enough and leads to overfitting, thus implying that it is not indicative of generalized performance. Hence, we find it compelling to establish a new benchmark setup for the MDE community by proposing two new train-test splits of more diverse and challenging high-quality outdoor datasets: Argoverse1.1 [8] and DDAD [18].", + "bbox": [ + 75, + 439, + 470, + 619 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our main contributions are as follows: (i) we introduce the Internal Discretization module, a novel architectural component that adeptly represents a scene by combining underlying patterns; (ii) we show that it is a generalization of SotA methods involving depth ordinal regression [3, 13]; (iii) we propose splits of two raw outdoor datasets [8, 18] with high-quality LiDAR measurements. 
We extensively test iDisc on six diverse datasets and, owing to the ID design, our model consistently outperforms SotA methods and presents better transferability. Moreover, we apply iDisc to surface normal estimation showing that the proposed module is general enough to tackle generic real-valued dense prediction tasks.", + "bbox": [ + 75, + 619, + 470, + 801 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 76, + 814, + 218, + 830 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The supervised setting of MDE assumes that pixel-wise depth annotations are available at training time and depth inference is performed on single images. The coarse-to-fine network introduced in Eigen et al. [12] is the cor", + "bbox": [ + 75, + 839, + 470, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "nerstone in MDE with end-to-end neural networks. The work established the optimization process via the Scale-Invariant log loss $(\\mathrm{SI}_{\\log})$ . Since then, the three main directions evolve: new architectures, such as residual networks [23], neural fields [30, 52], multi-scale fusion [25, 35], transformers [3, 54, 59]; improved optimization schemes, such as reverse-Huber loss [23], classification [6], or ordinal regression [3, 13]; multi-task learning to leverage ancillary information from the related task, such as surface normals estimation or semantic segmentation [12, 39, 51].", + "bbox": [ + 496, + 90, + 893, + 242 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Geometric priors have been widely utilized in the literature, particularly the piecewise planarity prior [5, 9, 14], serving as a proper real-world approximation. The geometric priors are usually incorporated by explicitly treating the image as a set of planes [26, 28, 29, 58], using a plane-inducing loss [57], forcing pixels to attend to the planar representation of other pixels [24, 38], or imposing consistency with other tasks' output [2, 33, 55], like surface normals. Priors can focus on a more holistic scene representation by dividing the whole scene into 3D planes without dependence on intrinsic camera parameters [53, 60], aiming at partitioning the scene into dominant depth planes. In contrast to geometric prior-based works, our method lifts any explicit geometric constraints on the scene. Instead, iDisc implicitly enforces the representation of scenes as a set of high-level patterns.", + "bbox": [ + 496, + 244, + 893, + 470 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Ordinal regression methods [3,4, 13] have proven to be a promising alternative to other geometry-driven approaches. The difference with classification models is that class \"values\" are learnable and are real numbers, thus the problem falls into the regression category. The typical SotA rationale is to explicitly discretize the continuous output depth range, rendering the approach similar to mask-based segmentation. Each of the scalar depth values is associated with a confidence mask which describes the probability of each pixel presenting such a depth value. Hence, SotA methods inherently assume that depth can be represented as a set of frontoparallel planes, that is, depth \"masks\".", + "bbox": [ + 496, + 473, + 893, + 654 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The main paradigm of ordinal regression methods is to first obtain hidden representations and scalar values of discrete depth values. 
The dot-product similarity between the feature maps and the depth representations is treated as logits and softmax is applied to extract confidence masks (in Fu et al. [13] this degenerates to argmax). Finally, the final prediction is defined as the per-pixel weighted average of the discrete depth values, with the confidence values serving as the weights. iDisc draws connections with the idea of depth discretization. However, our ID module is designed to be depth-agnostic. The discretization occurs at the abstract level of internal features from the ID bottleneck instead of the output depth level, unlike other methods.", + "bbox": [ + 496, + 656, + 893, + 852 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Iterative routing is related to our \"transposed\" crossattention. The first approach of this kind was Capsule Networks and their variants [20, 42]. Some formulations [32, 46]", + "bbox": [ + 496, + 854, + 893, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "21478", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/7b815d251a046cb5082a7bd385affd5a0d6db5a9092c6b80fa9ff6953390a6f6.jpg", + "image_caption": [ + "Figure 2. Model Architecture. The Internal Discretization Module imposes an information bottleneck via two consecutive stages: continuous-to-discrete (C2D) and discrete-to-continuous (D2C). The module processes multiple resolutions, i.e., $l \\in \\{1, 2, 3\\}$ , independently in parallel. The bottleneck embodies our assumption that a scene can be represented as a set of patterns. The C2D stage aggregates information, given a learnable prior ( $\\mathbf{H}_{\\text{prior}}^l$ ), from the $l$ -th resolution feature maps ( $\\mathbf{F}^l$ ) to a finite set of IDRs ( $\\mathbf{H}^l$ ). In particular, it learns how to define a partition function that is dependent on the input $\\mathbf{F}^l$ via transposed cross-attention, as in (1). The second stage (D2C) transfers the IDRs on the original continuous space using layers of cross-attention as in (2), for sake of simplicity, we depict only a generic $i$ -th layer. Cross-attention is guided by the similarity between decoded pixel embeddings ( $\\mathbf{P}^l$ ) and $\\mathbf{H}^l$ . The final prediction ( $\\hat{\\mathbf{D}}$ ) is the fusion, i.e., mean, of the intermediate representations $\\{\\hat{\\mathbf{D}}^l\\}_{l=1}^3$ ." + ], + "image_footnote": [], + "bbox": [ + 76, + 85, + 893, + 315 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "employ different kinds of attention mechanisms. Our attention mechanism draws connections with [32]. However, we do not allow permutation invariance, since our assumption is that each discrete representation internally describes a particular kind of pattern. In addition, we do not introduce any other architectural components such as gated recurrent units (GRU). In contrast to other methods, our attention is employed at a higher abstraction level, namely in the decoder.", + "bbox": [ + 75, + 441, + 473, + 563 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 76, + 579, + 169, + 594 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We propose an Internal Discretization (ID) module, to discretize the internal feature representation of encoder-decoder network architectures. We hypothesize that the module can break down the scenes into coherent concepts without semantic supervision. 
This section will first describe the module design and then discuss the network architecture. Sec. 3.1.1 defines the formulation of \"transposed\" cross-attention outlined in Sec. 1 and describes the main difference with previous formulations from Sec. 2. Moreover, we derive in Sec. 3.1.2 how the iDisc formulation can be interpreted as a generalization of SotA ordinal regression methods by reframing their original formulation. Eventually, Sec. 3.2 presents the optimization problem and the overall architecture.", + "bbox": [ + 75, + 604, + 472, + 801 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Internal Discretization Module", + "text_level": 1, + "bbox": [ + 76, + 815, + 352, + 830 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The ID module involves a continuous-discrete-continuous bottleneck composed of two main consecutive stages. The overall module is based on our hypothesis that scenes can be represented as a finite set of patterns. The first stage", + "bbox": [ + 75, + 839, + 470, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "consists in a continuous-to-discrete component, namely soft-exclusive discretization of the feature space. More specifically, it enforces an input-dependent soft clustering on the feature maps in an image-to-set fashion. The second stage completes the internal scene discretization by mapping the learned IDRs onto the continuous output space. IDRs are not bounded to focus exclusively on depth planes but are allowed to represent any high-level pattern or concept, such as objects, relative locations, and planes in the 3D space. In contrast with SotA ordinal regression methods [3,4,13], the IDRs are neither explicitly tied to depth values nor directly tied to the output. Moreover, our module operates at multiple intermediate resolutions and merges them only in the last layer. The overall architecture of iDisc, particularly our ID module, is shown in Fig. 2.", + "bbox": [ + 496, + 441, + 895, + 667 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1.1 Adaptive Feature Partitioning", + "text_level": 1, + "bbox": [ + 500, + 693, + 764, + 709 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The first stage of our ID module, Adaptive Feature Partitioning (AFP), generates proper discrete representations $(\\mathcal{H} \\coloneqq \\{\\mathbf{H}^l\\}_{l=1}^3)$ that quantize the feature space $(\\mathcal{F} \\coloneqq \\{\\mathbf{F}^l\\}_{l=1}^3)$ at each resolution $l$ . We drop the resolution superscript $l$ since resolutions are independently processed and only one generic resolution is treated here. iDisc does not simply learn fixed centroids, as in standard clustering, but rather learns how to define a partition function in an input-dependent fashion. More specifically, an iterative transposed cross-attention module is utilized. Given the specific input feature maps $(\\mathbf{F})$ , the iteration process refines (learnable) IDR priors $(\\mathbf{H}_{\\mathrm{prior}})$ over $R$ iterations.", + "bbox": [ + 496, + 719, + 893, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "21479", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "More specifically, the term \"transposed\" refers to the different axis along which the softmax operation is applied, namely $\\left[\\mathrm{softmax}(\\mathbf{KQ}^T)\\right]^T\\mathbf{V}$ instead of the canonical dot-product attention softmax(QK)V, with Q,K,V as query, key and value tensors, respectively. 
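For concreteness, a minimal PyTorch sketch of one such transposed cross-attention step (the formulation just described, formalized in Eq. (1) below) is given here. This is not the released iDisc code: the module name `AFPIteration`, the single-head, unbatched tensor shapes, and the small clamp used when re-normalizing along the pixel axis are illustrative assumptions only.

```python
import torch
import torch.nn as nn

class AFPIteration(nn.Module):
    """Sketch of one 'transposed' cross-attention step.
    idrs:  (N, C) internal discrete representations (queries).
    feats: (M, C) flattened feature map (keys/values), fixed across iterations."""
    def __init__(self, dim):
        super().__init__()
        self.f_q = nn.Linear(dim, dim)
        self.f_k = nn.Linear(dim, dim)
        self.f_v = nn.Linear(dim, dim)

    def forward(self, idrs, feats):
        q, k, v = self.f_q(idrs), self.f_k(feats), self.f_v(feats)   # (N,C), (M,C), (M,C)
        w = (k @ q.t()).softmax(dim=-1)          # (M, N): each pixel competes over the N IDRs
        w = w / w.sum(dim=0, keepdim=True).clamp_min(1e-6)  # optional re-normalization along pixels
        return w.t() @ v                         # (N, C): refined IDRs aggregate routed pixel content

# illustration only: 32 IDRs partitioning 1024 feature vectors of width 256
afp = AFPIteration(256)
h = afp(torch.randn(32, 256), torch.randn(1024, 256))
```

Because the softmax is taken over the IDR axis, each pixel's content is routed to only a few representations, which is what induces the soft clustering described next.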
In particular, the tensors are obtained as projections of feature maps and IDR priors, $f_{\\mathbf{Q}}(\\mathbf{H}_{\\mathrm{prior}}),f_{\\mathbf{K}}(\\mathbf{F}),f_{\\mathbf{V}}(\\mathbf{F})$ . The $t$ -th iteration out of $R$ can be formulated as follows:", + "bbox": [ + 75, + 90, + 472, + 213 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nW _ {i j} ^ {t} = \\frac {\\exp \\left(\\mathbf {k} _ {i} ^ {T} \\mathbf {q} _ {j} ^ {t}\\right)}{\\sum_ {k = 1} ^ {N} \\exp \\left(\\mathbf {k} _ {i} ^ {T} \\mathbf {q} _ {k} ^ {t}\\right)}, \\mathbf {q} _ {j} ^ {t + 1} = \\sum_ {i = 1} ^ {M} W _ {i j} ^ {t} \\mathbf {v} _ {i}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 116, + 218, + 470, + 258 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{q}_j, \\mathbf{k}_i, \\mathbf{v}_i \\in \\mathbb{R}^C$ are query, key and value respectively, $N$ is the number of IDRs, nameley, clusters, and $M$ is the number of pixels. The weights $W_{ij}$ may be normalized to 1 along the $i$ dimension to avoid vanishing or exploding quantities due to the summation of un-normalized distribution.", + "bbox": [ + 75, + 265, + 470, + 340 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The quantization stems from the inherent behavior of softmax. In particular, softmax forces competition among outputs: one output can be large only to the detriment of others. Therefore, when fixing $i$ , namely, given a feature, only a few attention weights $(W_{ij})$ may be significantly greater than zero. Hence, the content $\\mathbf{v}_i$ is routed only to a few IDRs at the successive iteration. Feature maps are fixed during the process and weights are shared by design, thus $\\{\\mathbf{k}_i, \\mathbf{v}_i\\}_{i=1}^M$ are the same across iterations. The induced competition enforces a soft clustering of the input feature space, where the last-iteration IDR represents the actual partition function $(\\mathbf{H} := \\mathbf{Q}^R)$ . The probabilities of belonging to one partition are the attention weights, namely $W_{ij}^R$ with $j$ -th query fixed. Since attention weights are inherently dependent on the input, the specific partitioning also depends on the input and takes place at inference time. The entire process of AFP leads to (soft) mutually exclusive IDRs.", + "bbox": [ + 75, + 340, + 472, + 598 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As far as the partitioning rationale is concerned, the proposed AFP draws connections with iterative routing methods described in Sec. 2. However, important distinctions apply. First, IDRs are not randomly initialized as the \"slots\" in Locatello et al. [32] but present a learnable prior. Priors can be seen as learnable positional embeddings in the attention context, thus we do not allow a permutation-invariant set of representations. Moreover, non-adaptive partitioning can still take place via the learnable priors if the iterations are zero. Second, the overall architecture differs noticeably as described in Sec. 
2, and in addition, iDisc partitions feature space at the decoder level, corresponding to more abstract, high-level concepts, while the SotA formulations focus on clustering at an abstraction level close to the input image.", + "bbox": [ + 75, + 598, + 470, + 810 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "One possible alternative approach to obtaining the aforementioned IDRs is the well-known image-to-set proposed in DETR [7], namely via classic cross-attention between representations and image feature maps. However, the corresponding representations might redundantly aggregate features, where the extreme corresponds to each output being", + "bbox": [ + 75, + 810, + 470, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "the mean of the input. Studies [15, 44] have shown that slow convergence in transformer-based architectures may be due to the non-localized context in cross-attention. The exclusiveness of the IDRs discourages the redundancy of information in different IDRs. We argue that exclusiveness allows the utilization of fewer representations (32 against the 256 utilized in [3] and [13]), and can improve both the interpretability of what IDRs are responsible for and training convergence.", + "bbox": [ + 496, + 90, + 893, + 227 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1.2 Internal Scene Discretization", + "text_level": 1, + "bbox": [ + 498, + 247, + 756, + 261 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In the second stage of the ID module, Internal Scene Discretization (ISD), the module ingests pixel embeddings $(\\mathcal{P} := \\{\\mathbf{P}^l\\}_{l=1}^3)$ from the decoder and IDRs $\\mathcal{H}$ from the first stage, both at different resolutions $l$ , as shown in Fig. 2. Each discrete representation carries both the signature, as the key, and the output-related content, as the value, of the pattern it represents. The similarity between IDRs and pixel embeddings is computed in order to spatially localize in the continuous output space where to transfer the information of each IDR. We utilize the dot-product similarity function.", + "bbox": [ + 496, + 271, + 893, + 421 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Furthermore, the kind of information to transfer onto the final prediction is not constrained, as we never explicitly handle depth values, usually called bins, until the final output. Thus, the IDRs are completely free to carry generic high-level concepts (such as object-ness, relative positioning, and geometric structures). This approach is in stark contrast with SotA methods [3,4, 13, 27], which explicitly constrain what the representations are about: scalar depth values. Instead, iDisc learns to generate unconstrained representations in an input-dependent fashion. The effective discretization of the scene occurs in the second stage thanks to the information transfer from the set of exclusive concepts $(\\mathcal{H})$ from AFP to the continuous space defined by $\\mathcal{P}$ . We show that our method is not bounded to depth estimation, but can be applied to generic continuous dense tasks, for instance, surface normal estimation. 
Consequently, we argue that the training signal of the task at hand determines how to internally discretize the scene, rendering our ID module general and usable in settings other than depth estimation.", + "bbox": [ + 496, + 422, + 893, + 709 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "From a practical point of view, the whole second stage consists in cross-attention layers applied to IDRs and pixel embeddings. As described in Sec. 3.1.1, we drop the resolution superscript $l$ . After that, the final depth maps are projected onto the output space and the multi-resolution depth predictions are combined. The $i$ -th layer is defined as:", + "bbox": [ + 496, + 710, + 893, + 801 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {D} _ {i + 1} = \\operatorname {s o f t m a x} \\left(\\mathbf {Q} _ {i} \\mathbf {K} _ {i} ^ {T}\\right) \\mathbf {V} _ {i} + \\mathbf {D} _ {i}, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 573, + 814, + 893, + 832 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{Q}_i = f_{Q_i}(\\mathbf{P})\\in \\mathbb{R}^{H\\times W\\times C}$ , $\\mathbf{P}$ are pixel embeddings with shape $(H,W)$ , and $\\mathbf{K}_i$ , $\\mathbf{V}_i\\in \\mathbb{R}^{N\\times C}$ are the $N$ IDRs under linear transformations $f_{K_i}(\\mathbf{H})$ , $f_{V_i}(\\mathbf{H})$ . The term $\\mathbf{Q}_i\\mathbf{K}_i^T$ determines the spatial location for which each", + "bbox": [ + 496, + 838, + 893, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "21480", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "specific IDR is responsible, while $\\mathbf{V}_i$ carries the semantic content to be transferred in the proper spatial locations.", + "bbox": [ + 75, + 90, + 468, + 119 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our approach constitutes a generalization of depth estimation methods that involve (hybrid) ordinal regression. As described in Sec. 2, the common paradigm in ordinal regression methods is to explicitly discretize depth in a set of masks with a scalar depth value associated with it. Then, they predict the likelihood that each pixel belongs to such masks. Our change of paradigm stems from the reinterpretation of the mentioned ordinal regression pipeline which we translate into the following mathematical expression:", + "bbox": [ + 75, + 121, + 472, + 257 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {D} = \\operatorname {s o f t m a x} \\left(\\mathbf {P R} ^ {T} / T\\right) \\mathbf {v}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 178, + 263, + 468, + 281 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathbf{P}$ are the pixel embeddings at maximum resolution and $T$ is the softmax temperature. $\\mathbf{v} \\in \\mathbb{R}^{N \\times 1}$ are $N$ depth scalar values and $\\mathbf{R} \\in \\mathbb{R}^{N \\times (C - 1)}$ are their hidden representations, both processed as a unique stacked tensor $(\\mathbf{R}||\\mathbf{v} \\in \\mathbb{R}^{N \\times C})$ . From the reformulation in (3), one can observe that (3) is a degenerate case of (2). In particular, $f_{Q}$ degenerates to the identity function. $f_{K}$ and $f_{V}$ degenerate to selector functions: the former function selects up to the $C - 1$ dimensions and the latter selects the last dimension only. 
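As a rough illustration of the two formulations just given, a single discrete-to-continuous layer of Eq. (2) can be sketched in PyTorch as below. Again, this is a sketch rather than the authors' implementation: the class name `ISDLayer`, the unbatched, single-head shapes, and the zero-initialized running representation in the usage line are assumptions, and the final comment only indicates how the layer degenerates to the ordinal-regression form of Eq. (3).

```python
import torch
import torch.nn as nn

class ISDLayer(nn.Module):
    """Sketch of one ISD cross-attention layer (cf. Eq. (2)).
    pix:  (HW, C) pixel embeddings, idrs: (N, C) IDRs, d: (HW, C) running representation."""
    def __init__(self, dim):
        super().__init__()
        self.f_q = nn.Linear(dim, dim)
        self.f_k = nn.Linear(dim, dim)
        self.f_v = nn.Linear(dim, dim)

    def forward(self, pix, idrs, d):
        q, k, v = self.f_q(pix), self.f_k(idrs), self.f_v(idrs)  # (HW,C), (N,C), (N,C)
        attn = (q @ k.t()).softmax(dim=-1)   # (HW, N): where each IDR is spatially relevant
        return attn @ v + d                  # residual transfer of IDR content onto the continuous space

isd = ISDLayer(256)
d1 = isd(torch.randn(1024, 256), torch.randn(32, 256), torch.zeros(1024, 256))

# Degenerate ordinal-regression case of Eq. (3): no learned projections, no residual,
# depth = softmax(pix @ bins_repr.t() / T) @ bin_values, with hard-coded scalar depth bins.
```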
Moreover, the hidden representations are refined pixel embeddings $(f(\\mathbf{P}_i) = \\mathbf{H}_i = \\mathbf{R}||\\mathbf{v})$ , and $\\mathbf{D}$ in (3) is the final output, namely no multiple iterations are performed as in (2). The explicit entanglement between the semantic content of the hidden representations and the final output is due to hard-coding $\\mathbf{v}$ as depth scalar values.", + "bbox": [ + 75, + 289, + 472, + 515 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2. Network Architecture", + "text_level": 1, + "bbox": [ + 76, + 523, + 282, + 537 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our network described in Fig. 2 comprises first an encoder backbone, interchangeably convolutional or attention-based, producing features at different scales. The encoded features at different resolutions are refined, and information between resolutions is shared, both via four multi-scale deformable attention (MSDA) blocks [63]. The feature maps from MSDA at different scales are fed into the AFP module to extract IDRs $(\\mathcal{H})$ , and into the decoder to extract pixel embeddings in the continuous space $(\\mathcal{P})$ . Pixel embeddings at different resolutions are combined with the respective IDRs in the ISD stage of the ID module to extract the depth maps. The final depth prediction corresponds to the mean of the interpolated intermediate representations. The optimization process is guided only by the established $\\mathrm{SI}_{\\log}$ loss defined in [12], and no other regularization is exploited. $\\mathrm{SI}_{\\log}$ is defined as:", + "bbox": [ + 75, + 546, + 472, + 773 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {S I} _ {\\log}} (\\epsilon) = \\alpha \\sqrt {\\mathbb {V} [ \\epsilon ] + \\lambda \\mathbb {E} ^ {2} [ \\epsilon ]} \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 169, + 781, + 468, + 805 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\text {w i t h} \\epsilon = \\log (\\hat {y}) - \\log (y ^ {*}),\n$$\n", + "text_format": "latex", + "bbox": [ + 171, + 801, + 352, + 818 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\hat{y}$ is the predicted depth and $y^{*}$ is the ground-truth (GT) value. $\\mathbb{V}[\\epsilon ]$ and $\\mathbb{E}[\\epsilon ]$ are computed as the empirical variance and expected value over all pixels, namely, $\\{\\epsilon_i\\}_{i = 1}^N$ $\\mathbb{V}[\\epsilon ]$ is the purely scale-invariant loss, while $\\mathbb{E}^2 [\\epsilon ]$ fosters a proper scale. $\\alpha$ and $\\lambda$ are set to 10 and 0.15, as customary.", + "bbox": [ + 75, + 824, + 472, + 901 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/f17c9cbe2353e72fa9a47066bbe82a251fe44385531a47ab4a393b4c33481f4f.jpg", + "image_caption": [ + "Figure 3. Qualitative results on NYU. Each pair of consecutive rows corresponds to one test sample. Each odd row shows the input RGB image and depth predictions for the selected methods. Each even row shows GT depth and the prediction errors of the selected methods clipped at 0.5 meters. The error color map is coolwarm: blue corresponds to lower error values and red to higher values." + ], + "image_footnote": [], + "bbox": [ + 501, + 89, + 890, + 419 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 500, + 522, + 633, + 539 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. 
Experimental Setup", + "text_level": 1, + "bbox": [ + 500, + 547, + 689, + 564 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1.1 Datasets", + "text_level": 1, + "bbox": [ + 500, + 571, + 612, + 585 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "NYU-Depth V2. NYU-Depth V2 (NYU) [36] is a dataset consisting of 464 indoor scenes with RGB images and quasi-dense depth images with $640 \\times 480$ resolution. Our models are trained on the train-test split proposed by previous methods [24], corresponding to 24,231 samples for training and 654 for testing. In addition to depth, the dataset provides surface normal data utilized for normal estimation. The train split used for normal estimation is the one proposed in [55]. Zero-shot testing datasets. We evaluate the generalizability of indoor models on two indoor datasets which are not seen during training. The selected datasets are SUN-RGBD [43] and DIODE-Indoor [47]. For both datasets, the resolution is reduced to match that of NYU, which is $640 \\times 480$ .", + "bbox": [ + 496, + 595, + 893, + 792 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "KITTI. The KITTI dataset provides stereo images and corresponding Velodyne LiDAR scans of outdoor scenes captured from a moving vehicle [17]. RGB and depth images have (mean) resolution of $1241 \\times 376$ . The split proposed by [12] (Eigen-split) with corrected depth is utilized as training and testing set, namely, 23,158 and 652 samples. The evaluation crop corresponds to the crop defined by [16]. All methods in", + "bbox": [ + 496, + 795, + 893, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "21481", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/ff6ee306e1cb02c3fe41324f2074246954ba091393b4961e92ac9170f0646dd3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 80, + 89, + 174, + 202 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/675d1465eb3306bd6beb3e5651aed532cfb3a7be85b64093f2b0912575370bfa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 176, + 89, + 271, + 202 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/fb2b1b07b4ce47c510f43908dbd66678d2fe1185f1055193b1f7e3b03a45ecdf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 274, + 89, + 369, + 202 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/f6e7cffc2765f3aa798183983e5ca1f370c86cb2488a29e108fa8c6126ce2456.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 370, + 89, + 467, + 202 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/904fc0d19d850dea0dadc74c8ace4454694f80b3d272c734cfaee9138136bc6d.jpg", + "image_caption": [ + "Figure 4. Attention maps on NYU for three different IDRs. Each row presents the attention map of a specific IDR for four test images. Each discrete representation focuses on a specific high-level concept. The first two rows pertain to IDRs at the lowest resolution while the last corresponds to the highest resolution. Best viewed on a screen and zoomed in." 
+ ], + "image_footnote": [], + "bbox": [ + 80, + 204, + 174, + 262 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/017c253393fdc1e27ae13869ab5c08dc90c5c32ab03993ec1a113d06fc076233.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 176, + 204, + 271, + 262 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/2c366c3f448c1840deb98ee2bd4e7c307ffbce404f27aa507b5515f4f1a85b32.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 274, + 204, + 369, + 262 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/fe820b1a19c9b104f02a279a1e79fe64e3f414d08c033ca8acb563ae78f3ca55.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 370, + 204, + 467, + 262 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Sec. 4.2 that have source code and pre-trained models available are re-evaluated on KITTI with the evaluation mask from [16] to have consistent results.", + "bbox": [ + 76, + 359, + 468, + 402 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Argoverse1.1 and DDAD. We propose splits of two autonomous driving datasets, Argoverse1.1 (Argoverse) [8] and DDAD [18], for depth estimation. Argoverse and DDAD are both outdoor datasets that provide $360^{\\circ}$ HD images and the corresponding LiDAR scans from moving vehicles. We pre-process the original datasets to extract depth maps and avoid redundancy. Training set scenes are sampled when the vehicle has been displaced by at least 2 meters from the previous sample. For the testing set scenes, we increase this threshold to 50 meters to further diminish redundancy. Our Argoverse split accounts for 21,672 training samples and 476 test samples, while DDAD for 18,380 training and 860 testing samples. Samples in Argoverse are taken from the 6 cameras covering the full $360^{\\circ}$ panorama. For DDAD, we exclude 2 out of the 6 cameras since they have more than $30\\%$ pixels occluded by the camera capture system. We crop both RGB images and depth maps to have $1920 \\times 870$ resolution that is 180px and 210px cropped from the top for Argoverse and DDAD, respectively, to crop out a large portion of the sky and regions occluded by the ego-vehicle. For both datasets, we clip the maximum depth at $150\\mathrm{m}$ .", + "bbox": [ + 76, + 405, + 470, + 723 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1.2 Implementation Details", + "text_level": 1, + "bbox": [ + 76, + 739, + 294, + 755 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation Details. In all experiments, we do not exploit any test-time augmentations (TTA), camera parameters, or other tricks and regularizations, in contrast to many previous methods [3, 13, 24, 38, 59]. This provides a more challenging setup, which allows us to show the effectiveness of iDisc. As depth estimation metrics, we utilize root mean square error (RMS) and its log variant $(\\mathrm{RMS}_{\\log})$ , absolute error in log-scale $(\\mathrm{Log}_{10})$ , absolute (A.Rel) and squared (S.rel) mean relative error, the percentage of inlier pixels $(\\delta_{i})$ with", + "bbox": [ + 76, + 763, + 468, + 901 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/3a56e25eef3de38009bdbb69e40ba514b767ac4fa0641cd6a4a4db145aca3a5e.jpg", + "table_caption": [ + "Table 1. Comparison on NYU official test set. 
R101: ResNet-101 [19], D161: DenseNet-161 [21], EB5: EfficientNet-B5 [45], HR48: HRNet-48 [48], DD22: DRN-D-22 [56], ViTB: ViT-B/16+Resnet-50 [11], MViT: EfficientNet-B5-AP [50] + MiniViT, Swin{L, B, T}: Swin-{Large, Base, Tiny} [31]. $(\\dagger)$ : ImageNet-22k [10] pretraining, $(\\ddagger)$ : non-standard training set, $(\\ast)$ : in-house dataset pretraining, $(\\S)$ : re-evaluated without GT-based rescaling." + ], + "table_footnote": [], + "table_body": "
Method | Encoder | δ1 | δ2 | δ3 | RMS | A.Rel | Log10
(δ1, δ2, δ3: higher is better; RMS, A.Rel, Log10: lower is better)
Eigen et al. [12] | - | 0.769 | 0.950 | 0.988 | 0.641 | 0.158 | -
DORN [13] | R101 | 0.828 | 0.965 | 0.992 | 0.509 | 0.115 | 0.051
VNL [55] | - | 0.875 | 0.976 | 0.994 | 0.416 | 0.108 | 0.048
BTS [24] | D161 | 0.885 | 0.978 | 0.994 | 0.392 | 0.110 | 0.047
AdaBins‡ [3] | MViT | 0.903 | 0.984 | 0.997 | 0.364 | 0.103 | 0.044
DAV [22] | DD22 | 0.882 | 0.980 | 0.996 | 0.412 | 0.108 | -
Long et al. [33] | HR48 | 0.890 | 0.982 | 0.996 | 0.377 | 0.101 | 0.044
TransDepth [54] | ViTB | 0.900 | 0.983 | 0.996 | 0.365 | 0.106 | 0.045
DPT* [41] | ViTB | 0.904 | 0.988 | 0.998 | 0.357 | 0.110 | 0.045
P3Depth§ [38] | R101 | 0.830 | 0.971 | 0.995 | 0.450 | 0.130 | 0.056
NeWCRF [59] | SwinL† | 0.922 | 0.992 | 0.998 | 0.334 | 0.095 | 0.041
LocalBins‡ [4] | MViT | 0.907 | 0.987 | 0.998 | 0.357 | 0.099 | 0.042
Ours | R101 | 0.892 | 0.983 | 0.995 | 0.380 | 0.109 | 0.046
Ours | EB5 | 0.903 | 0.986 | 0.997 | 0.369 | 0.104 | 0.044
Ours | SwinT | 0.894 | 0.983 | 0.996 | 0.377 | 0.109 | 0.045
Ours | SwinB | 0.926 | 0.989 | 0.997 | 0.327 | 0.091 | 0.039
Ours | SwinL† | 0.940 | 0.993 | 0.999 | 0.313 | 0.086 | 0.037
", + "bbox": [ + 504, + 186, + 890, + 388 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "threshold $1.25^{i}$ , and scale-invariant error in log-scale $(\\mathrm{SI}_{\\log})$ : $100\\sqrt{\\mathrm{Var}(\\epsilon_{\\log})}$ . The maximum depth for NYU and all zero-shot testing in indoor datasets, specifically SUN-RGBD and Diode Indoor, is set to $10\\mathrm{m}$ while for KITTI it is set to $80\\mathrm{m}$ and for Argoverse and DDAD to $150\\mathrm{m}$ . Zero-shot testing is performed by evaluating a model trained on either KITTI or NYU and tested on either outdoor or indoor datasets, respectively, without additional fine-tuning. For surface normals estimation, the metrics are mean (Mean) and median (Med) absolute error, RMS angular error, and percentages of inlier pixels with thresholds at $11.5^{\\circ}$ , $22.5^{\\circ}$ , and $30^{\\circ}$ . GT-based mean depth rescaling is applied only on Diode Indoor for all methods since the dataset presents largely scale-equivariant scenes, such as plain walls with tiny details.", + "bbox": [ + 496, + 407, + 893, + 619 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Training Details. We implement iDisc in PyTorch [37]. For training, we use the AdamW [34] optimizer $(\\beta_{1} = 0.9, \\beta_{2} = 0.999)$ with an initial learning rate of 0.0002 for every experiment, and weight decay set to 0.02. As a scheduler, we exploit Cosine Annealing starting from $30\\%$ of the training, with final learning rate of 0.00002. We run 45k optimization iterations with a batch size of 16. All backbones are initialized with weights from ImageNet-pretrained models. The augmentations include both geometric (random rotation and scale) and appearance (random brightness, gamma, saturation, hue shift) augmentations. The required training time amounts to 20 hours on 4 NVidia Titan RTX.", + "bbox": [ + 496, + 619, + 893, + 801 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Comparison with the State of the Art", + "text_level": 1, + "bbox": [ + 498, + 815, + 821, + 830 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Indoor Datasets. Results on NYU are presented in Table 1. The results show that we set the new state of the art on the benchmark, improving by more than $6\\%$ on RMS and $9\\%$ on A.Rel over the previous SotA. Moreover, results highlight", + "bbox": [ + 496, + 839, + 893, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "21482", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/f88fc9362f0b2803734373b82754f4c8850feac95ec480196966f6422e7add67.jpg", + "table_caption": [ + "Table 2. Zero-shot testing of models trained on NYU. All methods are trained on NYU and tested without further fine-tuning on the official validation set of SUN-RGBD and Diode Indoor." + ], + "table_footnote": [], + "table_body": "
Test set | Method | δ1 ↑ | RMS ↓ | A.Rel ↓ | SIlog ↓
SUN-RGBD | BTS [24] | 0.745 | 0.502 | 0.168 | 14.25
SUN-RGBD | AdaBins [3] | 0.768 | 0.476 | 0.155 | 13.20
SUN-RGBD | P3Depth [38] | 0.698 | 0.541 | 0.178 | 15.02
SUN-RGBD | NeWCRF [59] | 0.799 | 0.429 | 0.150 | 11.27
SUN-RGBD | Ours | 0.838 | 0.387 | 0.128 | 10.91
Diode | BTS [24] | 0.705 | 0.965 | 0.211 | 23.78
Diode | AdaBins [3] | 0.733 | 0.872 | 0.209 | 22.54
Diode | P3Depth [38] | 0.732 | 0.877 | 0.202 | 22.16
Diode | NeWCRF [59] | 0.799 | 0.769 | 0.164 | 18.69
Diode | Ours | 0.810 | 0.721 | 0.156 | 18.11
", + "bbox": [ + 81, + 130, + 468, + 281 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "how iDisc is more sample-efficient than other transformer-based architectures [3,4,41,54,59] since we achieve better results even when employing smaller and less heavily pretrained backbone architectures. In addition, results show a significant improvement in performance with our model instantiated with a full-convolutional backbone over other full-convolutional-based models [12, 13, 22, 24, 38]. Table 2 presents zero-shot testing of NYU models on SUN-RGBD and Diode. In both cases, iDisc exhibits a compelling generalization performance, which we argue is due to implicitly learning the underlying patterns, namely, IDRs, of indoor scene structure via the ID module.", + "bbox": [ + 75, + 296, + 468, + 474 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Qualitative results in Fig. 3 emphasize how the method excels in capturing the overall scene complexity. In particular, iDisc correctly captures discontinuities without depth over-excitation due to chromatic edges, such as the sink in row 1, and captures the right perspectivity between foreground and background depth planes such as between the bed (row 2) or sofa (row 3) and the walls behind. In addition, the model presents a reduced error around edges, even when compared to higher-resolution models such as [3]. We argue that iDisc actually reasons at the pattern level, thus capturing better the structure of the scene. This is particularly appreciable in indoor scenes, since these are usually populated by a multitude of objects. This behavior is displayed in the attention maps of Fig. 4. Fig. 4 shows how IDRs at lower resolution capture specific components, such as the relative position of the background (row 1) and foreground objects (row 2), while IDRs at higher resolution behave as depth refiners, attending typically to high-frequency features, such as upper (row 3) or lower borders of objects. It is worth noting that an IDR attends to the image borders when the particular concept it looks for is not present in the image. That is, the borders are the last resort in which the IDR tries to find its corresponding pattern (e.g., row 2, col. 1).", + "bbox": [ + 75, + 478, + 470, + 824 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Outdoor Datasets. Results on KITTI in Table 3 demonstrate that iDisc sets the new SotA for this primary outdoor dataset, improving by more than $3\\%$ in RMS and by $0.9\\%$ in $\\delta_{0.5}$ over the previous SotA. However, KITTI results present saturated metrics. For instance, $\\delta_{3}$ is not reported since ev", + "bbox": [ + 75, + 825, + 470, + 901 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3. Comparison on KITTI Eigen-split test set. Models without $\\delta_{0.5}$ have implementation (partially) unavailable. R101: ResNet-101 [19], D161: DenseNet-161 [21], EB5: EfficientNet-B5 [45], ViTB: ViT-B/16+Resnet-50 [11], MViT: EfficientNet-B5-AP [50] + MiniViT, Swin{L, B, T}: Swin-{Large, Base, Tiny} [31]. ( $\\dagger$ ): ImageNet-22k [10] pretraining, ( $\\ddagger$ ): non-standard training set, (*): in-house dataset pretraining, ( $\\S$ ): re-evaluated without GT-based rescaling.", + "bbox": [ + 496, + 88, + 893, + 199 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/a40dd424b450428ebab2f5663dd3d48490f74d1aad73d98322f53901131fac1b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | Encoder | δ0.5 | δ1 | δ2 | RMS | RMSlog | A.Rel | S.Rel
(δ0.5, δ1, δ2: higher is better; RMS, RMSlog, A.Rel, S.Rel: lower is better)
Eigen et al. [12] | - | - | 0.692 | 0.899 | 7.156 | 0.270 | 0.190 | 1.515
DORN [13] | R101 | - | 0.932 | 0.984 | 2.727 | 0.120 | 0.072 | 0.307
BTS [24] | D161 | 0.870 | 0.964 | 0.995 | 2.459 | 0.090 | 0.057 | 0.199
AdaBins‡ [3] | MViT | 0.868 | 0.964 | 0.995 | 2.360 | 0.088 | 0.058 | 0.199
TransDepth [54] | ViTB | - | 0.956 | 0.994 | 2.755 | 0.098 | 0.064 | 0.252
DPT* [41] | ViTB | 0.865 | 0.965 | 0.996 | 2.315 | 0.088 | 0.059 | 0.190
P3Depth§ [38] | R101 | 0.852 | 0.959 | 0.994 | 2.519 | 0.095 | 0.060 | 0.206
NeWCRF [59] | SwinL† | 0.887 | 0.974 | 0.997 | 2.129 | 0.079 | 0.052 | 0.155
Ours | R101 | 0.860 | 0.965 | 0.996 | 2.362 | 0.090 | 0.059 | 0.197
Ours | EB5 | 0.852 | 0.963 | 0.994 | 2.510 | 0.094 | 0.063 | 0.223
Ours | SwinT | 0.870 | 0.968 | 0.996 | 2.291 | 0.087 | 0.058 | 0.184
Ours | SwinB | 0.885 | 0.974 | 0.997 | 2.149 | 0.081 | 0.054 | 0.159
Ours | SwinL† | 0.896 | 0.977 | 0.997 | 2.067 | 0.077 | 0.050 | 0.145
", + "bbox": [ + 503, + 199, + 893, + 345 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/531494df91ae6dac1f9b36e16d81d4127311ea43d8a1d62f6d6b512d8fa259a3.jpg", + "table_caption": [ + "Table 4. Comparison on Argoverse and DDAD proposed splits. Comparison of performance of methods trained on either Argoverse or DDAD and tested on the same dataset." + ], + "table_footnote": [], + "table_body": "
Dataset | Method | δ1 | δ2 | δ3 | RMS | RMSlog | A.Rel | S.Rel
(δ1, δ2, δ3: higher is better; RMS, RMSlog, A.Rel, S.Rel: lower is better)
Argoverse | BTS [24] | 0.780 | 0.908 | 0.954 | 8.319 | 0.267 | 0.186 | 2.56
Argoverse | AdaBins [3] | 0.750 | 0.901 | 0.952 | 8.686 | 0.278 | 0.195 | 2.36
Argoverse | NeWCRF [59] | 0.707 | 0.871 | 0.939 | 9.437 | 0.321 | 0.232 | 3.23
Argoverse | Ours | 0.821 | 0.923 | 0.960 | 7.567 | 0.243 | 0.163 | 2.22
DDAD | BTS [24] | 0.757 | 0.913 | 0.962 | 10.11 | 0.251 | 0.186 | 2.27
DDAD | AdaBins [3] | 0.748 | 0.912 | 0.962 | 10.24 | 0.255 | 0.201 | 2.30
DDAD | NeWCRF [59] | 0.702 | 0.881 | 0.951 | 10.98 | 0.271 | 0.219 | 2.83
DDAD | Ours | 0.809 | 0.934 | 0.971 | 8.989 | 0.221 | 0.163 | 1.85
", + "bbox": [ + 503, + 401, + 893, + 508 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "ery method scores $>0.99$ , with recent ones scoring 0.999. Therefore, we propose to utilize the metric $\\delta_{0.5}$ , to better convey meaningful evaluation information. In addition, iDisc performs remarkably well on the highly competitive official KITTI benchmark, ranking $3^{\\mathrm{rd}}$ among all methods and $1^{\\mathrm{st}}$ among all published MDE methods.", + "bbox": [ + 496, + 523, + 893, + 613 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Moreover, Table 4 shows the results of methods trained and evaluated on the splits from Argoverse and DDAD proposed in this work. All methods have been trained with the same architecture and pipeline utilized for training on KITTI. We argue that the high degree of sparseness in GT of the two proposed datasets, in contrast to KITTI, deeply affects windowed methods such as [3, 59]. Qualitative results in Fig. 5 suggest that the scene level discretization leads to retaining small objects and sharp transitions between foreground objects and background: background in row 1, and boxes in row 2. These results show the better ability of iDisc to capture fine-grained depth variations on close-by and similar objects, including crowd in row 3. Zero-shot testing from KITTI to DDAD and Argoverse are presented in Supplement.", + "bbox": [ + 496, + 613, + 895, + 839 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Surface Normals Estimation. We emphasize that the proposed method has more general applications by testing iDisc on a different continuous dense prediction task such as surface normals estimation. Results in Table 5 evidence that we", + "bbox": [ + 496, + 839, + 895, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "21483", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f82b0babd1075fcb975a78fbf0e4a4b041b327437274f1d2c11d1b5647e1e348.jpg", + "image_caption": [ + "Figure 5. Qualitative results on KITTI. Three zoomed-in crops of different test images are shown. The comparisons show the ability of iDisc to capture small details, proper background transition, and fine-grained variations in, e.g., crowded scenes. Best viewed on a screen." + ], + "image_footnote": [], + "bbox": [ + 80, + 88, + 279, + 255 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/41ded1307a8edf0d1920bc3db437e4594b87fa46abb3465d079ee4ff03c674a2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 282, + 88, + 480, + 255 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/54bdec52e72f6686cee7da9a7e096a354a8073ae592f317a8588dac5c29e1794.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 483, + 88, + 684, + 255 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8eab2abe2f01cf49224ff05f658f93e28df1652ef233a7d1deba6c06ed77b7b6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 686, + 88, + 890, + 255 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/9c5be2e3a32de141f8fa8143e07bbe6bd8f5c79b236f7ebaa795f7c4157bbe42.jpg", + "table_caption": [ + "Table 5. Comparison of surface normals estimation methods on NYU official test set. iDisc architecture and training pipeline is the same as the one utilized for indoor depth estimation." + ], + "table_footnote": [], + "table_body": "
Method | 11.5° | 22.5° | 30° | RMS | Mean | Med
(11.5°, 22.5°, 30°: higher is better; RMS, Mean, Med: lower is better)
SURGE [49] | 0.473 | 0.689 | 0.766 | - | 20.6 | 12.2
GeoNet [39] | 0.484 | 0.484 | 0.795 | 26.9 | 19.0 | 11.8
PAP [61] | 0.488 | 0.722 | 0.798 | 25.5 | 18.6 | 11.7
GeoNet++ [40] | 0.502 | 0.732 | 0.807 | 26.7 | 18.5 | 11.2
Bae et al. [1] | 0.622 | 0.793 | 0.852 | 23.5 | 14.9 | 7.5
Ours | 0.638 | 0.798 | 0.856 | 22.8 | 14.6 | 7.3
", + "bbox": [ + 80, + 343, + 468, + 457 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "set the new state of the art on surface normals estimation. It is worth mentioning that all other methods are specifically designed for normals estimation, while we keep the same architecture and framework from indoor depth estimation.", + "bbox": [ + 75, + 470, + 468, + 532 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3. Ablation study", + "text_level": 1, + "bbox": [ + 76, + 542, + 227, + 559 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The importance of each component introduced in iDisc is evaluated by ablating the method in Table 6.", + "bbox": [ + 75, + 566, + 468, + 595 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Depth Discretization. Internal scene discretization provides a clear improvement over its explicit counterpart (row 3 vs. 2), which is already beneficial in terms of robustness. Adding the MSDA module on top of explicit discretization (row 5) recovers part of the performance gap between the latter and our full method (row 8). We argue that MSDA recovers a better scene scale by refining feature maps at different scales at once, which is helpful for higher-resolution feature maps.", + "bbox": [ + 75, + 598, + 468, + 718 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Component Interactions. Using either the MSDA module or the AFP module together with internal scene discretization results in similar performance (rows 4 and 6). We argue that the two modules are complementary, and they synergize when combined (row 8). The complementarity can be explained as follows: in the former scenario (row 4), MSDA preemptively refines feature maps to be partitioned by the non-adaptive clustering, that is, by the IDR priors described in Sec. 3, while on latter one (row 6), AFP allows the IDRs to adapt themselves to partition the unrefined feature space properly. Row 7 shows that the architecture closer to the one in [32], particularly random initialization, hurts perfor", + "bbox": [ + 75, + 719, + 470, + 901 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 6. Ablation of iDisc. EDD: Explicit Depth Discretization [3, 13], ISD: Internal Scene discretization, AFP: Adaptive Feature Partitioning, MSDA: MultiScale Deformable Attention. The EDD module, used in SotA methods, and our ISD module are mutually exclusive. AFP with $(\\checkmark_{\\mathbf{R}})$ refers to random initialization of IDRs and architecture similar to [32]. The last row corresponds to our complete iDisc model.", + "bbox": [ + 498, + 301, + 890, + 398 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/7bdb7e522195c96f329e79574ce4951b328c3feae85d2667f44e72cf96810a80.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
# | EDD | ISD | AFP | MSDA | δ1 ↑ | RMS ↓ | A.Rel ↓
1 | X | X | X | X | 0.890 | 0.370 | 0.104
2 | ✓ | X | X | X | 0.905 | 0.367 | 0.102
3 | X | ✓ | X | X | 0.919 | 0.340 | 0.096
4 | X | ✓ | X | ✓ | 0.931 | 0.319 | 0.091
5 | ✓ | X | X | ✓ | 0.931 | 0.326 | 0.091
6 | X | ✓ | ✓ | X | 0.934 | 0.319 | 0.088
7 | X | ✓ | ✓R | ✓ | 0.930 | 0.319 | 0.089
8 | X | ✓ | ✓ | ✓ | 0.940 | 0.313 | 0.086
", + "bbox": [ + 503, + 398, + 888, + 534 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "mance since the internal representations do not embody any domain-specific prior information.", + "bbox": [ + 498, + 547, + 890, + 578 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 590, + 617, + 607 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We have introduced a new module, called Internal Discretization, for MDE. The module represents the assumption that scenes can be represented as a finite set of patterns. Hence, iDisc leverages an internally discretized representation of the scene that is enforced via a continuous-discrete-continuous bottleneck, namely ID module. We have validated the proposed method, without any TTA or tricks, on the primary indoor and outdoor benchmarks for MDE, and have set the new state of the art among supervised approaches. Results showed that learning the underlying patterns, while not imposing any explicit constraints or regularization on the output, is beneficial for performance and generalization. iDisc also works out-of-the-box for normal estimation, beating all specialized SotA methods. In addition, we propose two new challenging outdoor dataset splits, aiming to benefit the community with more general and diverse benchmarks.", + "bbox": [ + 496, + 616, + 893, + 858 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgment. This work is funded by Toyota Motor Europe via the research project TRACE-Zürich.", + "bbox": [ + 498, + 869, + 890, + 901 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "21484", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Gwangbin Bae, Ignas Budvytis, and Roberto Cipolla. Estimating and exploiting the aleatoric uncertainty in surface normal estimation. Proceedings of the IEEE International Conference on Computer Vision, pages 13117-13126, 9 2021. 8", + "[2] Gwangbin Bae, Ignas Budvytis, and Roberto Cipolla. Irondepth: Iterative refinement of single-view depth using surface normal and its uncertainty. In *British Machine Vision Conference (BMVC)*, 2022. 2", + "[3] Shariq Farooq Bhat, Ibrahim Alhashim, and Peter Wonka. Adabins: Depth estimation using adaptive bins. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 4008-4017, 11 2020. 1, 2, 3, 4, 5, 6, 7, 8", + "[4] Shariq Farooq Bhat, Ibraheem Alhashim, and Peter Wonka. Localbins: Improving depth estimation by learning local distributions. In European Conference Computer Vision (ECCV), pages 480-496, 2022. 1, 2, 3, 4, 6, 7", + "[5] András Bódis-Szomóru, Hayko Riemenschneider, and Luc Van Gool. Fast, approximate piecewise-planar modeling based on sparse structure-from-motion and superpixels. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 469-476, 9 2014. 2", + "[6] Yuanzhouhan Cao, Zifeng Wu, and Chunhua Shen. Estimating depth from monocular images as classification using deep fully convolutional residual networks. IEEE Transactions on Circuits and Systems for Video Technology, 28:3174-3182, 5 2016. 2", + "[7] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. 
End-to-end object detection with transformers. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), 12346 LNCS:213-229, 5 2020. 4", + "[8] Ming Fang Chang, John Lambert, Patsorn Sangkloy, Jagjeet Singh, Slawomir Bak, Andrew Hartnett, De Wang, Peter Carr, Simon Lucey, Deva Ramanan, and James Hays. Argoverse: 3d tracking and forecasting with rich maps. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2019-June:8740-8749, 11 2019. 2, 6", + "[9] Anne Laure Chauve, Patrick Labatut, and Jean Philippe Pons. Robust piecewise-planar 3d reconstruction and completion from large-scale unstructured point data. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 1261-1268, 2010. 2", + "[10] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248–255, 2009. 6, 7", + "[11] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at" + ], + "bbox": [ + 78, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "scale. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. 1, 6, 7", + "[12] David Eigen, Christian Puhrsch, and Rob Fergus. Depth map prediction from a single image using a multi-scale deep network. Advances in Neural Information Processing Systems, 3:2366-2374, 6 2014. 1, 2, 5, 6, 7", + "[13] Huan Fu, Mingming Gong, Chaohui Wang, Kayhan Bat-manghelich, and Dacheng Tao. Deep ordinal regression network for monocular depth estimation. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 2002-2011, 6 2018. 1, 2, 3, 4, 6, 7, 8", + "[14] David Gallup, Jan Michael Frahm, and Marc Pollefeys. Piecewise planar and non-planar stereo for urban scene reconstruction. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 1418-1425, 2010. 2", + "[15] Peng Gao, Minghang Zheng, Xiaogang Wang, Jifeng Dai, and Hongsheng Li. Fast convergence of detr with spatially modulated co-attention. Proceedings of the IEEE International Conference on Computer Vision, pages 3601-3610, 8 2021. 4", + "[16] Ravi Garg, BG Vijay Kumar, Gustavo Carneiro, and Ian Reid. Unsupervised cnn for single view depth estimation: Geometry to the rescue. In European Conference on Computer Vision, pages 740-756. Springer, 2016. 5, 6", + "[17] Andreas Geiger, Philip Lenz, and Raquel Urtasun. Are we ready for autonomous driving? the kitti vision benchmark suite. In Conference on Computer Vision and Pattern Recognition (CVPR), 2012. 2, 5", + "[18] Vitor Guizilini, Rares Ambrus, Sudeep Pillai, Allan Raventos, and Adrien Gaidon. 3d packing for self-supervised monocular depth estimation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 6", + "[19] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. 
Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2016-December:770-778, 12 2015. 6, 7", + "[20] Geoffrey E. Hinton, Sara Sabour, and Nicholas Frosst. Matrix capsules with EM routing. In 6th International Conference on Learning Representations, ICLR, 2018. 2", + "[21] Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q. Weinberger. Densely connected convolutional networks. Proceedings - 30th IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, 2017-January:2261-2269, 8 2016. 6, 7", + "[22] Lam Huynh, Phong Nguyen-Ha, Jiri Matas, Esa Rahtu, and Janne Heikkilä. Guiding monocular depth estimation using depth-attention volume. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), 12371 LNCS:581-597, 4 2020. 1, 6, 7", + "[23] Iro Laina, Christian Rupprecht, Vasileios Belagiannis, Federico Tombari, and Nassir Navab. Deeper depth prediction with fully convolutional residual networks. Proceedings - 2016 4th International Conference on 3D Vision, 3DV 2016, pages 239-248, 6 2016. 2" + ], + "bbox": [ + 503, + 92, + 893, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "21485", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[24] Jin Han Lee, Myung-Kyu Han, Dong Wook Ko, and Il Hong Suh. From big to small: Multi-scale local planar guidance for monocular depth estimation. arXiv e-prints, abs/1907.10326, 7 2019. 1, 2, 5, 6, 7", + "[25] Jae Han Lee, Minhyeok Heo, Kyung Rae Kim, and Chang Su Kim. Single-image depth estimation based on fourier domain analysis. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 330-339, 12 2018. 2", + "[26] Boying Li, Yuan Huang, Zeyu Liu, Danping Zou, and Wenxian Yu. Structdepth: Leveraging the structural regularities for self-supervised indoor depth estimation. Proceedings of the IEEE International Conference on Computer Vision, pages 12643-12653, 8 2021. 2", + "[27] Zhenyu Li, Zehui Chen, Xianming Liu, and Junjun Jiang. Depthformer: Exploiting long-range correlation and local information for accurate monocular depth estimation. arXiv e-prints, abs/2203.14211, 3 2022. 4", + "[28] Chen Liu, Kihwan Kim, Jinwei Gu, Yasutaka Furukawa, and Jan Kautz. Planercnn: 3d plane detection and reconstruction from a single image. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2019-June:4445-4454, 12 2018. 2", + "[29] Chen Liu, Jimei Yang, Duygu Ceylan, Ersin Yumer, and Yasutaka Furukawa. Planenet: Piece-wise planar reconstruction from a single rgb image. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 2579-2588, 4 2018. 2", + "[30] Fayao Liu, Chunhua Shen, Guosheng Lin, and Ian Reid. Learning depth from single monocular images using deep convolutional neural fields. IEEE Transactions on Pattern Analysis and Machine Intelligence, 38:2024-2039, 2 2015. 2", + "[31] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. Proceedings of the IEEE International Conference on Computer Vision, pages 9992-10002, 3 2021. 6, 7", + "[32] Francesco Locatello, Dirk Weissenborn, Thomas Unterthiner, Aravindh Mahendran, Georg Heigold, Jakob Uszkoreit, Alexey Dosovitskiy, and Thomas Kipf. 
Object-centric learning with slot attention. Advances in Neural Information Processing Systems, 2020-December, 6 2020. 2, 3, 4, 8", + "[33] Xiaoxiao Long, Cheng Lin, Lingjie Liu, Wei Li, Christian Theobalt, Ruigang Yang, and Wenping Wang. Adaptive surface normal constraint for depth estimation. Proceedings of the IEEE International Conference on Computer Vision, pages 12829-12838, 3 2021. 1, 2, 6", + "[34] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. 7th International Conference on Learning Representations, ICLR 2019, 11 2017. 6", + "[35] S. H. Mahdi Miangoleh, Sebastian Dille, Long Mai, Sylvain Paris, and Yagiz Aksoy. Boosting monocular depth estimation models to high-resolution via content-adaptive multi-resolution merging. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 9680-9689, 5 2021. 2" + ], + "bbox": [ + 78, + 90, + 470, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[36] Pushmeet Kohli Nathan Silberman, Derek Hoiem and Rob Fergus. Indoor segmentation and support inference from rgbd images. In ECCV, 2012. 5", + "[37] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems 32, pages 8024-8035. Curran Associates, Inc., 2019. 6", + "[38] Vaishakh Patil, Christos Sakaridis, Alexander Liniger, and Luc Van Gool. P3Depth: Monocular depth estimation with a piecewise planarity prior. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR, pages 1600-1611. IEEE, 2022. 1, 2, 6, 7", + "[39] Xiaojuan Qi, Renjie Liao, Zhengzhe Liu, Raquel Urtasun, and Jiaya Jia. Geonet: Geometric neural network for joint depth and surface normal estimation. In 2018 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2018, Salt Lake City, UT, USA, June 18-22, 2018, pages 283-291. Computer Vision Foundation / IEEE Computer Society, 2018. 2, 8", + "[40] Xiaojuan Qi, Zhengzhe Liu, Renjie Liao, Philip H. S. Torr, Raquel Urtasun, and Jiaya Jia. Geonet++: Iterative geometric neural network with edge-aware refinement for joint depth and surface normal estimation. IEEE Trans. Pattern Anal. Mach. Intell., 44(2):969-984, 2022. 8", + "[41] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. Proceedings of the IEEE International Conference on Computer Vision, pages 12159-12168, 3 2021. 1, 6, 7", + "[42] Sara Sabour, Nicholas Frosst, and Geoffrey E. Hinton. Dynamic routing between capsules. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett, editors, Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pages 3856-3866, 2017. 2", + "[43] Shuran Song, Samuel P. Lichtenberg, and Jianxiong Xiao. Sun rgb-d: A rgb-d scene understanding benchmark suite. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 07-12-June-2015:567-576, 10 2015. 5", + "[44] Zhiqing Sun, Shengcao Cao, Yiming Yang, and Kris Kitani. 
Rethinking transformer-based set prediction for object detection. Proceedings of the IEEE International Conference on Computer Vision, pages 3591-3600, 11 2020. 4", + "[45] Mingxing Tan and Quoc V. Le. Efficientnet: Rethinking model scaling for convolutional neural networks. 36th International Conference on Machine Learning, ICML 2019, 2019-June:10691-10700, 5 2019. 6, 7", + "[46] Yao-Hung Hubert Tsai, Nitish Srivastava, Hanlin Goh, and Ruslan Salakhutdinov. Capsules with inverted dot-product attention routing. arXiv e-prints, abs/2002.04764, 2020. 2" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "21486", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[47] Igor Vasiljevic, Nicholas I. Kolkin, Shanyi Zhang, Ruotian Luo, Haochen Wang, Falcon Z. Dai, Andrea F. Daniele, Mohammadreza Mostajabi, Steven Basart, Matthew R. Walter, and Gregory Shakhnarovich. DIODE: A dense indoor and outdoor depth dataset. arXiv e-prints, abs/1908.00463, 2019. 5", + "[48] Jingdong Wang, Ke Sun, Tianheng Cheng, Borui Jiang, Chaorui Deng, Yang Zhao, Dong Liu, Yadong Mu, Mingkui Tan, Xinggang Wang, Wenyu Liu, and Bin Xiao. Deep high-resolution representation learning for visual recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43:3349-3364, 8 2019. 6", + "[49] Peng Wang, Xiaohui Shen, Bryan C. Russell, Scott Cohen, Brian L. Price, and Alan L. Yuille. SURGE: surface regularized geometry estimation from a single image. In Daniel D. Lee, Masashi Sugiyama, Ulrike von Luxburg, Isabelle Guyon, and Roman Garnett, editors, Advances in Neural Information Processing Systems, pages 172-180, 2016. 8", + "[50] Cihang Xie, Mingxing Tan, Boqing Gong, Jiang Wang, Alan L. Yuille, and Quoc V. Le. Adversarial examples improve image recognition. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 816-825, 11 2019. 6, 7", + "[51] Dan Xu, Wanli Ouyang, Xiaogang Wang, and Nicu Sebe. Pad-net: Multi-tasks guided prediction-and-distillation network for simultaneous depth estimation and scene parsing. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 675-684, 5 2018. 2", + "[52] Dan Xu, Wei Wang, Hao Tang, Hong Liu, Nicu Sebe, and Elisa Ricci. Structured attention guided convolutional neural fields for monocular depth estimation. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 3917-3925, 3 2018. 2", + "[53] Fengting Yang and Zihan Zhou. Recovering 3d planes from a single image via convolutional neural networks. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), 11214 LNCS:87-103, 2018. 2", + "[54] Guanglei Yang, Hao Tang, Mingli Ding, Nicu Sebe, and Elisa Ricci. Transformer-based attention networks for continuous pixel-wise prediction. Proceedings of the IEEE International Conference on Computer Vision, pages 16249-16259, 3 2021. 1, 2, 6, 7", + "[55] Wei Yin, Yifan Liu, Chunhua Shen, and Youliang Yan. Enforcing geometric constraints of virtual normal for depth prediction. Proceedings of the IEEE International Conference on Computer Vision, pages 5683-5692, 7 2019. 1, 2, 5, 6", + "[56] Fisher Yu, Vladlen Koltun, and Thomas Funkhouser. Dilated residual networks. 
Proceedings - 30th IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, 2017-January:636-644, 5 2017. 6", + "[57] Zehao Yu, Lei Jin, and Shenghua Gao. $\\mathbf{P}^2$ net: Patch-match and plane-regularization for unsupervised indoor depth estimation. In European Conference on Computer Vision, pages 206–222, 7 2020. 2", + "[58] Zehao Yu, Jia Zheng, Dongze Lian, Zihan Zhou, and Shenghua Gao. Single-image piece-wise planar 3d recon" + ], + "bbox": [ + 78, + 90, + 470, + 902 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "struction via associative embedding. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2019-June:1029-1037, 2 2019. 2", + "[59] Weihao Yuan, Xiaodong Gu, Zuozhuo Dai, Siyu Zhu, and Ping Tan. Neural window fully-connected crfs for monocular depth estimation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR, pages 3906-3915. IEEE, 2022. 1, 2, 5, 6, 7, 8", + "[60] Weidong Zhang, Wei Zhang, and Yinda Zhang. Geolayout: Geometry driven room layout estimation based on depth maps of planes. In European Conference on Computer Vision, pages 632-648. Springer Science and Business Media Deutschland GmbH, 8 2020. 2", + "[61] Zhenyu Zhang, Zhen Cui, Chunyan Xu, Yan Yan, Nicu Sebe, and Jian Yang. Pattern-affinitive propagation across depth, surface normal and semantic segmentation. In IEEE Computer Society Conference on Computer Vision and Pattern Recognition CVPR, pages 4101-4110, 6 2019. 8", + "[62] Brady Zhou, Philipp Krahenbuhl, and Vladlen Koltun. Does computer vision matter for action? Science Robotics, 4, 5 2019. 1", + "[63] Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, and Jifeng Dai. Deformable DETR: deformable transformers for end-to-end object detection. In 9th International Conference on Learning Representations ICLR, 2021. 5" + ], + "bbox": [ + 503, + 92, + 893, + 444 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "21487", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2023/iDisc_ Internal Discretization for Monocular Depth Estimation/f111aba4-6ed5-4778-a051-8ab6247508d3_model.json b/2023/iDisc_ Internal Discretization for Monocular Depth Estimation/f111aba4-6ed5-4778-a051-8ab6247508d3_model.json new file mode 100644 index 0000000000000000000000000000000000000000..4ac21652918dc4eab8019895ddeb4a3b8eb3e0be --- /dev/null +++ b/2023/iDisc_ Internal Discretization for Monocular Depth Estimation/f111aba4-6ed5-4778-a051-8ab6247508d3_model.json @@ -0,0 +1,2246 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.17, + 0.131, + 0.802, + 0.152 + ], + "angle": 0, + "content": "iDisc: Internal Discretization for Monocular Depth Estimation" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.181, + 0.697, + 0.199 + ], + "angle": 0, + "content": "Luigi Piccinelli Christos Sakaridis Fisher Yu" + }, + { + "type": "text", + "bbox": [ + 0.342, + 0.206, + 0.623, + 0.224 + ], + "angle": 0, + "content": "Computer Vision Lab, ETH Zürich" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.259, + 0.314, + 0.274 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.293, + 0.474, + 0.671 + ], + "angle": 0, + "content": "Monocular depth estimation is fundamental for 3D scene understanding and downstream applications. However, even under the supervised setup, it is still challenging and ill-posed due to the lack of full geometric constraints. Although a scene can consist of millions of pixels, there are fewer high-level patterns. We propose iDisc to learn those patterns with internal discretized representations. The method implicitly partitions the scene into a set of high-level patterns. In particular, our new module, Internal Discretization (ID), implements a continuous-discrete-continuous bottleneck to learn those concepts without supervision. In contrast to state-of-the-art methods, the proposed model does not enforce any explicit constraints or priors on the depth output. The whole network with the ID module can be trained end-to-end, thanks to the bottleneck module based on attention. Our method sets the new state of the art with significant improvements on NYU-Depth v2 and KITTI, outperforming all published methods on the official KITTI benchmark. iDisc can also achieve state-of-the-art results on surface normal estimation. Further, we explore the model generalization capability via zero-shot testing. We observe the compelling need to promote diversification in the outdoor scenario. Hence, we introduce splits of two autonomous driving datasets, DDAD and Argoverse. Code is available at http://vis.xyz/pub/idisc." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.693, + 0.21, + 0.708 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.472, + 0.901 + ], + "angle": 0, + "content": "Depth estimation is essential in computer vision, especially for understanding geometric relations in a scene. This task consists in predicting the distance between the projection center and the 3D point corresponding to each pixel. Depth estimation finds direct significance in downstream applications such as 3D modeling, robotics, and autonomous cars. Some research [62] shows that depth estimation is a crucial prompt to be leveraged for action reasoning and execution. In particular, we tackle the task of monocular depth estimation (MDE). MDE is an ill-posed problem due to its inherent scale ambiguity: the same 2D input image can correspond to an infinite number of 3D scenes." 
+ }, + { + "type": "image", + "bbox": [ + 0.503, + 0.258, + 0.697, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.558, + 0.368, + 0.641, + 0.38 + ], + "angle": 0, + "content": "(a) Input image" + }, + { + "type": "image", + "bbox": [ + 0.698, + 0.258, + 0.892, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.749, + 0.368, + 0.839, + 0.38 + ], + "angle": 0, + "content": "(b) Output depth" + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.384, + 0.697, + 0.492 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.515, + 0.494, + 0.684, + 0.506 + ], + "angle": 0, + "content": "(c) Intermediate representations" + }, + { + "type": "image", + "bbox": [ + 0.698, + 0.384, + 0.892, + 0.492 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.726, + 0.494, + 0.862, + 0.506 + ], + "angle": 0, + "content": "(d) Internal discretization" + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.512, + 0.895, + 0.636 + ], + "angle": 0, + "content": "Figure 1. We propose iDisc which implicitly enforces an internal discretization of the scene via a continuous-discrete-continuous bottleneck. Supervision is applied to the output depth only, i.e., the fused intermediate representations in (c), while the internal discrete representations are implicitly learned by the model. (d) displays some actual internal discretization patterns captured from the input, e.g., foreground, object relationships, and 3D planes. Our iDisc model is able to predict high-quality depth maps by capturing scene interactions and structure." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.655, + 0.895, + 0.804 + ], + "angle": 0, + "content": "State-of-the-art (SotA) methods typically involve convolutional networks [12, 13, 24] or, since the advent of vision Transformer [11], transformer architectures [3, 41, 54, 59]. Most methods either impose geometric constraints on the image [22, 33, 38, 55], namely, planarity priors or explicitly discretize the continuous depth range [3, 4, 13]. The latter can be viewed as learning frontoparallel planes. These imposed priors inherently limit the expressiveness of the respective models, as they cannot model arbitrary depth patterns, ubiquitous in real-world scenes." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.897, + 0.903 + ], + "angle": 0, + "content": "We instead propose a more general depth estimation model, called iDisc, which does not explicitly impose any constraint on the final prediction. We design an Internal Discretization (ID) of the scene which is in principle depth-agnostic. Our assumption behind this ID is that each scene can be implicitly described by a set of concepts or patterns," + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "21477" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.471, + 0.137 + ], + "angle": 0, + "content": "such as objects, planes, edges, and perspectivity relationships. The specific training signal determines which patterns to learn (see Fig. 1)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.137, + 0.472, + 0.439 + ], + "angle": 0, + "content": "We design a continuous-to-discrete bottleneck through which the information is passed in order to obtain such internal scene discretization, namely the underlying patterns. 
In the bottleneck, the scene feature space is partitioned via learnable and input-dependent quantizers, which in turn transfer the information onto the continuous output space. The ID bottleneck introduced in this work is a general concept and can be implemented in several ways. Our particular ID implementation employs attention-based operators, leading to an end-to-end trainable architecture and input-dependent framework. More specifically, we implement the continuous-to-discrete operation via \"transposed\" cross-attention, where transposed refers to applying softmax on the output dimension. This softmax formulation enforces the input features to be routed to the internal discrete representations (IDRs) in an exclusive fashion, thus defining an input-dependent soft clustering of the feature space. The discrete-to-continuous transformation is implemented via cross-attention. Supervision is only applied to the final output, without any assumptions or regularization on the IDRs." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.44, + 0.472, + 0.62 + ], + "angle": 0, + "content": "We test iDisc on multiple indoor and outdoor datasets and probe its robustness via zero-shot testing. As of today, there is too little variety in MDE benchmarks for the outdoor scenario, since the only established benchmark is KITTI [17]. Moreover, we observe that all methods fail on outdoor zero-shot testing, suggesting that the KITTI dataset is not diverse enough and leads to overfitting, thus implying that it is not indicative of generalized performance. Hence, we find it compelling to establish a new benchmark setup for the MDE community by proposing two new train-test splits of more diverse and challenging high-quality outdoor datasets: Argoverse1.1 [8] and DDAD [18]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.62, + 0.472, + 0.802 + ], + "angle": 0, + "content": "Our main contributions are as follows: (i) we introduce the Internal Discretization module, a novel architectural component that adeptly represents a scene by combining underlying patterns; (ii) we show that it is a generalization of SotA methods involving depth ordinal regression [3, 13]; (iii) we propose splits of two raw outdoor datasets [8, 18] with high-quality LiDAR measurements. We extensively test iDisc on six diverse datasets and, owing to the ID design, our model consistently outperforms SotA methods and presents better transferability. Moreover, we apply iDisc to surface normal estimation showing that the proposed module is general enough to tackle generic real-valued dense prediction tasks." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.815, + 0.22, + 0.831 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.472, + 0.901 + ], + "angle": 0, + "content": "The supervised setting of MDE assumes that pixel-wise depth annotations are available at training time and depth inference is performed on single images. The coarse-to-fine network introduced in Eigen et al. [12] is the cor" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.243 + ], + "angle": 0, + "content": "nerstone in MDE with end-to-end neural networks. The work established the optimization process via the Scale-Invariant log loss \\((\\mathrm{SI}_{\\log})\\). 
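For concreteness, a minimal PyTorch-style sketch of these scale-invariant log-scale quantities is given below. It is only an illustration under stated assumptions: the function names, the λ weighting of the loss variant, the ε clamp, and the validity mask are not taken from any released implementation, and the SI_log metric follows the 100·sqrt(Var(ε_log)) definition given in Sec. 4.1.2.

```python
import torch

def silog_loss(pred, target, lam=0.5, eps=1e-6):
    # Scale-invariant log loss in the spirit of Eigen et al. [12] (sketch):
    # d = log(pred) - log(target); loss = mean(d^2) - lam * mean(d)^2.
    # `lam`, `eps`, and the validity mask are illustrative assumptions.
    valid = target > eps
    d = torch.log(pred[valid] + eps) - torch.log(target[valid] + eps)
    return (d ** 2).mean() - lam * d.mean() ** 2

def silog_metric(pred, target, eps=1e-6):
    # SI_log evaluation metric as defined in Sec. 4.1.2:
    # 100 * sqrt(Var(eps_log)) with eps_log = log(pred) - log(target).
    valid = target > eps
    d = torch.log(pred[valid]) - torch.log(target[valid])
    return 100.0 * torch.sqrt(d.var())
```

Both functions assume strictly positive ground-truth depths; pixels without a valid depth measurement are masked out before the logarithm is taken.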
Since then, the three main directions evolve: new architectures, such as residual networks [23], neural fields [30, 52], multi-scale fusion [25, 35], transformers [3, 54, 59]; improved optimization schemes, such as reverse-Huber loss [23], classification [6], or ordinal regression [3, 13]; multi-task learning to leverage ancillary information from the related task, such as surface normals estimation or semantic segmentation [12, 39, 51]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.245, + 0.895, + 0.472 + ], + "angle": 0, + "content": "Geometric priors have been widely utilized in the literature, particularly the piecewise planarity prior [5, 9, 14], serving as a proper real-world approximation. The geometric priors are usually incorporated by explicitly treating the image as a set of planes [26, 28, 29, 58], using a plane-inducing loss [57], forcing pixels to attend to the planar representation of other pixels [24, 38], or imposing consistency with other tasks' output [2, 33, 55], like surface normals. Priors can focus on a more holistic scene representation by dividing the whole scene into 3D planes without dependence on intrinsic camera parameters [53, 60], aiming at partitioning the scene into dominant depth planes. In contrast to geometric prior-based works, our method lifts any explicit geometric constraints on the scene. Instead, iDisc implicitly enforces the representation of scenes as a set of high-level patterns." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.474, + 0.895, + 0.655 + ], + "angle": 0, + "content": "Ordinal regression methods [3,4, 13] have proven to be a promising alternative to other geometry-driven approaches. The difference with classification models is that class \"values\" are learnable and are real numbers, thus the problem falls into the regression category. The typical SotA rationale is to explicitly discretize the continuous output depth range, rendering the approach similar to mask-based segmentation. Each of the scalar depth values is associated with a confidence mask which describes the probability of each pixel presenting such a depth value. Hence, SotA methods inherently assume that depth can be represented as a set of frontoparallel planes, that is, depth \"masks\"." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.657, + 0.895, + 0.853 + ], + "angle": 0, + "content": "The main paradigm of ordinal regression methods is to first obtain hidden representations and scalar values of discrete depth values. The dot-product similarity between the feature maps and the depth representations is treated as logits and softmax is applied to extract confidence masks (in Fu et al. [13] this degenerates to argmax). Finally, the final prediction is defined as the per-pixel weighted average of the discrete depth values, with the confidence values serving as the weights. iDisc draws connections with the idea of depth discretization. However, our ID module is designed to be depth-agnostic. The discretization occurs at the abstract level of internal features from the ID bottleneck instead of the output depth level, unlike other methods." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.895, + 0.901 + ], + "angle": 0, + "content": "Iterative routing is related to our \"transposed\" crossattention. The first approach of this kind was Capsule Networks and their variants [20, 42]. 
Some formulations [32, 46]" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "21478" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.087, + 0.895, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.321, + 0.897, + 0.435 + ], + "angle": 0, + "content": "Figure 2. Model Architecture. The Internal Discretization Module imposes an information bottleneck via two consecutive stages: continuous-to-discrete (C2D) and discrete-to-continuous (D2C). The module processes multiple resolutions, i.e., \\( l \\in \\{1, 2, 3\\} \\), independently in parallel. The bottleneck embodies our assumption that a scene can be represented as a set of patterns. The C2D stage aggregates information, given a learnable prior (\\( \\mathbf{H}_{\\text{prior}}^l \\)), from the \\( l \\)-th resolution feature maps (\\( \\mathbf{F}^l \\)) to a finite set of IDRs (\\( \\mathbf{H}^l \\)). In particular, it learns how to define a partition function that is dependent on the input \\( \\mathbf{F}^l \\) via transposed cross-attention, as in (1). The second stage (D2C) transfers the IDRs on the original continuous space using layers of cross-attention as in (2), for sake of simplicity, we depict only a generic \\( i \\)-th layer. Cross-attention is guided by the similarity between decoded pixel embeddings (\\( \\mathbf{P}^l \\)) and \\( \\mathbf{H}^l \\). The final prediction (\\( \\hat{\\mathbf{D}} \\)) is the fusion, i.e., mean, of the intermediate representations \\( \\{\\hat{\\mathbf{D}}^l\\}_{l=1}^3 \\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.442, + 0.475, + 0.564 + ], + "angle": 0, + "content": "employ different kinds of attention mechanisms. Our attention mechanism draws connections with [32]. However, we do not allow permutation invariance, since our assumption is that each discrete representation internally describes a particular kind of pattern. In addition, we do not introduce any other architectural components such as gated recurrent units (GRU). In contrast to other methods, our attention is employed at a higher abstraction level, namely in the decoder." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.58, + 0.17, + 0.595 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.606, + 0.473, + 0.803 + ], + "angle": 0, + "content": "We propose an Internal Discretization (ID) module, to discretize the internal feature representation of encoder-decoder network architectures. We hypothesize that the module can break down the scenes into coherent concepts without semantic supervision. This section will first describe the module design and then discuss the network architecture. Sec. 3.1.1 defines the formulation of \"transposed\" cross-attention outlined in Sec. 1 and describes the main difference with previous formulations from Sec. 2. Moreover, we derive in Sec. 3.1.2 how the iDisc formulation can be interpreted as a generalization of SotA ordinal regression methods by reframing their original formulation. Eventually, Sec. 3.2 presents the optimization problem and the overall architecture." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.816, + 0.353, + 0.831 + ], + "angle": 0, + "content": "3.1. 
Internal Discretization Module" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.471, + 0.903 + ], + "angle": 0, + "content": "The ID module involves a continuous-discrete-continuous bottleneck composed of two main consecutive stages. The overall module is based on our hypothesis that scenes can be represented as a finite set of patterns. The first stage" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.442, + 0.896, + 0.669 + ], + "angle": 0, + "content": "consists in a continuous-to-discrete component, namely soft-exclusive discretization of the feature space. More specifically, it enforces an input-dependent soft clustering on the feature maps in an image-to-set fashion. The second stage completes the internal scene discretization by mapping the learned IDRs onto the continuous output space. IDRs are not bounded to focus exclusively on depth planes but are allowed to represent any high-level pattern or concept, such as objects, relative locations, and planes in the 3D space. In contrast with SotA ordinal regression methods [3,4,13], the IDRs are neither explicitly tied to depth values nor directly tied to the output. Moreover, our module operates at multiple intermediate resolutions and merges them only in the last layer. The overall architecture of iDisc, particularly our ID module, is shown in Fig. 2." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.694, + 0.766, + 0.71 + ], + "angle": 0, + "content": "3.1.1 Adaptive Feature Partitioning" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.895, + 0.901 + ], + "angle": 0, + "content": "The first stage of our ID module, Adaptive Feature Partitioning (AFP), generates proper discrete representations \\((\\mathcal{H} \\coloneqq \\{\\mathbf{H}^l\\}_{l=1}^3)\\) that quantize the feature space \\((\\mathcal{F} \\coloneqq \\{\\mathbf{F}^l\\}_{l=1}^3)\\) at each resolution \\(l\\). We drop the resolution superscript \\(l\\) since resolutions are independently processed and only one generic resolution is treated here. iDisc does not simply learn fixed centroids, as in standard clustering, but rather learns how to define a partition function in an input-dependent fashion. More specifically, an iterative transposed cross-attention module is utilized. Given the specific input feature maps \\((\\mathbf{F})\\), the iteration process refines (learnable) IDR priors \\((\\mathbf{H}_{\\mathrm{prior}})\\) over \\(R\\) iterations." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "21479" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.214 + ], + "angle": 0, + "content": "More specifically, the term \"transposed\" refers to the different axis along which the softmax operation is applied, namely \\(\\left[\\mathrm{softmax}(\\mathbf{KQ}^T)\\right]^T\\mathbf{V}\\) instead of the canonical dot-product attention softmax(QK)V, with Q,K,V as query, key and value tensors, respectively. In particular, the tensors are obtained as projections of feature maps and IDR priors, \\(f_{\\mathbf{Q}}(\\mathbf{H}_{\\mathrm{prior}}),f_{\\mathbf{K}}(\\mathbf{F}),f_{\\mathbf{V}}(\\mathbf{F})\\) . 
The \\(t\\) -th iteration out of \\(R\\) can be formulated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.117, + 0.219, + 0.471, + 0.26 + ], + "angle": 0, + "content": "\\[\nW _ {i j} ^ {t} = \\frac {\\exp \\left(\\mathbf {k} _ {i} ^ {T} \\mathbf {q} _ {j} ^ {t}\\right)}{\\sum_ {k = 1} ^ {N} \\exp \\left(\\mathbf {k} _ {i} ^ {T} \\mathbf {q} _ {k} ^ {t}\\right)}, \\mathbf {q} _ {j} ^ {t + 1} = \\sum_ {i = 1} ^ {M} W _ {i j} ^ {t} \\mathbf {v} _ {i}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.266, + 0.471, + 0.342 + ], + "angle": 0, + "content": "where \\(\\mathbf{q}_j, \\mathbf{k}_i, \\mathbf{v}_i \\in \\mathbb{R}^C\\) are query, key and value respectively, \\(N\\) is the number of IDRs, nameley, clusters, and \\(M\\) is the number of pixels. The weights \\(W_{ij}\\) may be normalized to 1 along the \\(i\\) dimension to avoid vanishing or exploding quantities due to the summation of un-normalized distribution." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.342, + 0.473, + 0.599 + ], + "angle": 0, + "content": "The quantization stems from the inherent behavior of softmax. In particular, softmax forces competition among outputs: one output can be large only to the detriment of others. Therefore, when fixing \\( i \\), namely, given a feature, only a few attention weights \\( (W_{ij}) \\) may be significantly greater than zero. Hence, the content \\( \\mathbf{v}_i \\) is routed only to a few IDRs at the successive iteration. Feature maps are fixed during the process and weights are shared by design, thus \\( \\{\\mathbf{k}_i, \\mathbf{v}_i\\}_{i=1}^M \\) are the same across iterations. The induced competition enforces a soft clustering of the input feature space, where the last-iteration IDR represents the actual partition function \\( (\\mathbf{H} := \\mathbf{Q}^R) \\). The probabilities of belonging to one partition are the attention weights, namely \\( W_{ij}^R \\) with \\( j \\)-th query fixed. Since attention weights are inherently dependent on the input, the specific partitioning also depends on the input and takes place at inference time. The entire process of AFP leads to (soft) mutually exclusive IDRs." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.599, + 0.471, + 0.811 + ], + "angle": 0, + "content": "As far as the partitioning rationale is concerned, the proposed AFP draws connections with iterative routing methods described in Sec. 2. However, important distinctions apply. First, IDRs are not randomly initialized as the \"slots\" in Locatello et al. [32] but present a learnable prior. Priors can be seen as learnable positional embeddings in the attention context, thus we do not allow a permutation-invariant set of representations. Moreover, non-adaptive partitioning can still take place via the learnable priors if the iterations are zero. Second, the overall architecture differs noticeably as described in Sec. 2, and in addition, iDisc partitions feature space at the decoder level, corresponding to more abstract, high-level concepts, while the SotA formulations focus on clustering at an abstraction level close to the input image." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.472, + 0.902 + ], + "angle": 0, + "content": "One possible alternative approach to obtaining the aforementioned IDRs is the well-known image-to-set proposed in DETR [7], namely via classic cross-attention between representations and image feature maps. 
However, the corresponding representations might redundantly aggregate features, where the extreme corresponds to each output being" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.228 + ], + "angle": 0, + "content": "the mean of the input. Studies [15, 44] have shown that slow convergence in transformer-based architectures may be due to the non-localized context in cross-attention. The exclusiveness of the IDRs discourages the redundancy of information in different IDRs. We argue that exclusiveness allows the utilization of fewer representations (32 against the 256 utilized in [3] and [13]), and can improve both the interpretability of what IDRs are responsible for and training convergence." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.248, + 0.757, + 0.262 + ], + "angle": 0, + "content": "3.1.2 Internal Scene Discretization" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.272, + 0.895, + 0.422 + ], + "angle": 0, + "content": "In the second stage of the ID module, Internal Scene Discretization (ISD), the module ingests pixel embeddings \\((\\mathcal{P} := \\{\\mathbf{P}^l\\}_{l=1}^3)\\) from the decoder and IDRs \\(\\mathcal{H}\\) from the first stage, both at different resolutions \\(l\\), as shown in Fig. 2. Each discrete representation carries both the signature, as the key, and the output-related content, as the value, of the pattern it represents. The similarity between IDRs and pixel embeddings is computed in order to spatially localize in the continuous output space where to transfer the information of each IDR. We utilize the dot-product similarity function." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.424, + 0.895, + 0.71 + ], + "angle": 0, + "content": "Furthermore, the kind of information to transfer onto the final prediction is not constrained, as we never explicitly handle depth values, usually called bins, until the final output. Thus, the IDRs are completely free to carry generic high-level concepts (such as object-ness, relative positioning, and geometric structures). This approach is in stark contrast with SotA methods [3,4, 13, 27], which explicitly constrain what the representations are about: scalar depth values. Instead, iDisc learns to generate unconstrained representations in an input-dependent fashion. The effective discretization of the scene occurs in the second stage thanks to the information transfer from the set of exclusive concepts \\((\\mathcal{H})\\) from AFP to the continuous space defined by \\(\\mathcal{P}\\). We show that our method is not bounded to depth estimation, but can be applied to generic continuous dense tasks, for instance, surface normal estimation. Consequently, we argue that the training signal of the task at hand determines how to internally discretize the scene, rendering our ID module general and usable in settings other than depth estimation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.711, + 0.894, + 0.802 + ], + "angle": 0, + "content": "From a practical point of view, the whole second stage consists in cross-attention layers applied to IDRs and pixel embeddings. As described in Sec. 3.1.1, we drop the resolution superscript \\( l \\). After that, the final depth maps are projected onto the output space and the multi-resolution depth predictions are combined. 
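For concreteness, one such cross-attention layer can be sketched in PyTorch as follows. This is a hedged sketch rather than the released implementation: the class name and plain linear projections are illustrative, and the initialization of the running tensor D (e.g., from the pixel embeddings themselves) is an implementation choice the text does not fix.

```python
import torch
import torch.nn as nn

class ISDLayer(nn.Module):
    """One ISD cross-attention layer: pixel embeddings query the IDRs."""
    def __init__(self, dim):
        super().__init__()
        self.f_q = nn.Linear(dim, dim)  # queries from pixel embeddings P
        self.f_k = nn.Linear(dim, dim)  # keys (signatures) from IDRs H
        self.f_v = nn.Linear(dim, dim)  # values (content to transfer) from IDRs H

    def forward(self, d, p, h):
        # d, p: (H*W, C) running representation and pixel embeddings; h: (N, C) IDRs
        attn = (self.f_q(p) @ self.f_k(h).t()).softmax(dim=-1)  # (H*W, N): where each IDR acts
        return attn @ self.f_v(h) + d                            # residual transfer of IDR content
```

Stacking a few such layers per resolution, projecting the last output to a single depth channel, and averaging the interpolated per-resolution maps completes the stage, as described in Sec. 3.2.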
The \\( i \\)-th layer is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.575, + 0.815, + 0.894, + 0.833 + ], + "angle": 0, + "content": "\\[\n\\mathbf {D} _ {i + 1} = \\operatorname {s o f t m a x} \\left(\\mathbf {Q} _ {i} \\mathbf {K} _ {i} ^ {T}\\right) \\mathbf {V} _ {i} + \\mathbf {D} _ {i}, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.839, + 0.895, + 0.902 + ], + "angle": 0, + "content": "where \\(\\mathbf{Q}_i = f_{Q_i}(\\mathbf{P})\\in \\mathbb{R}^{H\\times W\\times C}\\), \\(\\mathbf{P}\\) are pixel embeddings with shape \\((H,W)\\), and \\(\\mathbf{K}_i\\), \\(\\mathbf{V}_i\\in \\mathbb{R}^{N\\times C}\\) are the \\(N\\) IDRs under linear transformations \\(f_{K_i}(\\mathbf{H})\\), \\(f_{V_i}(\\mathbf{H})\\). The term \\(\\mathbf{Q}_i\\mathbf{K}_i^T\\) determines the spatial location for which each" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "21480" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.121 + ], + "angle": 0, + "content": "specific IDR is responsible, while \\(\\mathbf{V}_i\\) carries the semantic content to be transferred in the proper spatial locations." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.122, + 0.473, + 0.258 + ], + "angle": 0, + "content": "Our approach constitutes a generalization of depth estimation methods that involve (hybrid) ordinal regression. As described in Sec. 2, the common paradigm in ordinal regression methods is to explicitly discretize depth in a set of masks with a scalar depth value associated with it. Then, they predict the likelihood that each pixel belongs to such masks. Our change of paradigm stems from the reinterpretation of the mentioned ordinal regression pipeline which we translate into the following mathematical expression:" + }, + { + "type": "equation", + "bbox": [ + 0.179, + 0.265, + 0.47, + 0.282 + ], + "angle": 0, + "content": "\\[\n\\mathbf {D} = \\operatorname {s o f t m a x} \\left(\\mathbf {P R} ^ {T} / T\\right) \\mathbf {v}, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.29, + 0.473, + 0.516 + ], + "angle": 0, + "content": "where \\(\\mathbf{P}\\) are the pixel embeddings at maximum resolution and \\(T\\) is the softmax temperature. \\(\\mathbf{v} \\in \\mathbb{R}^{N \\times 1}\\) are \\(N\\) depth scalar values and \\(\\mathbf{R} \\in \\mathbb{R}^{N \\times (C - 1)}\\) are their hidden representations, both processed as a unique stacked tensor \\((\\mathbf{R}||\\mathbf{v} \\in \\mathbb{R}^{N \\times C})\\). From the reformulation in (3), one can observe that (3) is a degenerate case of (2). In particular, \\(f_{Q}\\) degenerates to the identity function. \\(f_{K}\\) and \\(f_{V}\\) degenerate to selector functions: the former function selects up to the \\(C - 1\\) dimensions and the latter selects the last dimension only. Moreover, the hidden representations are refined pixel embeddings \\((f(\\mathbf{P}_i) = \\mathbf{H}_i = \\mathbf{R}||\\mathbf{v})\\), and \\(\\mathbf{D}\\) in (3) is the final output, namely no multiple iterations are performed as in (2). The explicit entanglement between the semantic content of the hidden representations and the final output is due to hard-coding \\(\\mathbf{v}\\) as depth scalar values." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.524, + 0.283, + 0.538 + ], + "angle": 0, + "content": "3.2. 
Network Architecture" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.547, + 0.473, + 0.775 + ], + "angle": 0, + "content": "Our network described in Fig. 2 comprises first an encoder backbone, interchangeably convolutional or attention-based, producing features at different scales. The encoded features at different resolutions are refined, and information between resolutions is shared, both via four multi-scale deformable attention (MSDA) blocks [63]. The feature maps from MSDA at different scales are fed into the AFP module to extract IDRs \\((\\mathcal{H})\\), and into the decoder to extract pixel embeddings in the continuous space \\((\\mathcal{P})\\). Pixel embeddings at different resolutions are combined with the respective IDRs in the ISD stage of the ID module to extract the depth maps. The final depth prediction corresponds to the mean of the interpolated intermediate representations. The optimization process is guided only by the established \\(\\mathrm{SI}_{\\log}\\) loss defined in [12], and no other regularization is exploited. \\(\\mathrm{SI}_{\\log}\\) is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.17, + 0.782, + 0.47, + 0.806 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {S I} _ {\\log}} (\\epsilon) = \\alpha \\sqrt {\\mathbb {V} [ \\epsilon ] + \\lambda \\mathbb {E} ^ {2} [ \\epsilon ]} \\tag {4}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.173, + 0.803, + 0.354, + 0.819 + ], + "angle": 0, + "content": "\\[\n\\text {w i t h} \\epsilon = \\log (\\hat {y}) - \\log (y ^ {*}),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.473, + 0.902 + ], + "angle": 0, + "content": "where \\(\\hat{y}\\) is the predicted depth and \\(y^{*}\\) is the ground-truth (GT) value. \\(\\mathbb{V}[\\epsilon ]\\) and \\(\\mathbb{E}[\\epsilon ]\\) are computed as the empirical variance and expected value over all pixels, namely, \\(\\{\\epsilon_i\\}_{i = 1}^N\\) \\(\\mathbb{V}[\\epsilon ]\\) is the purely scale-invariant loss, while \\(\\mathbb{E}^2 [\\epsilon ]\\) fosters a proper scale. \\(\\alpha\\) and \\(\\lambda\\) are set to 10 and 0.15, as customary." + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.09, + 0.892, + 0.42 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.424, + 0.895, + 0.508 + ], + "angle": 0, + "content": "Figure 3. Qualitative results on NYU. Each pair of consecutive rows corresponds to one test sample. Each odd row shows the input RGB image and depth predictions for the selected methods. Each even row shows GT depth and the prediction errors of the selected methods clipped at 0.5 meters. The error color map is coolwarm: blue corresponds to lower error values and red to higher values." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.523, + 0.634, + 0.54 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.548, + 0.691, + 0.565 + ], + "angle": 0, + "content": "4.1. Experimental Setup" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.572, + 0.614, + 0.586 + ], + "angle": 0, + "content": "4.1.1 Datasets" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.597, + 0.895, + 0.794 + ], + "angle": 0, + "content": "NYU-Depth V2. NYU-Depth V2 (NYU) [36] is a dataset consisting of 464 indoor scenes with RGB images and quasi-dense depth images with \\(640 \\times 480\\) resolution. 
Our models are trained on the train-test split proposed by previous methods [24], corresponding to 24,231 samples for training and 654 for testing. In addition to depth, the dataset provides surface normal data utilized for normal estimation. The train split used for normal estimation is the one proposed in [55]. Zero-shot testing datasets. We evaluate the generalizability of indoor models on two indoor datasets which are not seen during training. The selected datasets are SUN-RGBD [43] and DIODE-Indoor [47]. For both datasets, the resolution is reduced to match that of NYU, which is \\(640 \\times 480\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.895, + 0.901 + ], + "angle": 0, + "content": "KITTI. The KITTI dataset provides stereo images and corresponding Velodyne LiDAR scans of outdoor scenes captured from a moving vehicle [17]. RGB and depth images have (mean) resolution of \\(1241 \\times 376\\). The split proposed by [12] (Eigen-split) with corrected depth is utilized as training and testing set, namely, 23,158 and 652 samples. The evaluation crop corresponds to the crop defined by [16]. All methods in" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "21481" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.09, + 0.176, + 0.203 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.09, + 0.272, + 0.203 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.275, + 0.09, + 0.37, + 0.203 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.371, + 0.09, + 0.468, + 0.203 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.081, + 0.205, + 0.176, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.205, + 0.272, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.275, + 0.205, + 0.37, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.371, + 0.205, + 0.468, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.264, + 0.47, + 0.345 + ], + "angle": 0, + "content": "Figure 4. Attention maps on NYU for three different IDRs. Each row presents the attention map of a specific IDR for four test images. Each discrete representation focuses on a specific high-level concept. The first two rows pertain to IDRs at the lowest resolution while the last corresponds to the highest resolution. Best viewed on a screen and zoomed in." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.36, + 0.47, + 0.404 + ], + "angle": 0, + "content": "Sec. 4.2 that have source code and pre-trained models available are re-evaluated on KITTI with the evaluation mask from [16] to have consistent results." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.406, + 0.471, + 0.724 + ], + "angle": 0, + "content": "Argoverse1.1 and DDAD. We propose splits of two autonomous driving datasets, Argoverse1.1 (Argoverse) [8] and DDAD [18], for depth estimation. Argoverse and DDAD are both outdoor datasets that provide \\(360^{\\circ}\\) HD images and the corresponding LiDAR scans from moving vehicles. We pre-process the original datasets to extract depth maps and avoid redundancy. Training set scenes are sampled when the vehicle has been displaced by at least 2 meters from the previous sample. 
For the testing set scenes, we increase this threshold to 50 meters to further diminish redundancy. Our Argoverse split accounts for 21,672 training samples and 476 test samples, while DDAD for 18,380 training and 860 testing samples. Samples in Argoverse are taken from the 6 cameras covering the full \\(360^{\\circ}\\) panorama. For DDAD, we exclude 2 out of the 6 cameras since they have more than \\(30\\%\\) pixels occluded by the camera capture system. We crop both RGB images and depth maps to have \\(1920 \\times 870\\) resolution that is 180px and 210px cropped from the top for Argoverse and DDAD, respectively, to crop out a large portion of the sky and regions occluded by the ego-vehicle. For both datasets, we clip the maximum depth at \\(150\\mathrm{m}\\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.741, + 0.295, + 0.756 + ], + "angle": 0, + "content": "4.1.2 Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.765, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Evaluation Details. In all experiments, we do not exploit any test-time augmentations (TTA), camera parameters, or other tricks and regularizations, in contrast to many previous methods [3, 13, 24, 38, 59]. This provides a more challenging setup, which allows us to show the effectiveness of iDisc. As depth estimation metrics, we utilize root mean square error (RMS) and its log variant \\((\\mathrm{RMS}_{\\log})\\), absolute error in log-scale \\((\\mathrm{Log}_{10})\\), absolute (A.Rel) and squared (S.rel) mean relative error, the percentage of inlier pixels \\((\\delta_{i})\\) with" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.089, + 0.894, + 0.186 + ], + "angle": 0, + "content": "Table 1. Comparison on NYU official test set. R101: ResNet-101 [19], D161: DenseNet-161 [21], EB5: EfficientNet-B5 [45], HR48: HRNet-48 [48], DD22: DRN-D-22 [56], ViTB: ViT-B/16+Resnet-50 [11], MViT: EfficientNet-B5-AP [50] + MiniViT, Swin{L, B, T}: Swin-{Large, Base, Tiny} [31]. \\((\\dagger)\\): ImageNet-22k [10] pretraining, \\((\\ddagger)\\): non-standard training set, \\((\\ast)\\): in-house dataset pretraining, \\((\\S)\\): re-evaluated without GT-based rescaling." + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.187, + 0.892, + 0.39 + ], + "angle": 0, + "content": "
MethodEncoderδ1δ2δ3RMSA.RelLog10
Higher is betterLower is better
Eigen et al. [12]-0.7690.9500.9880.6410.158-
DORN [13]R1010.8280.9650.9920.5090.1150.051
VNL [55]-0.8750.9760.9940.4160.1080.048
BTS [24]D1610.8850.9780.9940.3920.1100.047
AdaBins‡ [3]MViT0.9030.9840.9970.3640.1030.044
DAV [22]DD220.8820.9800.9960.4120.108-
Long et al. [33]HR480.8900.9820.9960.3770.1010.044
TransDepth [54]ViTB0.9000.9830.9960.3650.1060.045
DPT* [41]ViTB0.9040.9880.9980.3570.1100.045
P3Depth§ [38]R1010.8300.9710.9950.4500.1300.056
NeWCRF [59]SwinL†0.9220.9920.9980.3340.0950.041
LocalBins‡ [4]MViT0.9070.9870.9980.3570.0990.042
OursR1010.8920.9830.9950.3800.1090.046
EB50.9030.9860.9970.3690.1040.044
SwinT0.8940.9830.9960.3770.1090.045
SwinB0.9260.9890.9970.3270.0910.039
SwinL†0.9400.9930.9990.3130.0860.037
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.408, + 0.895, + 0.62 + ], + "angle": 0, + "content": "threshold \\(1.25^{i}\\), and scale-invariant error in log-scale \\((\\mathrm{SI}_{\\log})\\): \\(100\\sqrt{\\mathrm{Var}(\\epsilon_{\\log})}\\). The maximum depth for NYU and all zero-shot testing in indoor datasets, specifically SUN-RGBD and Diode Indoor, is set to \\(10\\mathrm{m}\\) while for KITTI it is set to \\(80\\mathrm{m}\\) and for Argoverse and DDAD to \\(150\\mathrm{m}\\). Zero-shot testing is performed by evaluating a model trained on either KITTI or NYU and tested on either outdoor or indoor datasets, respectively, without additional fine-tuning. For surface normals estimation, the metrics are mean (Mean) and median (Med) absolute error, RMS angular error, and percentages of inlier pixels with thresholds at \\(11.5^{\\circ}\\), \\(22.5^{\\circ}\\), and \\(30^{\\circ}\\). GT-based mean depth rescaling is applied only on Diode Indoor for all methods since the dataset presents largely scale-equivariant scenes, such as plain walls with tiny details." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.621, + 0.895, + 0.802 + ], + "angle": 0, + "content": "Training Details. We implement iDisc in PyTorch [37]. For training, we use the AdamW [34] optimizer \\((\\beta_{1} = 0.9, \\beta_{2} = 0.999)\\) with an initial learning rate of 0.0002 for every experiment, and weight decay set to 0.02. As a scheduler, we exploit Cosine Annealing starting from \\(30\\%\\) of the training, with final learning rate of 0.00002. We run 45k optimization iterations with a batch size of 16. All backbones are initialized with weights from ImageNet-pretrained models. The augmentations include both geometric (random rotation and scale) and appearance (random brightness, gamma, saturation, hue shift) augmentations. The required training time amounts to 20 hours on 4 NVidia Titan RTX." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.816, + 0.822, + 0.832 + ], + "angle": 0, + "content": "4.2. Comparison with the State of the Art" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.894, + 0.901 + ], + "angle": 0, + "content": "Indoor Datasets. Results on NYU are presented in Table 1. The results show that we set the new state of the art on the benchmark, improving by more than \\(6\\%\\) on RMS and \\(9\\%\\) on A.Rel over the previous SotA. Moreover, results highlight" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "21482" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.089, + 0.472, + 0.13 + ], + "angle": 0, + "content": "Table 2. Zero-shot testing of models trained on NYU. All methods are trained on NYU and tested without further fine-tuning on the official validation set of SUN-RGBD and Diode Indoor." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.131, + 0.47, + 0.282 + ], + "angle": 0, + "content": "
Test setMethodδ1↑RMS ↓A.Rel ↓SIlog ↓
SUN-RGBDBTS [24]0.7450.5020.16814.25
AdaBins [3]0.7680.4760.15513.20
P3Depth [38]0.6980.5410.17815.02
NeWCRF [59]0.7990.4290.15011.27
Ours0.8380.3870.12810.91
DiodeBTS [24]0.7050.9650.21123.78
AdaBins [3]0.7330.8720.20922.54
P3Depth [38]0.7320.8770.20222.16
NeWCRF [59]0.7990.7690.16418.69
Ours0.8100.7210.15618.11
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.297, + 0.47, + 0.476 + ], + "angle": 0, + "content": "how iDisc is more sample-efficient than other transformer-based architectures [3,4,41,54,59] since we achieve better results even when employing smaller and less heavily pretrained backbone architectures. In addition, results show a significant improvement in performance with our model instantiated with a full-convolutional backbone over other full-convolutional-based models [12, 13, 22, 24, 38]. Table 2 presents zero-shot testing of NYU models on SUN-RGBD and Diode. In both cases, iDisc exhibits a compelling generalization performance, which we argue is due to implicitly learning the underlying patterns, namely, IDRs, of indoor scene structure via the ID module." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.479, + 0.472, + 0.825 + ], + "angle": 0, + "content": "Qualitative results in Fig. 3 emphasize how the method excels in capturing the overall scene complexity. In particular, iDisc correctly captures discontinuities without depth over-excitation due to chromatic edges, such as the sink in row 1, and captures the right perspectivity between foreground and background depth planes such as between the bed (row 2) or sofa (row 3) and the walls behind. In addition, the model presents a reduced error around edges, even when compared to higher-resolution models such as [3]. We argue that iDisc actually reasons at the pattern level, thus capturing better the structure of the scene. This is particularly appreciable in indoor scenes, since these are usually populated by a multitude of objects. This behavior is displayed in the attention maps of Fig. 4. Fig. 4 shows how IDRs at lower resolution capture specific components, such as the relative position of the background (row 1) and foreground objects (row 2), while IDRs at higher resolution behave as depth refiners, attending typically to high-frequency features, such as upper (row 3) or lower borders of objects. It is worth noting that an IDR attends to the image borders when the particular concept it looks for is not present in the image. That is, the borders are the last resort in which the IDR tries to find its corresponding pattern (e.g., row 2, col. 1)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.826, + 0.472, + 0.902 + ], + "angle": 0, + "content": "Outdoor Datasets. Results on KITTI in Table 3 demonstrate that iDisc sets the new SotA for this primary outdoor dataset, improving by more than \\(3\\%\\) in RMS and by \\(0.9\\%\\) in \\(\\delta_{0.5}\\) over the previous SotA. However, KITTI results present saturated metrics. For instance, \\(\\delta_{3}\\) is not reported since ev" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.089, + 0.895, + 0.2 + ], + "angle": 0, + "content": "Table 3. Comparison on KITTI Eigen-split test set. Models without \\(\\delta_{0.5}\\) have implementation (partially) unavailable. R101: ResNet-101 [19], D161: DenseNet-161 [21], EB5: EfficientNet-B5 [45], ViTB: ViT-B/16+Resnet-50 [11], MViT: EfficientNet-B5-AP [50] + MiniViT, Swin{L, B, T}: Swin-{Large, Base, Tiny} [31]. (\\(\\dagger\\)): ImageNet-22k [10] pretraining, (\\(\\ddagger\\)): non-standard training set, (*): in-house dataset pretraining, (\\(\\S\\)): re-evaluated without GT-based rescaling." + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.2, + 0.895, + 0.346 + ], + "angle": 0, + "content": "
MethodEncoderδ0.5δ1δ2RMSRMSlogA.RelS.Rel
Higher is betterLower is better
Eigen et al. [12]--0.6920.8997.1560.2700.1901.515
DORN [13]R101-0.9320.9842.7270.1200.0720.307
BTS [24]D1610.8700.9640.9952.4590.0900.0570.199
AdaBins‡ [3]MViT0.8680.9640.9952.3600.0880.0580.199
TransDepth [54]ViTB-0.9560.9942.7550.0980.0640.252
DPT* [41]ViTB0.8650.9650.9962.3150.0880.0590.190
P3Depth§ [38]R1010.8520.9590.9942.5190.0950.0600.206
NeWCRF [59]SwinL†0.8870.9740.9972.1290.0790.0520.155
OursR1010.8600.9650.9962.3620.0900.0590.197
EB50.8520.9630.9942.5100.0940.0630.223
SwinT0.8700.9680.9962.2910.0870.0580.184
SwinB0.8850.9740.9972.1490.0810.0540.159
SwinL†0.8960.9770.9972.0670.0770.0500.145
" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.36, + 0.895, + 0.401 + ], + "angle": 0, + "content": "Table 4. Comparison on Argoverse and DDAD proposed splits. Comparison of performance of methods trained on either Argoverse or DDAD and tested on the same dataset." + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.402, + 0.895, + 0.51 + ], + "angle": 0, + "content": "
DatasetMethodδ1↑δ2↑δ3↑RMS ↓RMSlog ↓A.Rel ↓S.Rel ↓
ArgoverseBTS [24]0.7800.9080.9548.3190.2670.1862.56
AdaBins [3]0.7500.9010.9528.6860.2780.1952.36
NeWCRF [59]0.7070.8710.9399.4370.3210.2323.23
Ours0.8210.9230.9607.5670.2430.1632.22
DDADBTS [24]0.7570.9130.96210.110.2510.1862.27
AdaBins [3]0.7480.9120.96210.240.2550.2012.30
NeWCRF [59]0.7020.8810.95110.980.2710.2192.83
Ours0.8090.9340.9718.9890.2210.1631.85
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.524, + 0.895, + 0.614 + ], + "angle": 0, + "content": "ery method scores \\(>0.99\\), with recent ones scoring 0.999. Therefore, we propose to utilize the metric \\(\\delta_{0.5}\\), to better convey meaningful evaluation information. In addition, iDisc performs remarkably well on the highly competitive official KITTI benchmark, ranking \\(3^{\\mathrm{rd}}\\) among all methods and \\(1^{\\mathrm{st}}\\) among all published MDE methods." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.614, + 0.896, + 0.84 + ], + "angle": 0, + "content": "Moreover, Table 4 shows the results of methods trained and evaluated on the splits from Argoverse and DDAD proposed in this work. All methods have been trained with the same architecture and pipeline utilized for training on KITTI. We argue that the high degree of sparseness in GT of the two proposed datasets, in contrast to KITTI, deeply affects windowed methods such as [3, 59]. Qualitative results in Fig. 5 suggest that the scene level discretization leads to retaining small objects and sharp transitions between foreground objects and background: background in row 1, and boxes in row 2. These results show the better ability of iDisc to capture fine-grained depth variations on close-by and similar objects, including crowd in row 3. Zero-shot testing from KITTI to DDAD and Argoverse are presented in Supplement." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.897, + 0.901 + ], + "angle": 0, + "content": "Surface Normals Estimation. We emphasize that the proposed method has more general applications by testing iDisc on a different continuous dense prediction task such as surface normals estimation. Results in Table 5 evidence that we" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "21483" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.089, + 0.28, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.284, + 0.089, + 0.482, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.484, + 0.089, + 0.685, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.687, + 0.089, + 0.892, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.262, + 0.894, + 0.292 + ], + "angle": 0, + "content": "Figure 5. Qualitative results on KITTI. Three zoomed-in crops of different test images are shown. The comparisons show the ability of iDisc to capture small details, proper background transition, and fine-grained variations in, e.g., crowded scenes. Best viewed on a screen." + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.302, + 0.47, + 0.342 + ], + "angle": 0, + "content": "Table 5. Comparison of surface normals estimation methods on NYU official test set. iDisc architecture and training pipeline is the same as the one utilized for indoor depth estimation." + }, + { + "type": "table", + "bbox": [ + 0.081, + 0.344, + 0.47, + 0.458 + ], + "angle": 0, + "content": "
Method11.5°22.5°30°RMSMeanMed
Higher is betterLower is better
SURGE [49]0.4730.6890.766-20.612.2
GeoNet [39]0.4840.4840.79526.919.011.8
PAP [61]0.4880.7220.79825.518.611.7
GeoNet++ [40]0.5020.7320.80726.718.511.2
Bae et al. [1]0.6220.7930.85223.514.97.5
Ours0.6380.7980.85622.814.67.3
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.472, + 0.47, + 0.534 + ], + "angle": 0, + "content": "set the new state of the art on surface normals estimation. It is worth mentioning that all other methods are specifically designed for normals estimation, while we keep the same architecture and framework from indoor depth estimation." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.544, + 0.228, + 0.56 + ], + "angle": 0, + "content": "4.3. Ablation study" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.568, + 0.469, + 0.597 + ], + "angle": 0, + "content": "The importance of each component introduced in iDisc is evaluated by ablating the method in Table 6." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.599, + 0.47, + 0.719 + ], + "angle": 0, + "content": "Depth Discretization. Internal scene discretization provides a clear improvement over its explicit counterpart (row 3 vs. 2), which is already beneficial in terms of robustness. Adding the MSDA module on top of explicit discretization (row 5) recovers part of the performance gap between the latter and our full method (row 8). We argue that MSDA recovers a better scene scale by refining feature maps at different scales at once, which is helpful for higher-resolution feature maps." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Component Interactions. Using either the MSDA module or the AFP module together with internal scene discretization results in similar performance (rows 4 and 6). We argue that the two modules are complementary, and they synergize when combined (row 8). The complementarity can be explained as follows: in the former scenario (row 4), MSDA preemptively refines feature maps to be partitioned by the non-adaptive clustering, that is, by the IDR priors described in Sec. 3, while on latter one (row 6), AFP allows the IDRs to adapt themselves to partition the unrefined feature space properly. Row 7 shows that the architecture closer to the one in [32], particularly random initialization, hurts perfor" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.302, + 0.892, + 0.399 + ], + "angle": 0, + "content": "Table 6. Ablation of iDisc. EDD: Explicit Depth Discretization [3, 13], ISD: Internal Scene discretization, AFP: Adaptive Feature Partitioning, MSDA: MultiScale Deformable Attention. The EDD module, used in SotA methods, and our ISD module are mutually exclusive. AFP with \\((\\checkmark_{\\mathbf{R}})\\) refers to random initialization of IDRs and architecture similar to [32]. The last row corresponds to our complete iDisc model." + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.399, + 0.889, + 0.535 + ], + "angle": 0, + "content": "
EDDISDAFPMSDAδ1↑RMS ↓A.Rel ↓
1XXXX0.8900.3700.104
2XXX0.9050.3670.102
3XXX0.9190.3400.096
4XX0.9310.3190.091
5XX0.9310.3260.091
6XX0.9340.3190.088
7X✓R0.9300.3190.089
8X0.9400.3130.086
" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.548, + 0.892, + 0.579 + ], + "angle": 0, + "content": "mance since the internal representations do not embody any domain-specific prior information." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.592, + 0.619, + 0.608 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.617, + 0.895, + 0.859 + ], + "angle": 0, + "content": "We have introduced a new module, called Internal Discretization, for MDE. The module represents the assumption that scenes can be represented as a finite set of patterns. Hence, iDisc leverages an internally discretized representation of the scene that is enforced via a continuous-discrete-continuous bottleneck, namely ID module. We have validated the proposed method, without any TTA or tricks, on the primary indoor and outdoor benchmarks for MDE, and have set the new state of the art among supervised approaches. Results showed that learning the underlying patterns, while not imposing any explicit constraints or regularization on the output, is beneficial for performance and generalization. iDisc also works out-of-the-box for normal estimation, beating all specialized SotA methods. In addition, we propose two new challenging outdoor dataset splits, aiming to benefit the community with more general and diverse benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Acknowledgment. This work is funded by Toyota Motor Europe via the research project TRACE-Zürich." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "21484" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.105 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.472, + 0.183 + ], + "angle": 0, + "content": "[1] Gwangbin Bae, Ignas Budvytis, and Roberto Cipolla. Estimating and exploiting the aleatoric uncertainty in surface normal estimation. Proceedings of the IEEE International Conference on Computer Vision, pages 13117-13126, 9 2021. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.186, + 0.472, + 0.24 + ], + "angle": 0, + "content": "[2] Gwangbin Bae, Ignas Budvytis, and Roberto Cipolla. Irondepth: Iterative refinement of single-view depth using surface normal and its uncertainty. In *British Machine Vision Conference (BMVC)*, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.242, + 0.472, + 0.311 + ], + "angle": 0, + "content": "[3] Shariq Farooq Bhat, Ibrahim Alhashim, and Peter Wonka. Adabins: Depth estimation using adaptive bins. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 4008-4017, 11 2020. 1, 2, 3, 4, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.313, + 0.472, + 0.368 + ], + "angle": 0, + "content": "[4] Shariq Farooq Bhat, Ibraheem Alhashim, and Peter Wonka. Localbins: Improving depth estimation by learning local distributions. In European Conference Computer Vision (ECCV), pages 480-496, 2022. 1, 2, 3, 4, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.369, + 0.472, + 0.45 + ], + "angle": 0, + "content": "[5] András Bódis-Szomóru, Hayko Riemenschneider, and Luc Van Gool. Fast, approximate piecewise-planar modeling based on sparse structure-from-motion and superpixels. 
Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 469-476, 9 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.453, + 0.472, + 0.52 + ], + "angle": 0, + "content": "[6] Yuanzhouhan Cao, Zifeng Wu, and Chunhua Shen. Estimating depth from monocular images as classification using deep fully convolutional residual networks. IEEE Transactions on Circuits and Systems for Video Technology, 28:3174-3182, 5 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.523, + 0.472, + 0.605 + ], + "angle": 0, + "content": "[7] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), 12346 LNCS:213-229, 5 2020. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.607, + 0.472, + 0.702 + ], + "angle": 0, + "content": "[8] Ming Fang Chang, John Lambert, Patsorn Sangkloy, Jagjeet Singh, Slawomir Bak, Andrew Hartnett, De Wang, Peter Carr, Simon Lucey, Deva Ramanan, and James Hays. Argoverse: 3d tracking and forecasting with rich maps. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2019-June:8740-8749, 11 2019. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.706, + 0.472, + 0.773 + ], + "angle": 0, + "content": "[9] Anne Laure Chauve, Patrick Labatut, and Jean Philippe Pons. Robust piecewise-planar 3d reconstruction and completion from large-scale unstructured point data. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 1261-1268, 2010. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.776, + 0.472, + 0.83 + ], + "angle": 0, + "content": "[10] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248–255, 2009. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.472, + 0.901 + ], + "angle": 0, + "content": "[11] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.895, + 0.134 + ], + "angle": 0, + "content": "scale. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. 1, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.136, + 0.894, + 0.189 + ], + "angle": 0, + "content": "[12] David Eigen, Christian Puhrsch, and Rob Fergus. Depth map prediction from a single image using a multi-scale deep network. Advances in Neural Information Processing Systems, 3:2366-2374, 6 2014. 1, 2, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.191, + 0.895, + 0.272 + ], + "angle": 0, + "content": "[13] Huan Fu, Mingming Gong, Chaohui Wang, Kayhan Bat-manghelich, and Dacheng Tao. Deep ordinal regression network for monocular depth estimation. 
Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 2002-2011, 6 2018. 1, 2, 3, 4, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.274, + 0.895, + 0.342 + ], + "angle": 0, + "content": "[14] David Gallup, Jan Michael Frahm, and Marc Pollefeys. Piecewise planar and non-planar stereo for urban scene reconstruction. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 1418-1425, 2010. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.344, + 0.895, + 0.399 + ], + "angle": 0, + "content": "[15] Peng Gao, Minghang Zheng, Xiaogang Wang, Jifeng Dai, and Hongsheng Li. Fast convergence of detr with spatially modulated co-attention. Proceedings of the IEEE International Conference on Computer Vision, pages 3601-3610, 8 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.401, + 0.895, + 0.455 + ], + "angle": 0, + "content": "[16] Ravi Garg, BG Vijay Kumar, Gustavo Carneiro, and Ian Reid. Unsupervised cnn for single view depth estimation: Geometry to the rescue. In European Conference on Computer Vision, pages 740-756. Springer, 2016. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.457, + 0.894, + 0.51 + ], + "angle": 0, + "content": "[17] Andreas Geiger, Philip Lenz, and Raquel Urtasun. Are we ready for autonomous driving? the kitti vision benchmark suite. In Conference on Computer Vision and Pattern Recognition (CVPR), 2012. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.512, + 0.894, + 0.566 + ], + "angle": 0, + "content": "[18] Vitor Guizilini, Rares Ambrus, Sudeep Pillai, Allan Raventos, and Adrien Gaidon. 3d packing for self-supervised monocular depth estimation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.568, + 0.895, + 0.635 + ], + "angle": 0, + "content": "[19] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2016-December:770-778, 12 2015. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.637, + 0.894, + 0.678 + ], + "angle": 0, + "content": "[20] Geoffrey E. Hinton, Sara Sabour, and Nicholas Frosst. Matrix capsules with EM routing. In 6th International Conference on Learning Representations, ICLR, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.679, + 0.894, + 0.747 + ], + "angle": 0, + "content": "[21] Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q. Weinberger. Densely connected convolutional networks. Proceedings - 30th IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, 2017-January:2261-2269, 8 2016. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.749, + 0.894, + 0.83 + ], + "angle": 0, + "content": "[22] Lam Huynh, Phong Nguyen-Ha, Jiri Matas, Esa Rahtu, and Janne Heikkilä. Guiding monocular depth estimation using depth-attention volume. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), 12371 LNCS:581-597, 4 2020. 1, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.832, + 0.895, + 0.901 + ], + "angle": 0, + "content": "[23] Iro Laina, Christian Rupprecht, Vasileios Belagiannis, Federico Tombari, and Nassir Navab. Deeper depth prediction with fully convolutional residual networks. 
Proceedings - 2016 4th International Conference on 3D Vision, 3DV 2016, pages 239-248, 6 2016. 2" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.895, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.518, + 0.957 + ], + "angle": 0, + "content": "21485" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[24] Jin Han Lee, Myung-Kyu Han, Dong Wook Ko, and Il Hong Suh. From big to small: Multi-scale local planar guidance for monocular depth estimation. arXiv e-prints, abs/1907.10326, 7 2019. 1, 2, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.151, + 0.472, + 0.22 + ], + "angle": 0, + "content": "[25] Jae Han Lee, Minhyeok Heo, Kyung Rae Kim, and Chang Su Kim. Single-image depth estimation based on fourier domain analysis. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 330-339, 12 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.224, + 0.472, + 0.292 + ], + "angle": 0, + "content": "[26] Boying Li, Yuan Huang, Zeyu Liu, Danping Zou, and Wenxian Yu. Structdepth: Leveraging the structural regularities for self-supervised indoor depth estimation. Proceedings of the IEEE International Conference on Computer Vision, pages 12643-12653, 8 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.296, + 0.472, + 0.351 + ], + "angle": 0, + "content": "[27] Zhenyu Li, Zehui Chen, Xianming Liu, and Junjun Jiang. Depthformer: Exploiting long-range correlation and local information for accurate monocular depth estimation. arXiv e-prints, abs/2203.14211, 3 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.354, + 0.472, + 0.422 + ], + "angle": 0, + "content": "[28] Chen Liu, Kihwan Kim, Jinwei Gu, Yasutaka Furukawa, and Jan Kautz. Planercnn: 3d plane detection and reconstruction from a single image. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2019-June:4445-4454, 12 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.426, + 0.472, + 0.495 + ], + "angle": 0, + "content": "[29] Chen Liu, Jimei Yang, Duygu Ceylan, Ersin Yumer, and Yasutaka Furukawa. Planenet: Piece-wise planar reconstruction from a single rgb image. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 2579-2588, 4 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.498, + 0.472, + 0.554 + ], + "angle": 0, + "content": "[30] Fayao Liu, Chunhua Shen, Guosheng Lin, and Ian Reid. Learning depth from single monocular images using deep convolutional neural fields. IEEE Transactions on Pattern Analysis and Machine Intelligence, 38:2024-2039, 2 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.557, + 0.472, + 0.626 + ], + "angle": 0, + "content": "[31] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. Proceedings of the IEEE International Conference on Computer Vision, pages 9992-10002, 3 2021. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.629, + 0.472, + 0.698 + ], + "angle": 0, + "content": "[32] Francesco Locatello, Dirk Weissenborn, Thomas Unterthiner, Aravindh Mahendran, Georg Heigold, Jakob Uszkoreit, Alexey Dosovitskiy, and Thomas Kipf. Object-centric learning with slot attention. 
Advances in Neural Information Processing Systems, 2020-December, 6 2020. 2, 3, 4, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.701, + 0.472, + 0.77 + ], + "angle": 0, + "content": "[33] Xiaoxiao Long, Cheng Lin, Lingjie Liu, Wei Li, Christian Theobalt, Ruigang Yang, and Wenping Wang. Adaptive surface normal constraint for depth estimation. Proceedings of the IEEE International Conference on Computer Vision, pages 12829-12838, 3 2021. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.773, + 0.472, + 0.814 + ], + "angle": 0, + "content": "[34] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. 7th International Conference on Learning Representations, ICLR 2019, 11 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.818, + 0.472, + 0.9 + ], + "angle": 0, + "content": "[35] S. H. Mahdi Miangoleh, Sebastian Dille, Long Mai, Sylvain Paris, and Yagiz Aksoy. Boosting monocular depth estimation models to high-resolution via content-adaptive multi-resolution merging. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 9680-9689, 5 2021. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[36] Pushmeet Kohli Nathan Silberman, Derek Hoiem and Rob Fergus. Indoor segmentation and support inference from rgbd images. In ECCV, 2012. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.894, + 0.26 + ], + "angle": 0, + "content": "[37] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems 32, pages 8024-8035. Curran Associates, Inc., 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.263, + 0.895, + 0.331 + ], + "angle": 0, + "content": "[38] Vaishakh Patil, Christos Sakaridis, Alexander Liniger, and Luc Van Gool. P3Depth: Monocular depth estimation with a piecewise planarity prior. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR, pages 1600-1611. IEEE, 2022. 1, 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.334, + 0.895, + 0.43 + ], + "angle": 0, + "content": "[39] Xiaojuan Qi, Renjie Liao, Zhengzhe Liu, Raquel Urtasun, and Jiaya Jia. Geonet: Geometric neural network for joint depth and surface normal estimation. In 2018 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2018, Salt Lake City, UT, USA, June 18-22, 2018, pages 283-291. Computer Vision Foundation / IEEE Computer Society, 2018. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.433, + 0.894, + 0.501 + ], + "angle": 0, + "content": "[40] Xiaojuan Qi, Zhengzhe Liu, Renjie Liao, Philip H. S. Torr, Raquel Urtasun, and Jiaya Jia. Geonet++: Iterative geometric neural network with edge-aware refinement for joint depth and surface normal estimation. IEEE Trans. Pattern Anal. Mach. Intell., 44(2):969-984, 2022. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.504, + 0.894, + 0.558 + ], + "angle": 0, + "content": "[41] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. 
Vision transformers for dense prediction. Proceedings of the IEEE International Conference on Computer Vision, pages 12159-12168, 3 2021. 1, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.562, + 0.894, + 0.671 + ], + "angle": 0, + "content": "[42] Sara Sabour, Nicholas Frosst, and Geoffrey E. Hinton. Dynamic routing between capsules. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett, editors, Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pages 3856-3866, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.674, + 0.894, + 0.741 + ], + "angle": 0, + "content": "[43] Shuran Song, Samuel P. Lichtenberg, and Jianxiong Xiao. Sun rgb-d: A rgb-d scene understanding benchmark suite. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 07-12-June-2015:567-576, 10 2015. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.745, + 0.894, + 0.8 + ], + "angle": 0, + "content": "[44] Zhiqing Sun, Shengcao Cao, Yiming Yang, and Kris Kitani. Rethinking transformer-based set prediction for object detection. Proceedings of the IEEE International Conference on Computer Vision, pages 3591-3600, 11 2020. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.802, + 0.894, + 0.857 + ], + "angle": 0, + "content": "[45] Mingxing Tan and Quoc V. Le. Efficientnet: Rethinking model scaling for convolutional neural networks. 36th International Conference on Machine Learning, ICML 2019, 2019-June:10691-10700, 5 2019. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.86, + 0.894, + 0.901 + ], + "angle": 0, + "content": "[46] Yao-Hung Hubert Tsai, Nitish Srivastava, Hanlin Goh, and Ruslan Salakhutdinov. Capsules with inverted dot-product attention routing. arXiv e-prints, abs/2002.04764, 2020. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.895, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "21486" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.174 + ], + "angle": 0, + "content": "[47] Igor Vasiljevic, Nicholas I. Kolkin, Shanyi Zhang, Ruotian Luo, Haochen Wang, Falcon Z. Dai, Andrea F. Daniele, Mohammadreza Mostajabi, Steven Basart, Matthew R. Walter, and Gregory Shakhnarovich. DIODE: A dense indoor and outdoor depth dataset. arXiv e-prints, abs/1908.00463, 2019. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.176, + 0.472, + 0.259 + ], + "angle": 0, + "content": "[48] Jingdong Wang, Ke Sun, Tianheng Cheng, Borui Jiang, Chaorui Deng, Yang Zhao, Dong Liu, Yadong Mu, Mingkui Tan, Xinggang Wang, Wenyu Liu, and Bin Xiao. Deep high-resolution representation learning for visual recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43:3349-3364, 8 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.261, + 0.472, + 0.342 + ], + "angle": 0, + "content": "[49] Peng Wang, Xiaohui Shen, Bryan C. Russell, Scott Cohen, Brian L. Price, and Alan L. Yuille. SURGE: surface regularized geometry estimation from a single image. In Daniel D. Lee, Masashi Sugiyama, Ulrike von Luxburg, Isabelle Guyon, and Roman Garnett, editors, Advances in Neural Information Processing Systems, pages 172-180, 2016. 
8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.344, + 0.472, + 0.412 + ], + "angle": 0, + "content": "[50] Cihang Xie, Mingxing Tan, Boqing Gong, Jiang Wang, Alan L. Yuille, and Quoc V. Le. Adversarial examples improve image recognition. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 816-825, 11 2019. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.413, + 0.472, + 0.495 + ], + "angle": 0, + "content": "[51] Dan Xu, Wanli Ouyang, Xiaogang Wang, and Nicu Sebe. Pad-net: Multi-tasks guided prediction-and-distillation network for simultaneous depth estimation and scene parsing. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 675-684, 5 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.497, + 0.472, + 0.566 + ], + "angle": 0, + "content": "[52] Dan Xu, Wei Wang, Hao Tang, Hong Liu, Nicu Sebe, and Elisa Ricci. Structured attention guided convolutional neural fields for monocular depth estimation. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 3917-3925, 3 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.567, + 0.472, + 0.635 + ], + "angle": 0, + "content": "[53] Fengting Yang and Zihan Zhou. Recovering 3d planes from a single image via convolutional neural networks. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), 11214 LNCS:87-103, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.637, + 0.472, + 0.704 + ], + "angle": 0, + "content": "[54] Guanglei Yang, Hao Tang, Mingli Ding, Nicu Sebe, and Elisa Ricci. Transformer-based attention networks for continuous pixel-wise prediction. Proceedings of the IEEE International Conference on Computer Vision, pages 16249-16259, 3 2021. 1, 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.706, + 0.472, + 0.761 + ], + "angle": 0, + "content": "[55] Wei Yin, Yifan Liu, Chunhua Shen, and Youliang Yan. Enforcing geometric constraints of virtual normal for depth prediction. Proceedings of the IEEE International Conference on Computer Vision, pages 5683-5692, 7 2019. 1, 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.762, + 0.472, + 0.817 + ], + "angle": 0, + "content": "[56] Fisher Yu, Vladlen Koltun, and Thomas Funkhouser. Dilated residual networks. Proceedings - 30th IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, 2017-January:636-644, 5 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.818, + 0.472, + 0.871 + ], + "angle": 0, + "content": "[57] Zehao Yu, Lei Jin, and Shenghua Gao. \\(\\mathbf{P}^2\\) net: Patch-match and plane-regularization for unsupervised indoor depth estimation. In European Conference on Computer Vision, pages 206–222, 7 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.472, + 0.903 + ], + "angle": 0, + "content": "[58] Zehao Yu, Jia Zheng, Dongze Lian, Zihan Zhou, and Shenghua Gao. Single-image piece-wise planar 3d recon" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.092, + 0.472, + 0.903 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.894, + 0.134 + ], + "angle": 0, + "content": "struction via associative embedding. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2019-June:1029-1037, 2 2019. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.136, + 0.894, + 0.205 + ], + "angle": 0, + "content": "[59] Weihao Yuan, Xiaodong Gu, Zuozhuo Dai, Siyu Zhu, and Ping Tan. Neural window fully-connected crfs for monocular depth estimation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR, pages 3906-3915. IEEE, 2022. 1, 2, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.207, + 0.894, + 0.274 + ], + "angle": 0, + "content": "[60] Weidong Zhang, Wei Zhang, and Yinda Zhang. Geolayout: Geometry driven room layout estimation based on depth maps of planes. In European Conference on Computer Vision, pages 632-648. Springer Science and Business Media Deutschland GmbH, 8 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.277, + 0.894, + 0.345 + ], + "angle": 0, + "content": "[61] Zhenyu Zhang, Zhen Cui, Chunyan Xu, Yan Yan, Nicu Sebe, and Jian Yang. Pattern-affinitive propagation across depth, surface normal and semantic segmentation. In IEEE Computer Society Conference on Computer Vision and Pattern Recognition CVPR, pages 4101-4110, 6 2019. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.347, + 0.894, + 0.387 + ], + "angle": 0, + "content": "[62] Brady Zhou, Philipp Krahenbuhl, and Vladlen Koltun. Does computer vision matter for action? Science Robotics, 4, 5 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.39, + 0.894, + 0.445 + ], + "angle": 0, + "content": "[63] Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, and Jifeng Dai. Deformable DETR: deformable transformers for end-to-end object detection. In 9th International Conference on Learning Representations ICLR, 2021. 5" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.894, + 0.445 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "21487" + } + ] +] \ No newline at end of file diff --git a/2023/iDisc_ Internal Discretization for Monocular Depth Estimation/f111aba4-6ed5-4778-a051-8ab6247508d3_origin.pdf b/2023/iDisc_ Internal Discretization for Monocular Depth Estimation/f111aba4-6ed5-4778-a051-8ab6247508d3_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..9a1d2f1e2d7aed3700d95d0b830f6e153e6bb089 --- /dev/null +++ b/2023/iDisc_ Internal Discretization for Monocular Depth Estimation/f111aba4-6ed5-4778-a051-8ab6247508d3_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d93f67587c8598b91d90d94f2285d18033dc9014d3df86a81bb3fea026c2a96 +size 3033131 diff --git a/2023/iDisc_ Internal Discretization for Monocular Depth Estimation/full.md b/2023/iDisc_ Internal Discretization for Monocular Depth Estimation/full.md new file mode 100644 index 0000000000000000000000000000000000000000..1e1eae35dc81cbd7d6fb5d30f6a9ac20d96d5a8d --- /dev/null +++ b/2023/iDisc_ Internal Discretization for Monocular Depth Estimation/full.md @@ -0,0 +1,307 @@ +# iDisc: Internal Discretization for Monocular Depth Estimation + +Luigi Piccinelli Christos Sakaridis Fisher Yu + +Computer Vision Lab, ETH Zürich + +# Abstract + +Monocular depth estimation is fundamental for 3D scene understanding and downstream applications. However, even under the supervised setup, it is still challenging and ill-posed due to the lack of full geometric constraints. Although a scene can consist of millions of pixels, there are fewer high-level patterns. 
We propose iDisc to learn those patterns with internal discretized representations. The method implicitly partitions the scene into a set of high-level patterns. In particular, our new module, Internal Discretization (ID), implements a continuous-discrete-continuous bottleneck to learn those concepts without supervision. In contrast to state-of-the-art methods, the proposed model does not enforce any explicit constraints or priors on the depth output. The whole network with the ID module can be trained end-to-end, thanks to the bottleneck module based on attention. Our method sets the new state of the art with significant improvements on NYU-Depth v2 and KITTI, outperforming all published methods on the official KITTI benchmark. iDisc can also achieve state-of-the-art results on surface normal estimation. Further, we explore the model generalization capability via zero-shot testing. We observe the compelling need to promote diversification in the outdoor scenario. Hence, we introduce splits of two autonomous driving datasets, DDAD and Argoverse. Code is available at http://vis.xyz/pub/idisc. + +# 1. Introduction + +Depth estimation is essential in computer vision, especially for understanding geometric relations in a scene. This task consists in predicting the distance between the projection center and the 3D point corresponding to each pixel. Depth estimation finds direct significance in downstream applications such as 3D modeling, robotics, and autonomous cars. Some research [62] shows that depth estimation is a crucial prompt to be leveraged for action reasoning and execution. In particular, we tackle the task of monocular depth estimation (MDE). MDE is an ill-posed problem due to its inherent scale ambiguity: the same 2D input image can correspond to an infinite number of 3D scenes. + +![](images/8ba22e74df8a546a7c53dddaaca8625992d9f6ed5692c5fbba528c9c016e51d4.jpg) +(a) Input image + +![](images/f005ed225df087a2be0bc84bfe03875ccb90c154d40ec389de3b6f70c18d5bb0.jpg) +(b) Output depth + +![](images/2e8c07acd4f3533dd988334195875e5b8b0914a829e433cb67d2c0d162911607.jpg) +(c) Intermediate representations + +![](images/870d50ba2271e434f8e85ba6f034fb1178463bcbcb085e72560087d90b8584c5.jpg) +(d) Internal discretization +Figure 1. We propose iDisc which implicitly enforces an internal discretization of the scene via a continuous-discrete-continuous bottleneck. Supervision is applied to the output depth only, i.e., the fused intermediate representations in (c), while the internal discrete representations are implicitly learned by the model. (d) displays some actual internal discretization patterns captured from the input, e.g., foreground, object relationships, and 3D planes. Our iDisc model is able to predict high-quality depth maps by capturing scene interactions and structure. + +State-of-the-art (SotA) methods typically involve convolutional networks [12, 13, 24] or, since the advent of vision Transformer [11], transformer architectures [3, 41, 54, 59]. Most methods either impose geometric constraints on the image [22, 33, 38, 55], namely, planarity priors or explicitly discretize the continuous depth range [3, 4, 13]. The latter can be viewed as learning frontoparallel planes. These imposed priors inherently limit the expressiveness of the respective models, as they cannot model arbitrary depth patterns, ubiquitous in real-world scenes. + +We instead propose a more general depth estimation model, called iDisc, which does not explicitly impose any constraint on the final prediction. 
We design an Internal Discretization (ID) of the scene which is in principle depth-agnostic. Our assumption behind this ID is that each scene can be implicitly described by a set of concepts or patterns, + +such as objects, planes, edges, and perspectivity relationships. The specific training signal determines which patterns to learn (see Fig. 1). + +We design a continuous-to-discrete bottleneck through which the information is passed in order to obtain such internal scene discretization, namely the underlying patterns. In the bottleneck, the scene feature space is partitioned via learnable and input-dependent quantizers, which in turn transfer the information onto the continuous output space. The ID bottleneck introduced in this work is a general concept and can be implemented in several ways. Our particular ID implementation employs attention-based operators, leading to an end-to-end trainable architecture and input-dependent framework. More specifically, we implement the continuous-to-discrete operation via "transposed" cross-attention, where transposed refers to applying softmax on the output dimension. This softmax formulation enforces the input features to be routed to the internal discrete representations (IDRs) in an exclusive fashion, thus defining an input-dependent soft clustering of the feature space. The discrete-to-continuous transformation is implemented via cross-attention. Supervision is only applied to the final output, without any assumptions or regularization on the IDRs. + +We test iDisc on multiple indoor and outdoor datasets and probe its robustness via zero-shot testing. As of today, there is too little variety in MDE benchmarks for the outdoor scenario, since the only established benchmark is KITTI [17]. Moreover, we observe that all methods fail on outdoor zero-shot testing, suggesting that the KITTI dataset is not diverse enough and leads to overfitting, thus implying that it is not indicative of generalized performance. Hence, we find it compelling to establish a new benchmark setup for the MDE community by proposing two new train-test splits of more diverse and challenging high-quality outdoor datasets: Argoverse1.1 [8] and DDAD [18]. + +Our main contributions are as follows: (i) we introduce the Internal Discretization module, a novel architectural component that adeptly represents a scene by combining underlying patterns; (ii) we show that it is a generalization of SotA methods involving depth ordinal regression [3, 13]; (iii) we propose splits of two raw outdoor datasets [8, 18] with high-quality LiDAR measurements. We extensively test iDisc on six diverse datasets and, owing to the ID design, our model consistently outperforms SotA methods and presents better transferability. Moreover, we apply iDisc to surface normal estimation showing that the proposed module is general enough to tackle generic real-valued dense prediction tasks. + +# 2. Related Work + +The supervised setting of MDE assumes that pixel-wise depth annotations are available at training time and depth inference is performed on single images. The coarse-to-fine network introduced in Eigen et al. [12] is the cor + +nerstone in MDE with end-to-end neural networks. The work established the optimization process via the Scale-Invariant log loss $(\mathrm{SI}_{\log})$ . 
Since then, the three main directions evolve: new architectures, such as residual networks [23], neural fields [30, 52], multi-scale fusion [25, 35], transformers [3, 54, 59]; improved optimization schemes, such as reverse-Huber loss [23], classification [6], or ordinal regression [3, 13]; multi-task learning to leverage ancillary information from the related task, such as surface normals estimation or semantic segmentation [12, 39, 51]. + +Geometric priors have been widely utilized in the literature, particularly the piecewise planarity prior [5, 9, 14], serving as a proper real-world approximation. The geometric priors are usually incorporated by explicitly treating the image as a set of planes [26, 28, 29, 58], using a plane-inducing loss [57], forcing pixels to attend to the planar representation of other pixels [24, 38], or imposing consistency with other tasks' output [2, 33, 55], like surface normals. Priors can focus on a more holistic scene representation by dividing the whole scene into 3D planes without dependence on intrinsic camera parameters [53, 60], aiming at partitioning the scene into dominant depth planes. In contrast to geometric prior-based works, our method lifts any explicit geometric constraints on the scene. Instead, iDisc implicitly enforces the representation of scenes as a set of high-level patterns. + +Ordinal regression methods [3,4, 13] have proven to be a promising alternative to other geometry-driven approaches. The difference with classification models is that class "values" are learnable and are real numbers, thus the problem falls into the regression category. The typical SotA rationale is to explicitly discretize the continuous output depth range, rendering the approach similar to mask-based segmentation. Each of the scalar depth values is associated with a confidence mask which describes the probability of each pixel presenting such a depth value. Hence, SotA methods inherently assume that depth can be represented as a set of frontoparallel planes, that is, depth "masks". + +The main paradigm of ordinal regression methods is to first obtain hidden representations and scalar values of discrete depth values. The dot-product similarity between the feature maps and the depth representations is treated as logits and softmax is applied to extract confidence masks (in Fu et al. [13] this degenerates to argmax). Finally, the final prediction is defined as the per-pixel weighted average of the discrete depth values, with the confidence values serving as the weights. iDisc draws connections with the idea of depth discretization. However, our ID module is designed to be depth-agnostic. The discretization occurs at the abstract level of internal features from the ID bottleneck instead of the output depth level, unlike other methods. + +Iterative routing is related to our "transposed" crossattention. The first approach of this kind was Capsule Networks and their variants [20, 42]. Some formulations [32, 46] + +![](images/7b815d251a046cb5082a7bd385affd5a0d6db5a9092c6b80fa9ff6953390a6f6.jpg) +Figure 2. Model Architecture. The Internal Discretization Module imposes an information bottleneck via two consecutive stages: continuous-to-discrete (C2D) and discrete-to-continuous (D2C). The module processes multiple resolutions, i.e., $l \in \{1, 2, 3\}$ , independently in parallel. The bottleneck embodies our assumption that a scene can be represented as a set of patterns. 
The C2D stage aggregates information, given a learnable prior ( $\mathbf{H}_{\text{prior}}^l$ ), from the $l$ -th resolution feature maps ( $\mathbf{F}^l$ ) to a finite set of IDRs ( $\mathbf{H}^l$ ). In particular, it learns how to define a partition function that is dependent on the input $\mathbf{F}^l$ via transposed cross-attention, as in (1). The second stage (D2C) transfers the IDRs on the original continuous space using layers of cross-attention as in (2), for sake of simplicity, we depict only a generic $i$ -th layer. Cross-attention is guided by the similarity between decoded pixel embeddings ( $\mathbf{P}^l$ ) and $\mathbf{H}^l$ . The final prediction ( $\hat{\mathbf{D}}$ ) is the fusion, i.e., mean, of the intermediate representations $\{\hat{\mathbf{D}}^l\}_{l=1}^3$ . + +employ different kinds of attention mechanisms. Our attention mechanism draws connections with [32]. However, we do not allow permutation invariance, since our assumption is that each discrete representation internally describes a particular kind of pattern. In addition, we do not introduce any other architectural components such as gated recurrent units (GRU). In contrast to other methods, our attention is employed at a higher abstraction level, namely in the decoder. + +# 3. Method + +We propose an Internal Discretization (ID) module, to discretize the internal feature representation of encoder-decoder network architectures. We hypothesize that the module can break down the scenes into coherent concepts without semantic supervision. This section will first describe the module design and then discuss the network architecture. Sec. 3.1.1 defines the formulation of "transposed" cross-attention outlined in Sec. 1 and describes the main difference with previous formulations from Sec. 2. Moreover, we derive in Sec. 3.1.2 how the iDisc formulation can be interpreted as a generalization of SotA ordinal regression methods by reframing their original formulation. Eventually, Sec. 3.2 presents the optimization problem and the overall architecture. + +# 3.1. Internal Discretization Module + +The ID module involves a continuous-discrete-continuous bottleneck composed of two main consecutive stages. The overall module is based on our hypothesis that scenes can be represented as a finite set of patterns. The first stage + +consists in a continuous-to-discrete component, namely soft-exclusive discretization of the feature space. More specifically, it enforces an input-dependent soft clustering on the feature maps in an image-to-set fashion. The second stage completes the internal scene discretization by mapping the learned IDRs onto the continuous output space. IDRs are not bounded to focus exclusively on depth planes but are allowed to represent any high-level pattern or concept, such as objects, relative locations, and planes in the 3D space. In contrast with SotA ordinal regression methods [3,4,13], the IDRs are neither explicitly tied to depth values nor directly tied to the output. Moreover, our module operates at multiple intermediate resolutions and merges them only in the last layer. The overall architecture of iDisc, particularly our ID module, is shown in Fig. 2. + +# 3.1.1 Adaptive Feature Partitioning + +The first stage of our ID module, Adaptive Feature Partitioning (AFP), generates proper discrete representations $(\mathcal{H} \coloneqq \{\mathbf{H}^l\}_{l=1}^3)$ that quantize the feature space $(\mathcal{F} \coloneqq \{\mathbf{F}^l\}_{l=1}^3)$ at each resolution $l$ . 
We drop the resolution superscript $l$ since resolutions are independently processed and only one generic resolution is treated here. iDisc does not simply learn fixed centroids, as in standard clustering, but rather learns how to define a partition function in an input-dependent fashion. More specifically, an iterative transposed cross-attention module is utilized. Given the specific input feature maps $(\mathbf{F})$ , the iteration process refines (learnable) IDR priors $(\mathbf{H}_{\mathrm{prior}})$ over $R$ iterations. + +More specifically, the term "transposed" refers to the different axis along which the softmax operation is applied, namely $\left[\mathrm{softmax}(\mathbf{KQ}^T)\right]^T\mathbf{V}$ instead of the canonical dot-product attention softmax(QK)V, with Q,K,V as query, key and value tensors, respectively. In particular, the tensors are obtained as projections of feature maps and IDR priors, $f_{\mathbf{Q}}(\mathbf{H}_{\mathrm{prior}}),f_{\mathbf{K}}(\mathbf{F}),f_{\mathbf{V}}(\mathbf{F})$ . The $t$ -th iteration out of $R$ can be formulated as follows: + +$$ +W _ {i j} ^ {t} = \frac {\exp \left(\mathbf {k} _ {i} ^ {T} \mathbf {q} _ {j} ^ {t}\right)}{\sum_ {k = 1} ^ {N} \exp \left(\mathbf {k} _ {i} ^ {T} \mathbf {q} _ {k} ^ {t}\right)}, \mathbf {q} _ {j} ^ {t + 1} = \sum_ {i = 1} ^ {M} W _ {i j} ^ {t} \mathbf {v} _ {i}, \tag {1} +$$ + +where $\mathbf{q}_j, \mathbf{k}_i, \mathbf{v}_i \in \mathbb{R}^C$ are query, key and value respectively, $N$ is the number of IDRs, nameley, clusters, and $M$ is the number of pixels. The weights $W_{ij}$ may be normalized to 1 along the $i$ dimension to avoid vanishing or exploding quantities due to the summation of un-normalized distribution. + +The quantization stems from the inherent behavior of softmax. In particular, softmax forces competition among outputs: one output can be large only to the detriment of others. Therefore, when fixing $i$ , namely, given a feature, only a few attention weights $(W_{ij})$ may be significantly greater than zero. Hence, the content $\mathbf{v}_i$ is routed only to a few IDRs at the successive iteration. Feature maps are fixed during the process and weights are shared by design, thus $\{\mathbf{k}_i, \mathbf{v}_i\}_{i=1}^M$ are the same across iterations. The induced competition enforces a soft clustering of the input feature space, where the last-iteration IDR represents the actual partition function $(\mathbf{H} := \mathbf{Q}^R)$ . The probabilities of belonging to one partition are the attention weights, namely $W_{ij}^R$ with $j$ -th query fixed. Since attention weights are inherently dependent on the input, the specific partitioning also depends on the input and takes place at inference time. The entire process of AFP leads to (soft) mutually exclusive IDRs. + +As far as the partitioning rationale is concerned, the proposed AFP draws connections with iterative routing methods described in Sec. 2. However, important distinctions apply. First, IDRs are not randomly initialized as the "slots" in Locatello et al. [32] but present a learnable prior. Priors can be seen as learnable positional embeddings in the attention context, thus we do not allow a permutation-invariant set of representations. Moreover, non-adaptive partitioning can still take place via the learnable priors if the iterations are zero. Second, the overall architecture differs noticeably as described in Sec. 
2, and in addition, iDisc partitions feature space at the decoder level, corresponding to more abstract, high-level concepts, while the SotA formulations focus on clustering at an abstraction level close to the input image. + +One possible alternative approach to obtaining the aforementioned IDRs is the well-known image-to-set proposed in DETR [7], namely via classic cross-attention between representations and image feature maps. However, the corresponding representations might redundantly aggregate features, where the extreme corresponds to each output being + +the mean of the input. Studies [15, 44] have shown that slow convergence in transformer-based architectures may be due to the non-localized context in cross-attention. The exclusiveness of the IDRs discourages the redundancy of information in different IDRs. We argue that exclusiveness allows the utilization of fewer representations (32 against the 256 utilized in [3] and [13]), and can improve both the interpretability of what IDRs are responsible for and training convergence. + +# 3.1.2 Internal Scene Discretization + +In the second stage of the ID module, Internal Scene Discretization (ISD), the module ingests pixel embeddings $(\mathcal{P} := \{\mathbf{P}^l\}_{l=1}^3)$ from the decoder and IDRs $\mathcal{H}$ from the first stage, both at different resolutions $l$ , as shown in Fig. 2. Each discrete representation carries both the signature, as the key, and the output-related content, as the value, of the pattern it represents. The similarity between IDRs and pixel embeddings is computed in order to spatially localize in the continuous output space where to transfer the information of each IDR. We utilize the dot-product similarity function. + +Furthermore, the kind of information to transfer onto the final prediction is not constrained, as we never explicitly handle depth values, usually called bins, until the final output. Thus, the IDRs are completely free to carry generic high-level concepts (such as object-ness, relative positioning, and geometric structures). This approach is in stark contrast with SotA methods [3,4, 13, 27], which explicitly constrain what the representations are about: scalar depth values. Instead, iDisc learns to generate unconstrained representations in an input-dependent fashion. The effective discretization of the scene occurs in the second stage thanks to the information transfer from the set of exclusive concepts $(\mathcal{H})$ from AFP to the continuous space defined by $\mathcal{P}$ . We show that our method is not bounded to depth estimation, but can be applied to generic continuous dense tasks, for instance, surface normal estimation. Consequently, we argue that the training signal of the task at hand determines how to internally discretize the scene, rendering our ID module general and usable in settings other than depth estimation. + +From a practical point of view, the whole second stage consists in cross-attention layers applied to IDRs and pixel embeddings. As described in Sec. 3.1.1, we drop the resolution superscript $l$ . After that, the final depth maps are projected onto the output space and the multi-resolution depth predictions are combined. 
The $i$ -th layer is defined as: + +$$ +\mathbf {D} _ {i + 1} = \operatorname {s o f t m a x} \left(\mathbf {Q} _ {i} \mathbf {K} _ {i} ^ {T}\right) \mathbf {V} _ {i} + \mathbf {D} _ {i}, \tag {2} +$$ + +where $\mathbf{Q}_i = f_{Q_i}(\mathbf{P})\in \mathbb{R}^{H\times W\times C}$ , $\mathbf{P}$ are pixel embeddings with shape $(H,W)$ , and $\mathbf{K}_i$ , $\mathbf{V}_i\in \mathbb{R}^{N\times C}$ are the $N$ IDRs under linear transformations $f_{K_i}(\mathbf{H})$ , $f_{V_i}(\mathbf{H})$ . The term $\mathbf{Q}_i\mathbf{K}_i^T$ determines the spatial location for which each + +specific IDR is responsible, while $\mathbf{V}_i$ carries the semantic content to be transferred in the proper spatial locations. + +Our approach constitutes a generalization of depth estimation methods that involve (hybrid) ordinal regression. As described in Sec. 2, the common paradigm in ordinal regression methods is to explicitly discretize depth in a set of masks with a scalar depth value associated with it. Then, they predict the likelihood that each pixel belongs to such masks. Our change of paradigm stems from the reinterpretation of the mentioned ordinal regression pipeline which we translate into the following mathematical expression: + +$$ +\mathbf {D} = \operatorname {s o f t m a x} \left(\mathbf {P R} ^ {T} / T\right) \mathbf {v}, \tag {3} +$$ + +where $\mathbf{P}$ are the pixel embeddings at maximum resolution and $T$ is the softmax temperature. $\mathbf{v} \in \mathbb{R}^{N \times 1}$ are $N$ depth scalar values and $\mathbf{R} \in \mathbb{R}^{N \times (C - 1)}$ are their hidden representations, both processed as a unique stacked tensor $(\mathbf{R}||\mathbf{v} \in \mathbb{R}^{N \times C})$ . From the reformulation in (3), one can observe that (3) is a degenerate case of (2). In particular, $f_{Q}$ degenerates to the identity function. $f_{K}$ and $f_{V}$ degenerate to selector functions: the former function selects up to the $C - 1$ dimensions and the latter selects the last dimension only. Moreover, the hidden representations are refined pixel embeddings $(f(\mathbf{P}_i) = \mathbf{H}_i = \mathbf{R}||\mathbf{v})$ , and $\mathbf{D}$ in (3) is the final output, namely no multiple iterations are performed as in (2). The explicit entanglement between the semantic content of the hidden representations and the final output is due to hard-coding $\mathbf{v}$ as depth scalar values. + +# 3.2. Network Architecture + +Our network described in Fig. 2 comprises first an encoder backbone, interchangeably convolutional or attention-based, producing features at different scales. The encoded features at different resolutions are refined, and information between resolutions is shared, both via four multi-scale deformable attention (MSDA) blocks [63]. The feature maps from MSDA at different scales are fed into the AFP module to extract IDRs $(\mathcal{H})$ , and into the decoder to extract pixel embeddings in the continuous space $(\mathcal{P})$ . Pixel embeddings at different resolutions are combined with the respective IDRs in the ISD stage of the ID module to extract the depth maps. The final depth prediction corresponds to the mean of the interpolated intermediate representations. The optimization process is guided only by the established $\mathrm{SI}_{\log}$ loss defined in [12], and no other regularization is exploited. 
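To make the two-stage bottleneck more concrete, the following is a minimal, single-resolution PyTorch-style sketch of the ID module as described above: transposed cross-attention for AFP (Eq. 1) followed by residual cross-attention layers for ISD (Eq. 2). It is an illustrative simplification rather than the released implementation; the module and tensor names (`IDBottleneck`, `feats`, `pixel_emb`), the single-head attention without scaling, the number of AFP iterations and ISD layers, the zero initialization of the accumulated output, and the omission of the MSDA blocks and of the multi-resolution fusion are assumptions made here for brevity, while the choice of 32 IDRs follows the paper.

```python
import torch
import torch.nn as nn


class IDBottleneck(nn.Module):
    """Single-resolution sketch of the Internal Discretization bottleneck:
    AFP (transposed cross-attention, Eq. 1) followed by ISD (Eq. 2)."""

    def __init__(self, dim=256, num_idrs=32, afp_iters=2, isd_layers=2):
        super().__init__()
        # Learnable IDR priors H_prior, refined into H by AFP.
        self.idr_prior = nn.Parameter(0.02 * torch.randn(num_idrs, dim))
        # Projections f_Q, f_K, f_V shared across AFP iterations.
        self.afp_q = nn.Linear(dim, dim)
        self.afp_k = nn.Linear(dim, dim)
        self.afp_v = nn.Linear(dim, dim)
        # Per-layer ISD projections.
        self.isd_q = nn.ModuleList(nn.Linear(dim, dim) for _ in range(isd_layers))
        self.isd_k = nn.ModuleList(nn.Linear(dim, dim) for _ in range(isd_layers))
        self.isd_v = nn.ModuleList(nn.Linear(dim, dim) for _ in range(isd_layers))
        self.to_depth = nn.Linear(dim, 1)  # projection onto the output space
        self.afp_iters = afp_iters

    def forward(self, feats, pixel_emb):
        # feats:     (B, M, C) feature maps F, flattened over the M = H*W pixels
        # pixel_emb: (B, M, C) decoded pixel embeddings P at the same resolution
        bsz = feats.shape[0]
        k, v = self.afp_k(feats), self.afp_v(feats)           # fixed across iterations
        q = self.idr_prior.unsqueeze(0).expand(bsz, -1, -1)   # (B, N, C)

        # --- AFP: "transposed" cross-attention (Eq. 1) ---
        for _ in range(self.afp_iters):
            # Softmax over the N IDRs: each pixel routes its content to few IDRs.
            w = torch.softmax(k @ self.afp_q(q).transpose(1, 2), dim=-1)  # (B, M, N)
            w = w / (w.sum(dim=1, keepdim=True) + 1e-6)        # optional normalization over pixels
            q = w.transpose(1, 2) @ v                          # updated IDRs, (B, N, C)
        idrs = q                                               # H = Q^R

        # --- ISD: cross-attention from pixel embeddings to IDRs (Eq. 2) ---
        depth_feat = torch.zeros_like(pixel_emb)               # D_0 (assumed zero here)
        for f_q, f_k, f_v in zip(self.isd_q, self.isd_k, self.isd_v):
            attn = torch.softmax(f_q(pixel_emb) @ f_k(idrs).transpose(1, 2), dim=-1)
            depth_feat = depth_feat + attn @ f_v(idrs)         # residual update D_{i+1}
        return self.to_depth(depth_feat)                       # (B, M, 1) flattened depth map
```

In the full model, this bottleneck is instantiated at three resolutions, the resulting maps are averaged into the final prediction, and training is driven solely by the scale-invariant loss given next.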
$\mathrm{SI}_{\log}$ is defined as: + +$$ +\mathcal {L} _ {\mathrm {S I} _ {\log}} (\epsilon) = \alpha \sqrt {\mathbb {V} [ \epsilon ] + \lambda \mathbb {E} ^ {2} [ \epsilon ]} \tag {4} +$$ + +$$ +\text {w i t h} \epsilon = \log (\hat {y}) - \log (y ^ {*}), +$$ + +where $\hat{y}$ is the predicted depth and $y^{*}$ is the ground-truth (GT) value. $\mathbb{V}[\epsilon ]$ and $\mathbb{E}[\epsilon ]$ are computed as the empirical variance and expected value over all pixels, namely, $\{\epsilon_i\}_{i = 1}^N$ $\mathbb{V}[\epsilon ]$ is the purely scale-invariant loss, while $\mathbb{E}^2 [\epsilon ]$ fosters a proper scale. $\alpha$ and $\lambda$ are set to 10 and 0.15, as customary. + +![](images/f17c9cbe2353e72fa9a47066bbe82a251fe44385531a47ab4a393b4c33481f4f.jpg) +Figure 3. Qualitative results on NYU. Each pair of consecutive rows corresponds to one test sample. Each odd row shows the input RGB image and depth predictions for the selected methods. Each even row shows GT depth and the prediction errors of the selected methods clipped at 0.5 meters. The error color map is coolwarm: blue corresponds to lower error values and red to higher values. + +# 4. Experiments + +# 4.1. Experimental Setup + +# 4.1.1 Datasets + +NYU-Depth V2. NYU-Depth V2 (NYU) [36] is a dataset consisting of 464 indoor scenes with RGB images and quasi-dense depth images with $640 \times 480$ resolution. Our models are trained on the train-test split proposed by previous methods [24], corresponding to 24,231 samples for training and 654 for testing. In addition to depth, the dataset provides surface normal data utilized for normal estimation. The train split used for normal estimation is the one proposed in [55]. Zero-shot testing datasets. We evaluate the generalizability of indoor models on two indoor datasets which are not seen during training. The selected datasets are SUN-RGBD [43] and DIODE-Indoor [47]. For both datasets, the resolution is reduced to match that of NYU, which is $640 \times 480$ . + +KITTI. The KITTI dataset provides stereo images and corresponding Velodyne LiDAR scans of outdoor scenes captured from a moving vehicle [17]. RGB and depth images have (mean) resolution of $1241 \times 376$ . The split proposed by [12] (Eigen-split) with corrected depth is utilized as training and testing set, namely, 23,158 and 652 samples. The evaluation crop corresponds to the crop defined by [16]. All methods in + +![](images/ff6ee306e1cb02c3fe41324f2074246954ba091393b4961e92ac9170f0646dd3.jpg) + +![](images/675d1465eb3306bd6beb3e5651aed532cfb3a7be85b64093f2b0912575370bfa.jpg) + +![](images/fb2b1b07b4ce47c510f43908dbd66678d2fe1185f1055193b1f7e3b03a45ecdf.jpg) + +![](images/f6e7cffc2765f3aa798183983e5ca1f370c86cb2488a29e108fa8c6126ce2456.jpg) + +![](images/904fc0d19d850dea0dadc74c8ace4454694f80b3d272c734cfaee9138136bc6d.jpg) +Figure 4. Attention maps on NYU for three different IDRs. Each row presents the attention map of a specific IDR for four test images. Each discrete representation focuses on a specific high-level concept. The first two rows pertain to IDRs at the lowest resolution while the last corresponds to the highest resolution. Best viewed on a screen and zoomed in. + +![](images/017c253393fdc1e27ae13869ab5c08dc90c5c32ab03993ec1a113d06fc076233.jpg) + +![](images/2c366c3f448c1840deb98ee2bd4e7c307ffbce404f27aa507b5515f4f1a85b32.jpg) + +![](images/fe820b1a19c9b104f02a279a1e79fe64e3f414d08c033ca8acb563ae78f3ca55.jpg) + +Sec. 
4.2 that have source code and pre-trained models available are re-evaluated on KITTI with the evaluation mask from [16] to have consistent results. + +Argoverse1.1 and DDAD. We propose splits of two autonomous driving datasets, Argoverse1.1 (Argoverse) [8] and DDAD [18], for depth estimation. Argoverse and DDAD are both outdoor datasets that provide $360^{\circ}$ HD images and the corresponding LiDAR scans from moving vehicles. We pre-process the original datasets to extract depth maps and avoid redundancy. Training set scenes are sampled when the vehicle has been displaced by at least 2 meters from the previous sample. For the testing set scenes, we increase this threshold to 50 meters to further diminish redundancy. Our Argoverse split accounts for 21,672 training samples and 476 test samples, while DDAD for 18,380 training and 860 testing samples. Samples in Argoverse are taken from the 6 cameras covering the full $360^{\circ}$ panorama. For DDAD, we exclude 2 out of the 6 cameras since they have more than $30\%$ pixels occluded by the camera capture system. We crop both RGB images and depth maps to have $1920 \times 870$ resolution that is 180px and 210px cropped from the top for Argoverse and DDAD, respectively, to crop out a large portion of the sky and regions occluded by the ego-vehicle. For both datasets, we clip the maximum depth at $150\mathrm{m}$ . + +# 4.1.2 Implementation Details + +Evaluation Details. In all experiments, we do not exploit any test-time augmentations (TTA), camera parameters, or other tricks and regularizations, in contrast to many previous methods [3, 13, 24, 38, 59]. This provides a more challenging setup, which allows us to show the effectiveness of iDisc. As depth estimation metrics, we utilize root mean square error (RMS) and its log variant $(\mathrm{RMS}_{\log})$ , absolute error in log-scale $(\mathrm{Log}_{10})$ , absolute (A.Rel) and squared (S.rel) mean relative error, the percentage of inlier pixels $(\delta_{i})$ with + +Table 1. Comparison on NYU official test set. R101: ResNet-101 [19], D161: DenseNet-161 [21], EB5: EfficientNet-B5 [45], HR48: HRNet-48 [48], DD22: DRN-D-22 [56], ViTB: ViT-B/16+Resnet-50 [11], MViT: EfficientNet-B5-AP [50] + MiniViT, Swin{L, B, T}: Swin-{Large, Base, Tiny} [31]. $(\dagger)$ : ImageNet-22k [10] pretraining, $(\ddagger)$ : non-standard training set, $(\ast)$ : in-house dataset pretraining, $(\S)$ : re-evaluated without GT-based rescaling. + +
| Method | Encoder | δ1 ↑ | δ2 ↑ | δ3 ↑ | RMS ↓ | A.Rel ↓ | Log10 ↓ |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Eigen et al. [12] | - | 0.769 | 0.950 | 0.988 | 0.641 | 0.158 | - |
| DORN [13] | R101 | 0.828 | 0.965 | 0.992 | 0.509 | 0.115 | 0.051 |
| VNL [55] | - | 0.875 | 0.976 | 0.994 | 0.416 | 0.108 | 0.048 |
| BTS [24] | D161 | 0.885 | 0.978 | 0.994 | 0.392 | 0.110 | 0.047 |
| AdaBins‡ [3] | MViT | 0.903 | 0.984 | 0.997 | 0.364 | 0.103 | 0.044 |
| DAV [22] | DD22 | 0.882 | 0.980 | 0.996 | 0.412 | 0.108 | - |
| Long et al. [33] | HR48 | 0.890 | 0.982 | 0.996 | 0.377 | 0.101 | 0.044 |
| TransDepth [54] | ViTB | 0.900 | 0.983 | 0.996 | 0.365 | 0.106 | 0.045 |
| DPT* [41] | ViTB | 0.904 | 0.988 | 0.998 | 0.357 | 0.110 | 0.045 |
| P3Depth§ [38] | R101 | 0.830 | 0.971 | 0.995 | 0.450 | 0.130 | 0.056 |
| NeWCRF [59] | SwinL† | 0.922 | 0.992 | 0.998 | 0.334 | 0.095 | 0.041 |
| LocalBins‡ [4] | MViT | 0.907 | 0.987 | 0.998 | 0.357 | 0.099 | 0.042 |
| Ours | R101 | 0.892 | 0.983 | 0.995 | 0.380 | 0.109 | 0.046 |
| Ours | EB5 | 0.903 | 0.986 | 0.997 | 0.369 | 0.104 | 0.044 |
| Ours | SwinT | 0.894 | 0.983 | 0.996 | 0.377 | 0.109 | 0.045 |
| Ours | SwinB | 0.926 | 0.989 | 0.997 | 0.327 | 0.091 | 0.039 |
| Ours | SwinL† | 0.940 | 0.993 | 0.999 | 0.313 | 0.086 | 0.037 |
+ +threshold $1.25^{i}$ , and scale-invariant error in log-scale $(\mathrm{SI}_{\log})$ : $100\sqrt{\mathrm{Var}(\epsilon_{\log})}$ . The maximum depth for NYU and all zero-shot testing in indoor datasets, specifically SUN-RGBD and Diode Indoor, is set to $10\mathrm{m}$ while for KITTI it is set to $80\mathrm{m}$ and for Argoverse and DDAD to $150\mathrm{m}$ . Zero-shot testing is performed by evaluating a model trained on either KITTI or NYU and tested on either outdoor or indoor datasets, respectively, without additional fine-tuning. For surface normals estimation, the metrics are mean (Mean) and median (Med) absolute error, RMS angular error, and percentages of inlier pixels with thresholds at $11.5^{\circ}$ , $22.5^{\circ}$ , and $30^{\circ}$ . GT-based mean depth rescaling is applied only on Diode Indoor for all methods since the dataset presents largely scale-equivariant scenes, such as plain walls with tiny details. + +Training Details. We implement iDisc in PyTorch [37]. For training, we use the AdamW [34] optimizer $(\beta_{1} = 0.9, \beta_{2} = 0.999)$ with an initial learning rate of 0.0002 for every experiment, and weight decay set to 0.02. As a scheduler, we exploit Cosine Annealing starting from $30\%$ of the training, with final learning rate of 0.00002. We run 45k optimization iterations with a batch size of 16. All backbones are initialized with weights from ImageNet-pretrained models. The augmentations include both geometric (random rotation and scale) and appearance (random brightness, gamma, saturation, hue shift) augmentations. The required training time amounts to 20 hours on 4 NVidia Titan RTX. + +# 4.2. Comparison with the State of the Art + +Indoor Datasets. Results on NYU are presented in Table 1. The results show that we set the new state of the art on the benchmark, improving by more than $6\%$ on RMS and $9\%$ on A.Rel over the previous SotA. Moreover, results highlight + +Table 2. Zero-shot testing of models trained on NYU. All methods are trained on NYU and tested without further fine-tuning on the official validation set of SUN-RGBD and Diode Indoor. + +
| Test set | Method | δ1 ↑ | RMS ↓ | A.Rel ↓ | SIlog ↓ |
| --- | --- | --- | --- | --- | --- |
| SUN-RGBD | BTS [24] | 0.745 | 0.502 | 0.168 | 14.25 |
| SUN-RGBD | AdaBins [3] | 0.768 | 0.476 | 0.155 | 13.20 |
| SUN-RGBD | P3Depth [38] | 0.698 | 0.541 | 0.178 | 15.02 |
| SUN-RGBD | NeWCRF [59] | 0.799 | 0.429 | 0.150 | 11.27 |
| SUN-RGBD | Ours | 0.838 | 0.387 | 0.128 | 10.91 |
| Diode | BTS [24] | 0.705 | 0.965 | 0.211 | 23.78 |
| Diode | AdaBins [3] | 0.733 | 0.872 | 0.209 | 22.54 |
| Diode | P3Depth [38] | 0.732 | 0.877 | 0.202 | 22.16 |
| Diode | NeWCRF [59] | 0.799 | 0.769 | 0.164 | 18.69 |
| Diode | Ours | 0.810 | 0.721 | 0.156 | 18.11 |
+ +how iDisc is more sample-efficient than other transformer-based architectures [3,4,41,54,59] since we achieve better results even when employing smaller and less heavily pretrained backbone architectures. In addition, results show a significant improvement in performance with our model instantiated with a full-convolutional backbone over other full-convolutional-based models [12, 13, 22, 24, 38]. Table 2 presents zero-shot testing of NYU models on SUN-RGBD and Diode. In both cases, iDisc exhibits a compelling generalization performance, which we argue is due to implicitly learning the underlying patterns, namely, IDRs, of indoor scene structure via the ID module. + +Qualitative results in Fig. 3 emphasize how the method excels in capturing the overall scene complexity. In particular, iDisc correctly captures discontinuities without depth over-excitation due to chromatic edges, such as the sink in row 1, and captures the right perspectivity between foreground and background depth planes such as between the bed (row 2) or sofa (row 3) and the walls behind. In addition, the model presents a reduced error around edges, even when compared to higher-resolution models such as [3]. We argue that iDisc actually reasons at the pattern level, thus capturing better the structure of the scene. This is particularly appreciable in indoor scenes, since these are usually populated by a multitude of objects. This behavior is displayed in the attention maps of Fig. 4. Fig. 4 shows how IDRs at lower resolution capture specific components, such as the relative position of the background (row 1) and foreground objects (row 2), while IDRs at higher resolution behave as depth refiners, attending typically to high-frequency features, such as upper (row 3) or lower borders of objects. It is worth noting that an IDR attends to the image borders when the particular concept it looks for is not present in the image. That is, the borders are the last resort in which the IDR tries to find its corresponding pattern (e.g., row 2, col. 1). + +Outdoor Datasets. Results on KITTI in Table 3 demonstrate that iDisc sets the new SotA for this primary outdoor dataset, improving by more than $3\%$ in RMS and by $0.9\%$ in $\delta_{0.5}$ over the previous SotA. However, KITTI results present saturated metrics. For instance, $\delta_{3}$ is not reported since ev + +Table 3. Comparison on KITTI Eigen-split test set. Models without $\delta_{0.5}$ have implementation (partially) unavailable. R101: ResNet-101 [19], D161: DenseNet-161 [21], EB5: EfficientNet-B5 [45], ViTB: ViT-B/16+Resnet-50 [11], MViT: EfficientNet-B5-AP [50] + MiniViT, Swin{L, B, T}: Swin-{Large, Base, Tiny} [31]. ( $\dagger$ ): ImageNet-22k [10] pretraining, ( $\ddagger$ ): non-standard training set, (*): in-house dataset pretraining, ( $\S$ ): re-evaluated without GT-based rescaling. + +
| Method | Encoder | δ0.5 ↑ | δ1 ↑ | δ2 ↑ | RMS ↓ | RMSlog ↓ | A.Rel ↓ | S.Rel ↓ |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Eigen et al. [12] | - | - | 0.692 | 0.899 | 7.156 | 0.270 | 0.190 | 1.515 |
| DORN [13] | R101 | - | 0.932 | 0.984 | 2.727 | 0.120 | 0.072 | 0.307 |
| BTS [24] | D161 | 0.870 | 0.964 | 0.995 | 2.459 | 0.090 | 0.057 | 0.199 |
| AdaBins‡ [3] | MViT | 0.868 | 0.964 | 0.995 | 2.360 | 0.088 | 0.058 | 0.199 |
| TransDepth [54] | ViTB | - | 0.956 | 0.994 | 2.755 | 0.098 | 0.064 | 0.252 |
| DPT* [41] | ViTB | 0.865 | 0.965 | 0.996 | 2.315 | 0.088 | 0.059 | 0.190 |
| P3Depth§ [38] | R101 | 0.852 | 0.959 | 0.994 | 2.519 | 0.095 | 0.060 | 0.206 |
| NeWCRF [59] | SwinL† | 0.887 | 0.974 | 0.997 | 2.129 | 0.079 | 0.052 | 0.155 |
| Ours | R101 | 0.860 | 0.965 | 0.996 | 2.362 | 0.090 | 0.059 | 0.197 |
| Ours | EB5 | 0.852 | 0.963 | 0.994 | 2.510 | 0.094 | 0.063 | 0.223 |
| Ours | SwinT | 0.870 | 0.968 | 0.996 | 2.291 | 0.087 | 0.058 | 0.184 |
| Ours | SwinB | 0.885 | 0.974 | 0.997 | 2.149 | 0.081 | 0.054 | 0.159 |
| Ours | SwinL† | 0.896 | 0.977 | 0.997 | 2.067 | 0.077 | 0.050 | 0.145 |
+ +Table 4. Comparison on Argoverse and DDAD proposed splits. Comparison of performance of methods trained on either Argoverse or DDAD and tested on the same dataset. + +
| Dataset | Method | δ1 ↑ | δ2 ↑ | δ3 ↑ | RMS ↓ | RMSlog ↓ | A.Rel ↓ | S.Rel ↓ |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Argoverse | BTS [24] | 0.780 | 0.908 | 0.954 | 8.319 | 0.267 | 0.186 | 2.56 |
| Argoverse | AdaBins [3] | 0.750 | 0.901 | 0.952 | 8.686 | 0.278 | 0.195 | 2.36 |
| Argoverse | NeWCRF [59] | 0.707 | 0.871 | 0.939 | 9.437 | 0.321 | 0.232 | 3.23 |
| Argoverse | Ours | 0.821 | 0.923 | 0.960 | 7.567 | 0.243 | 0.163 | 2.22 |
| DDAD | BTS [24] | 0.757 | 0.913 | 0.962 | 10.11 | 0.251 | 0.186 | 2.27 |
| DDAD | AdaBins [3] | 0.748 | 0.912 | 0.962 | 10.24 | 0.255 | 0.201 | 2.30 |
| DDAD | NeWCRF [59] | 0.702 | 0.881 | 0.951 | 10.98 | 0.271 | 0.219 | 2.83 |
| DDAD | Ours | 0.809 | 0.934 | 0.971 | 8.989 | 0.221 | 0.163 | 1.85 |
+ +ery method scores $>0.99$ , with recent ones scoring 0.999. Therefore, we propose to utilize the metric $\delta_{0.5}$ , to better convey meaningful evaluation information. In addition, iDisc performs remarkably well on the highly competitive official KITTI benchmark, ranking $3^{\mathrm{rd}}$ among all methods and $1^{\mathrm{st}}$ among all published MDE methods. + +Moreover, Table 4 shows the results of methods trained and evaluated on the splits from Argoverse and DDAD proposed in this work. All methods have been trained with the same architecture and pipeline utilized for training on KITTI. We argue that the high degree of sparseness in GT of the two proposed datasets, in contrast to KITTI, deeply affects windowed methods such as [3, 59]. Qualitative results in Fig. 5 suggest that the scene level discretization leads to retaining small objects and sharp transitions between foreground objects and background: background in row 1, and boxes in row 2. These results show the better ability of iDisc to capture fine-grained depth variations on close-by and similar objects, including crowd in row 3. Zero-shot testing from KITTI to DDAD and Argoverse are presented in Supplement. + +Surface Normals Estimation. We emphasize that the proposed method has more general applications by testing iDisc on a different continuous dense prediction task such as surface normals estimation. Results in Table 5 evidence that we + +![](images/f82b0babd1075fcb975a78fbf0e4a4b041b327437274f1d2c11d1b5647e1e348.jpg) +Figure 5. Qualitative results on KITTI. Three zoomed-in crops of different test images are shown. The comparisons show the ability of iDisc to capture small details, proper background transition, and fine-grained variations in, e.g., crowded scenes. Best viewed on a screen. + +![](images/41ded1307a8edf0d1920bc3db437e4594b87fa46abb3465d079ee4ff03c674a2.jpg) + +![](images/54bdec52e72f6686cee7da9a7e096a354a8073ae592f317a8588dac5c29e1794.jpg) + +![](images/8eab2abe2f01cf49224ff05f658f93e28df1652ef233a7d1deba6c06ed77b7b6.jpg) + +Table 5. Comparison of surface normals estimation methods on NYU official test set. iDisc architecture and training pipeline is the same as the one utilized for indoor depth estimation. + +
| Method | 11.5° ↑ | 22.5° ↑ | 30° ↑ | RMS ↓ | Mean ↓ | Med ↓ |
| --- | --- | --- | --- | --- | --- | --- |
| SURGE [49] | 0.473 | 0.689 | 0.766 | - | 20.6 | 12.2 |
| GeoNet [39] | 0.484 | 0.484 | 0.795 | 26.9 | 19.0 | 11.8 |
| PAP [61] | 0.488 | 0.722 | 0.798 | 25.5 | 18.6 | 11.7 |
| GeoNet++ [40] | 0.502 | 0.732 | 0.807 | 26.7 | 18.5 | 11.2 |
| Bae et al. [1] | 0.622 | 0.793 | 0.852 | 23.5 | 14.9 | 7.5 |
| Ours | 0.638 | 0.798 | 0.856 | 22.8 | 14.6 | 7.3 |
+ +set the new state of the art on surface normals estimation. It is worth mentioning that all other methods are specifically designed for normals estimation, while we keep the same architecture and framework from indoor depth estimation. + +# 4.3. Ablation study + +The importance of each component introduced in iDisc is evaluated by ablating the method in Table 6. + +Depth Discretization. Internal scene discretization provides a clear improvement over its explicit counterpart (row 3 vs. 2), which is already beneficial in terms of robustness. Adding the MSDA module on top of explicit discretization (row 5) recovers part of the performance gap between the latter and our full method (row 8). We argue that MSDA recovers a better scene scale by refining feature maps at different scales at once, which is helpful for higher-resolution feature maps. + +Component Interactions. Using either the MSDA module or the AFP module together with internal scene discretization results in similar performance (rows 4 and 6). We argue that the two modules are complementary, and they synergize when combined (row 8). The complementarity can be explained as follows: in the former scenario (row 4), MSDA preemptively refines feature maps to be partitioned by the non-adaptive clustering, that is, by the IDR priors described in Sec. 3, while on latter one (row 6), AFP allows the IDRs to adapt themselves to partition the unrefined feature space properly. Row 7 shows that the architecture closer to the one in [32], particularly random initialization, hurts perfor + +Table 6. Ablation of iDisc. EDD: Explicit Depth Discretization [3, 13], ISD: Internal Scene discretization, AFP: Adaptive Feature Partitioning, MSDA: MultiScale Deformable Attention. The EDD module, used in SotA methods, and our ISD module are mutually exclusive. AFP with $(\checkmark_{\mathbf{R}})$ refers to random initialization of IDRs and architecture similar to [32]. The last row corresponds to our complete iDisc model. + +
| # | EDD | ISD | AFP | MSDA | δ1 ↑ | RMS ↓ | A.Rel ↓ |
| --- | --- | --- | --- | --- | --- | --- | --- |
| 1 | ✗ | ✗ | ✗ | ✗ | 0.890 | 0.370 | 0.104 |
| 2 | ✓ | ✗ | ✗ | ✗ | 0.905 | 0.367 | 0.102 |
| 3 | ✗ | ✓ | ✗ | ✗ | 0.919 | 0.340 | 0.096 |
| 4 | ✗ | ✓ | ✗ | ✓ | 0.931 | 0.319 | 0.091 |
| 5 | ✓ | ✗ | ✗ | ✓ | 0.931 | 0.326 | 0.091 |
| 6 | ✗ | ✓ | ✓ | ✗ | 0.934 | 0.319 | 0.088 |
| 7 | ✗ | ✓ | ✓_R | ✓ | 0.930 | 0.319 | 0.089 |
| 8 | ✗ | ✓ | ✓ | ✓ | 0.940 | 0.313 | 0.086 |
+ +mance since the internal representations do not embody any domain-specific prior information. + +# 5. Conclusion + +We have introduced a new module, called Internal Discretization, for MDE. The module represents the assumption that scenes can be represented as a finite set of patterns. Hence, iDisc leverages an internally discretized representation of the scene that is enforced via a continuous-discrete-continuous bottleneck, namely ID module. We have validated the proposed method, without any TTA or tricks, on the primary indoor and outdoor benchmarks for MDE, and have set the new state of the art among supervised approaches. Results showed that learning the underlying patterns, while not imposing any explicit constraints or regularization on the output, is beneficial for performance and generalization. iDisc also works out-of-the-box for normal estimation, beating all specialized SotA methods. In addition, we propose two new challenging outdoor dataset splits, aiming to benefit the community with more general and diverse benchmarks. + +Acknowledgment. This work is funded by Toyota Motor Europe via the research project TRACE-Zürich. + +# References + +[1] Gwangbin Bae, Ignas Budvytis, and Roberto Cipolla. Estimating and exploiting the aleatoric uncertainty in surface normal estimation. Proceedings of the IEEE International Conference on Computer Vision, pages 13117-13126, 9 2021. 8 +[2] Gwangbin Bae, Ignas Budvytis, and Roberto Cipolla. Irondepth: Iterative refinement of single-view depth using surface normal and its uncertainty. In *British Machine Vision Conference (BMVC)*, 2022. 2 +[3] Shariq Farooq Bhat, Ibrahim Alhashim, and Peter Wonka. Adabins: Depth estimation using adaptive bins. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 4008-4017, 11 2020. 1, 2, 3, 4, 5, 6, 7, 8 +[4] Shariq Farooq Bhat, Ibraheem Alhashim, and Peter Wonka. Localbins: Improving depth estimation by learning local distributions. In European Conference Computer Vision (ECCV), pages 480-496, 2022. 1, 2, 3, 4, 6, 7 +[5] András Bódis-Szomóru, Hayko Riemenschneider, and Luc Van Gool. Fast, approximate piecewise-planar modeling based on sparse structure-from-motion and superpixels. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 469-476, 9 2014. 2 +[6] Yuanzhouhan Cao, Zifeng Wu, and Chunhua Shen. Estimating depth from monocular images as classification using deep fully convolutional residual networks. IEEE Transactions on Circuits and Systems for Video Technology, 28:3174-3182, 5 2016. 2 +[7] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), 12346 LNCS:213-229, 5 2020. 4 +[8] Ming Fang Chang, John Lambert, Patsorn Sangkloy, Jagjeet Singh, Slawomir Bak, Andrew Hartnett, De Wang, Peter Carr, Simon Lucey, Deva Ramanan, and James Hays. Argoverse: 3d tracking and forecasting with rich maps. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2019-June:8740-8749, 11 2019. 2, 6 +[9] Anne Laure Chauve, Patrick Labatut, and Jean Philippe Pons. Robust piecewise-planar 3d reconstruction and completion from large-scale unstructured point data. 
Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 1261-1268, 2010. 2 +[10] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248–255, 2009. 6, 7 +[11] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at + +scale. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. 1, 6, 7 +[12] David Eigen, Christian Puhrsch, and Rob Fergus. Depth map prediction from a single image using a multi-scale deep network. Advances in Neural Information Processing Systems, 3:2366-2374, 6 2014. 1, 2, 5, 6, 7 +[13] Huan Fu, Mingming Gong, Chaohui Wang, Kayhan Bat-manghelich, and Dacheng Tao. Deep ordinal regression network for monocular depth estimation. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 2002-2011, 6 2018. 1, 2, 3, 4, 6, 7, 8 +[14] David Gallup, Jan Michael Frahm, and Marc Pollefeys. Piecewise planar and non-planar stereo for urban scene reconstruction. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 1418-1425, 2010. 2 +[15] Peng Gao, Minghang Zheng, Xiaogang Wang, Jifeng Dai, and Hongsheng Li. Fast convergence of detr with spatially modulated co-attention. Proceedings of the IEEE International Conference on Computer Vision, pages 3601-3610, 8 2021. 4 +[16] Ravi Garg, BG Vijay Kumar, Gustavo Carneiro, and Ian Reid. Unsupervised cnn for single view depth estimation: Geometry to the rescue. In European Conference on Computer Vision, pages 740-756. Springer, 2016. 5, 6 +[17] Andreas Geiger, Philip Lenz, and Raquel Urtasun. Are we ready for autonomous driving? the kitti vision benchmark suite. In Conference on Computer Vision and Pattern Recognition (CVPR), 2012. 2, 5 +[18] Vitor Guizilini, Rares Ambrus, Sudeep Pillai, Allan Raventos, and Adrien Gaidon. 3d packing for self-supervised monocular depth estimation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 6 +[19] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2016-December:770-778, 12 2015. 6, 7 +[20] Geoffrey E. Hinton, Sara Sabour, and Nicholas Frosst. Matrix capsules with EM routing. In 6th International Conference on Learning Representations, ICLR, 2018. 2 +[21] Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q. Weinberger. Densely connected convolutional networks. Proceedings - 30th IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, 2017-January:2261-2269, 8 2016. 6, 7 +[22] Lam Huynh, Phong Nguyen-Ha, Jiri Matas, Esa Rahtu, and Janne Heikkilä. Guiding monocular depth estimation using depth-attention volume. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), 12371 LNCS:581-597, 4 2020. 1, 6, 7 +[23] Iro Laina, Christian Rupprecht, Vasileios Belagiannis, Federico Tombari, and Nassir Navab. Deeper depth prediction with fully convolutional residual networks. 
Proceedings - 2016 4th International Conference on 3D Vision, 3DV 2016, pages 239-248, 6 2016. 2 + +[24] Jin Han Lee, Myung-Kyu Han, Dong Wook Ko, and Il Hong Suh. From big to small: Multi-scale local planar guidance for monocular depth estimation. arXiv e-prints, abs/1907.10326, 7 2019. 1, 2, 5, 6, 7 +[25] Jae Han Lee, Minhyeok Heo, Kyung Rae Kim, and Chang Su Kim. Single-image depth estimation based on fourier domain analysis. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 330-339, 12 2018. 2 +[26] Boying Li, Yuan Huang, Zeyu Liu, Danping Zou, and Wenxian Yu. Structdepth: Leveraging the structural regularities for self-supervised indoor depth estimation. Proceedings of the IEEE International Conference on Computer Vision, pages 12643-12653, 8 2021. 2 +[27] Zhenyu Li, Zehui Chen, Xianming Liu, and Junjun Jiang. Depthformer: Exploiting long-range correlation and local information for accurate monocular depth estimation. arXiv e-prints, abs/2203.14211, 3 2022. 4 +[28] Chen Liu, Kihwan Kim, Jinwei Gu, Yasutaka Furukawa, and Jan Kautz. Planercnn: 3d plane detection and reconstruction from a single image. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2019-June:4445-4454, 12 2018. 2 +[29] Chen Liu, Jimei Yang, Duygu Ceylan, Ersin Yumer, and Yasutaka Furukawa. Planenet: Piece-wise planar reconstruction from a single rgb image. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 2579-2588, 4 2018. 2 +[30] Fayao Liu, Chunhua Shen, Guosheng Lin, and Ian Reid. Learning depth from single monocular images using deep convolutional neural fields. IEEE Transactions on Pattern Analysis and Machine Intelligence, 38:2024-2039, 2 2015. 2 +[31] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. Proceedings of the IEEE International Conference on Computer Vision, pages 9992-10002, 3 2021. 6, 7 +[32] Francesco Locatello, Dirk Weissenborn, Thomas Unterthiner, Aravindh Mahendran, Georg Heigold, Jakob Uszkoreit, Alexey Dosovitskiy, and Thomas Kipf. Object-centric learning with slot attention. Advances in Neural Information Processing Systems, 2020-December, 6 2020. 2, 3, 4, 8 +[33] Xiaoxiao Long, Cheng Lin, Lingjie Liu, Wei Li, Christian Theobalt, Ruigang Yang, and Wenping Wang. Adaptive surface normal constraint for depth estimation. Proceedings of the IEEE International Conference on Computer Vision, pages 12829-12838, 3 2021. 1, 2, 6 +[34] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. 7th International Conference on Learning Representations, ICLR 2019, 11 2017. 6 +[35] S. H. Mahdi Miangoleh, Sebastian Dille, Long Mai, Sylvain Paris, and Yagiz Aksoy. Boosting monocular depth estimation models to high-resolution via content-adaptive multi-resolution merging. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 9680-9689, 5 2021. 2 + +[36] Pushmeet Kohli Nathan Silberman, Derek Hoiem and Rob Fergus. Indoor segmentation and support inference from rgbd images. In ECCV, 2012. 
5 +[37] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems 32, pages 8024-8035. Curran Associates, Inc., 2019. 6 +[38] Vaishakh Patil, Christos Sakaridis, Alexander Liniger, and Luc Van Gool. P3Depth: Monocular depth estimation with a piecewise planarity prior. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR, pages 1600-1611. IEEE, 2022. 1, 2, 6, 7 +[39] Xiaojuan Qi, Renjie Liao, Zhengzhe Liu, Raquel Urtasun, and Jiaya Jia. Geonet: Geometric neural network for joint depth and surface normal estimation. In 2018 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2018, Salt Lake City, UT, USA, June 18-22, 2018, pages 283-291. Computer Vision Foundation / IEEE Computer Society, 2018. 2, 8 +[40] Xiaojuan Qi, Zhengzhe Liu, Renjie Liao, Philip H. S. Torr, Raquel Urtasun, and Jiaya Jia. Geonet++: Iterative geometric neural network with edge-aware refinement for joint depth and surface normal estimation. IEEE Trans. Pattern Anal. Mach. Intell., 44(2):969-984, 2022. 8 +[41] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. Proceedings of the IEEE International Conference on Computer Vision, pages 12159-12168, 3 2021. 1, 6, 7 +[42] Sara Sabour, Nicholas Frosst, and Geoffrey E. Hinton. Dynamic routing between capsules. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett, editors, Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pages 3856-3866, 2017. 2 +[43] Shuran Song, Samuel P. Lichtenberg, and Jianxiong Xiao. Sun rgb-d: A rgb-d scene understanding benchmark suite. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 07-12-June-2015:567-576, 10 2015. 5 +[44] Zhiqing Sun, Shengcao Cao, Yiming Yang, and Kris Kitani. Rethinking transformer-based set prediction for object detection. Proceedings of the IEEE International Conference on Computer Vision, pages 3591-3600, 11 2020. 4 +[45] Mingxing Tan and Quoc V. Le. Efficientnet: Rethinking model scaling for convolutional neural networks. 36th International Conference on Machine Learning, ICML 2019, 2019-June:10691-10700, 5 2019. 6, 7 +[46] Yao-Hung Hubert Tsai, Nitish Srivastava, Hanlin Goh, and Ruslan Salakhutdinov. Capsules with inverted dot-product attention routing. arXiv e-prints, abs/2002.04764, 2020. 2 + +[47] Igor Vasiljevic, Nicholas I. Kolkin, Shanyi Zhang, Ruotian Luo, Haochen Wang, Falcon Z. Dai, Andrea F. Daniele, Mohammadreza Mostajabi, Steven Basart, Matthew R. Walter, and Gregory Shakhnarovich. DIODE: A dense indoor and outdoor depth dataset. arXiv e-prints, abs/1908.00463, 2019. 5 +[48] Jingdong Wang, Ke Sun, Tianheng Cheng, Borui Jiang, Chaorui Deng, Yang Zhao, Dong Liu, Yadong Mu, Mingkui Tan, Xinggang Wang, Wenyu Liu, and Bin Xiao. Deep high-resolution representation learning for visual recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43:3349-3364, 8 2019. 6 +[49] Peng Wang, Xiaohui Shen, Bryan C. 
Russell, Scott Cohen, Brian L. Price, and Alan L. Yuille. SURGE: surface regularized geometry estimation from a single image. In Daniel D. Lee, Masashi Sugiyama, Ulrike von Luxburg, Isabelle Guyon, and Roman Garnett, editors, Advances in Neural Information Processing Systems, pages 172-180, 2016. 8 +[50] Cihang Xie, Mingxing Tan, Boqing Gong, Jiang Wang, Alan L. Yuille, and Quoc V. Le. Adversarial examples improve image recognition. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 816-825, 11 2019. 6, 7 +[51] Dan Xu, Wanli Ouyang, Xiaogang Wang, and Nicu Sebe. Pad-net: Multi-tasks guided prediction-and-distillation network for simultaneous depth estimation and scene parsing. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 675-684, 5 2018. 2 +[52] Dan Xu, Wei Wang, Hao Tang, Hong Liu, Nicu Sebe, and Elisa Ricci. Structured attention guided convolutional neural fields for monocular depth estimation. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 3917-3925, 3 2018. 2 +[53] Fengting Yang and Zihan Zhou. Recovering 3d planes from a single image via convolutional neural networks. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), 11214 LNCS:87-103, 2018. 2 +[54] Guanglei Yang, Hao Tang, Mingli Ding, Nicu Sebe, and Elisa Ricci. Transformer-based attention networks for continuous pixel-wise prediction. Proceedings of the IEEE International Conference on Computer Vision, pages 16249-16259, 3 2021. 1, 2, 6, 7 +[55] Wei Yin, Yifan Liu, Chunhua Shen, and Youliang Yan. Enforcing geometric constraints of virtual normal for depth prediction. Proceedings of the IEEE International Conference on Computer Vision, pages 5683-5692, 7 2019. 1, 2, 5, 6 +[56] Fisher Yu, Vladlen Koltun, and Thomas Funkhouser. Dilated residual networks. Proceedings - 30th IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, 2017-January:636-644, 5 2017. 6 +[57] Zehao Yu, Lei Jin, and Shenghua Gao. $\mathbf{P}^2$ net: Patch-match and plane-regularization for unsupervised indoor depth estimation. In European Conference on Computer Vision, pages 206–222, 7 2020. 2 +[58] Zehao Yu, Jia Zheng, Dongze Lian, Zihan Zhou, and Shenghua Gao. Single-image piece-wise planar 3d recon + +struction via associative embedding. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2019-June:1029-1037, 2 2019. 2 +[59] Weihao Yuan, Xiaodong Gu, Zuozhuo Dai, Siyu Zhu, and Ping Tan. Neural window fully-connected crfs for monocular depth estimation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR, pages 3906-3915. IEEE, 2022. 1, 2, 5, 6, 7, 8 +[60] Weidong Zhang, Wei Zhang, and Yinda Zhang. Geolayout: Geometry driven room layout estimation based on depth maps of planes. In European Conference on Computer Vision, pages 632-648. Springer Science and Business Media Deutschland GmbH, 8 2020. 2 +[61] Zhenyu Zhang, Zhen Cui, Chunyan Xu, Yan Yan, Nicu Sebe, and Jian Yang. Pattern-affinitive propagation across depth, surface normal and semantic segmentation. In IEEE Computer Society Conference on Computer Vision and Pattern Recognition CVPR, pages 4101-4110, 6 2019. 8 +[62] Brady Zhou, Philipp Krahenbuhl, and Vladlen Koltun. Does computer vision matter for action? Science Robotics, 4, 5 2019. 
1 +[63] Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, and Jifeng Dai. Deformable DETR: deformable transformers for end-to-end object detection. In 9th International Conference on Learning Representations ICLR, 2021. 5 \ No newline at end of file diff --git a/2023/iDisc_ Internal Discretization for Monocular Depth Estimation/images.zip b/2023/iDisc_ Internal Discretization for Monocular Depth Estimation/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..10f51257959c01de7b8efcd1cc26b6e8702cb36a --- /dev/null +++ b/2023/iDisc_ Internal Discretization for Monocular Depth Estimation/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56888326eb9519a57e54e5223ed80809fa4d22038cf6fdf59dd07efea3c532c1 +size 600045 diff --git a/2023/iDisc_ Internal Discretization for Monocular Depth Estimation/layout.json b/2023/iDisc_ Internal Discretization for Monocular Depth Estimation/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..dbd0dc7201dd1394aed86c099b1fb41584d62ab4 --- /dev/null +++ b/2023/iDisc_ Internal Discretization for Monocular Depth Estimation/layout.json @@ -0,0 +1,9368 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 104, + 103, + 490, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 103, + 490, + 120 + ], + "spans": [ + { + "bbox": [ + 104, + 103, + 490, + 120 + ], + "type": "text", + "content": "iDisc: Internal Discretization for Monocular Depth Estimation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 167, + 143, + 426, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 143, + 426, + 157 + ], + "spans": [ + { + "bbox": [ + 167, + 143, + 426, + 157 + ], + "type": "text", + "content": "Luigi Piccinelli Christos Sakaridis Fisher Yu" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 209, + 163, + 381, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 163, + 381, + 177 + ], + "spans": [ + { + "bbox": [ + 209, + 163, + 381, + 177 + ], + "type": "text", + "content": "Computer Vision Lab, ETH Zürich" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 205, + 192, + 217 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 205, + 192, + 217 + ], + "spans": [ + { + "bbox": [ + 143, + 205, + 192, + 217 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 232, + 290, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 232, + 290, + 531 + ], + "spans": [ + { + "bbox": [ + 46, + 232, + 290, + 531 + ], + "type": "text", + "content": "Monocular depth estimation is fundamental for 3D scene understanding and downstream applications. However, even under the supervised setup, it is still challenging and ill-posed due to the lack of full geometric constraints. Although a scene can consist of millions of pixels, there are fewer high-level patterns. We propose iDisc to learn those patterns with internal discretized representations. The method implicitly partitions the scene into a set of high-level patterns. In particular, our new module, Internal Discretization (ID), implements a continuous-discrete-continuous bottleneck to learn those concepts without supervision. In contrast to state-of-the-art methods, the proposed model does not enforce any explicit constraints or priors on the depth output. 
The whole network with the ID module can be trained end-to-end, thanks to the bottleneck module based on attention. Our method sets the new state of the art with significant improvements on NYU-Depth v2 and KITTI, outperforming all published methods on the official KITTI benchmark. iDisc can also achieve state-of-the-art results on surface normal estimation. Further, we explore the model generalization capability via zero-shot testing. We observe the compelling need to promote diversification in the outdoor scenario. Hence, we introduce splits of two autonomous driving datasets, DDAD and Argoverse. Code is available at http://vis.xyz/pub/idisc." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 548, + 128, + 560 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 548, + 128, + 560 + ], + "spans": [ + { + "bbox": [ + 47, + 548, + 128, + 560 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 570, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 288, + 713 + ], + "type": "text", + "content": "Depth estimation is essential in computer vision, especially for understanding geometric relations in a scene. This task consists in predicting the distance between the projection center and the 3D point corresponding to each pixel. Depth estimation finds direct significance in downstream applications such as 3D modeling, robotics, and autonomous cars. Some research [62] shows that depth estimation is a crucial prompt to be leveraged for action reasoning and execution. In particular, we tackle the task of monocular depth estimation (MDE). MDE is an ill-posed problem due to its inherent scale ambiguity: the same 2D input image can correspond to an infinite number of 3D scenes." 
+ } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 307, + 204, + 426, + 289 + ], + "blocks": [ + { + "bbox": [ + 307, + 204, + 426, + 289 + ], + "lines": [ + { + "bbox": [ + 307, + 204, + 426, + 289 + ], + "spans": [ + { + "bbox": [ + 307, + 204, + 426, + 289 + ], + "type": "image", + "image_path": "8ba22e74df8a546a7c53dddaaca8625992d9f6ed5692c5fbba528c9c016e51d4.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 341, + 291, + 392, + 300 + ], + "lines": [ + { + "bbox": [ + 341, + 291, + 392, + 300 + ], + "spans": [ + { + "bbox": [ + 341, + 291, + 392, + 300 + ], + "type": "text", + "content": "(a) Input image" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 427, + 204, + 545, + 289 + ], + "blocks": [ + { + "bbox": [ + 427, + 204, + 545, + 289 + ], + "lines": [ + { + "bbox": [ + 427, + 204, + 545, + 289 + ], + "spans": [ + { + "bbox": [ + 427, + 204, + 545, + 289 + ], + "type": "image", + "image_path": "f005ed225df087a2be0bc84bfe03875ccb90c154d40ec389de3b6f70c18d5bb0.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 458, + 291, + 513, + 300 + ], + "lines": [ + { + "bbox": [ + 458, + 291, + 513, + 300 + ], + "spans": [ + { + "bbox": [ + 458, + 291, + 513, + 300 + ], + "type": "text", + "content": "(b) Output depth" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 307, + 304, + 426, + 389 + ], + "blocks": [ + { + "bbox": [ + 307, + 304, + 426, + 389 + ], + "lines": [ + { + "bbox": [ + 307, + 304, + 426, + 389 + ], + "spans": [ + { + "bbox": [ + 307, + 304, + 426, + 389 + ], + "type": "image", + "image_path": "2e8c07acd4f3533dd988334195875e5b8b0914a829e433cb67d2c0d162911607.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 315, + 391, + 418, + 400 + ], + "lines": [ + { + "bbox": [ + 315, + 391, + 418, + 400 + ], + "spans": [ + { + "bbox": [ + 315, + 391, + 418, + 400 + ], + "type": "text", + "content": "(c) Intermediate representations" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 427, + 304, + 545, + 389 + ], + "blocks": [ + { + "bbox": [ + 427, + 304, + 545, + 389 + ], + "lines": [ + { + "bbox": [ + 427, + 304, + 545, + 389 + ], + "spans": [ + { + "bbox": [ + 427, + 304, + 545, + 389 + ], + "type": "image", + "image_path": "870d50ba2271e434f8e85ba6f034fb1178463bcbcb085e72560087d90b8584c5.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 444, + 391, + 527, + 400 + ], + "lines": [ + { + "bbox": [ + 444, + 391, + 527, + 400 + ], + "spans": [ + { + "bbox": [ + 444, + 391, + 527, + 400 + ], + "type": "text", + "content": "(d) Internal discretization" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 304, + 405, + 547, + 503 + ], + "lines": [ + { + "bbox": [ + 304, + 405, + 547, + 503 + ], + "spans": [ + { + "bbox": [ + 304, + 405, + 547, + 503 + ], + "type": "text", + "content": "Figure 1. We propose iDisc which implicitly enforces an internal discretization of the scene via a continuous-discrete-continuous bottleneck. 
Supervision is applied to the output depth only, i.e., the fused intermediate representations in (c), while the internal discrete representations are implicitly learned by the model. (d) displays some actual internal discretization patterns captured from the input, e.g., foreground, object relationships, and 3D planes. Our iDisc model is able to predict high-quality depth maps by capturing scene interactions and structure." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 518, + 547, + 636 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 518, + 547, + 636 + ], + "spans": [ + { + "bbox": [ + 304, + 518, + 547, + 636 + ], + "type": "text", + "content": "State-of-the-art (SotA) methods typically involve convolutional networks [12, 13, 24] or, since the advent of vision Transformer [11], transformer architectures [3, 41, 54, 59]. Most methods either impose geometric constraints on the image [22, 33, 38, 55], namely, planarity priors or explicitly discretize the continuous depth range [3, 4, 13]. The latter can be viewed as learning frontoparallel planes. These imposed priors inherently limit the expressiveness of the respective models, as they cannot model arbitrary depth patterns, ubiquitous in real-world scenes." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 642, + 548, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 548, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 548, + 715 + ], + "type": "text", + "content": "We instead propose a more general depth estimation model, called iDisc, which does not explicitly impose any constraint on the final prediction. We design an Internal Discretization (ID) of the scene which is in principle depth-agnostic. Our assumption behind this ID is that each scene can be implicitly described by a set of concepts or patterns," + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "21477" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 108 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 108 + ], + "type": "text", + "content": "such as objects, planes, edges, and perspectivity relationships. The specific training signal determines which patterns to learn (see Fig. 1)." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 108, + 288, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 108, + 288, + 347 + ], + "spans": [ + { + "bbox": [ + 46, + 108, + 288, + 347 + ], + "type": "text", + "content": "We design a continuous-to-discrete bottleneck through which the information is passed in order to obtain such internal scene discretization, namely the underlying patterns. In the bottleneck, the scene feature space is partitioned via learnable and input-dependent quantizers, which in turn transfer the information onto the continuous output space. The ID bottleneck introduced in this work is a general concept and can be implemented in several ways. Our particular ID implementation employs attention-based operators, leading to an end-to-end trainable architecture and input-dependent framework. More specifically, we implement the continuous-to-discrete operation via \"transposed\" cross-attention, where transposed refers to applying softmax on the output dimension. This softmax formulation enforces the input features to be routed to the internal discrete representations (IDRs) in an exclusive fashion, thus defining an input-dependent soft clustering of the feature space. The discrete-to-continuous transformation is implemented via cross-attention. Supervision is only applied to the final output, without any assumptions or regularization on the IDRs." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 348, + 288, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 348, + 288, + 491 + ], + "spans": [ + { + "bbox": [ + 46, + 348, + 288, + 491 + ], + "type": "text", + "content": "We test iDisc on multiple indoor and outdoor datasets and probe its robustness via zero-shot testing. As of today, there is too little variety in MDE benchmarks for the outdoor scenario, since the only established benchmark is KITTI [17]. Moreover, we observe that all methods fail on outdoor zero-shot testing, suggesting that the KITTI dataset is not diverse enough and leads to overfitting, thus implying that it is not indicative of generalized performance. Hence, we find it compelling to establish a new benchmark setup for the MDE community by proposing two new train-test splits of more diverse and challenging high-quality outdoor datasets: Argoverse1.1 [8] and DDAD [18]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 491, + 288, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 491, + 288, + 635 + ], + "spans": [ + { + "bbox": [ + 46, + 491, + 288, + 635 + ], + "type": "text", + "content": "Our main contributions are as follows: (i) we introduce the Internal Discretization module, a novel architectural component that adeptly represents a scene by combining underlying patterns; (ii) we show that it is a generalization of SotA methods involving depth ordinal regression [3, 13]; (iii) we propose splits of two raw outdoor datasets [8, 18] with high-quality LiDAR measurements. We extensively test iDisc on six diverse datasets and, owing to the ID design, our model consistently outperforms SotA methods and presents better transferability. Moreover, we apply iDisc to surface normal estimation showing that the proposed module is general enough to tackle generic real-valued dense prediction tasks." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 645, + 134, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 645, + 134, + 658 + ], + "spans": [ + { + "bbox": [ + 47, + 645, + 134, + 658 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 665, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 288, + 713 + ], + "type": "text", + "content": "The supervised setting of MDE assumes that pixel-wise depth annotations are available at training time and depth inference is performed on single images. The coarse-to-fine network introduced in Eigen et al. [12] is the cor" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 547, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 192 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 192 + ], + "type": "text", + "content": "nerstone in MDE with end-to-end neural networks. The work established the optimization process via the Scale-Invariant log loss " + }, + { + "bbox": [ + 304, + 72, + 547, + 192 + ], + "type": "inline_equation", + "content": "(\\mathrm{SI}_{\\log})" + }, + { + "bbox": [ + 304, + 72, + 547, + 192 + ], + "type": "text", + "content": ". Since then, the three main directions evolve: new architectures, such as residual networks [23], neural fields [30, 52], multi-scale fusion [25, 35], transformers [3, 54, 59]; improved optimization schemes, such as reverse-Huber loss [23], classification [6], or ordinal regression [3, 13]; multi-task learning to leverage ancillary information from the related task, such as surface normals estimation or semantic segmentation [12, 39, 51]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 194, + 547, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 194, + 547, + 373 + ], + "spans": [ + { + "bbox": [ + 304, + 194, + 547, + 373 + ], + "type": "text", + "content": "Geometric priors have been widely utilized in the literature, particularly the piecewise planarity prior [5, 9, 14], serving as a proper real-world approximation. The geometric priors are usually incorporated by explicitly treating the image as a set of planes [26, 28, 29, 58], using a plane-inducing loss [57], forcing pixels to attend to the planar representation of other pixels [24, 38], or imposing consistency with other tasks' output [2, 33, 55], like surface normals. Priors can focus on a more holistic scene representation by dividing the whole scene into 3D planes without dependence on intrinsic camera parameters [53, 60], aiming at partitioning the scene into dominant depth planes. In contrast to geometric prior-based works, our method lifts any explicit geometric constraints on the scene. Instead, iDisc implicitly enforces the representation of scenes as a set of high-level patterns." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 375, + 547, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 375, + 547, + 518 + ], + "spans": [ + { + "bbox": [ + 304, + 375, + 547, + 518 + ], + "type": "text", + "content": "Ordinal regression methods [3,4, 13] have proven to be a promising alternative to other geometry-driven approaches. The difference with classification models is that class \"values\" are learnable and are real numbers, thus the problem falls into the regression category. 
The typical SotA rationale is to explicitly discretize the continuous output depth range, rendering the approach similar to mask-based segmentation. Each of the scalar depth values is associated with a confidence mask which describes the probability of each pixel presenting such a depth value. Hence, SotA methods inherently assume that depth can be represented as a set of frontoparallel planes, that is, depth \"masks\"." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 520, + 547, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 520, + 547, + 675 + ], + "spans": [ + { + "bbox": [ + 304, + 520, + 547, + 675 + ], + "type": "text", + "content": "The main paradigm of ordinal regression methods is to first obtain hidden representations and scalar values of discrete depth values. The dot-product similarity between the feature maps and the depth representations is treated as logits and softmax is applied to extract confidence masks (in Fu et al. [13] this degenerates to argmax). Finally, the final prediction is defined as the per-pixel weighted average of the discrete depth values, with the confidence values serving as the weights. iDisc draws connections with the idea of depth discretization. However, our ID module is designed to be depth-agnostic. The discretization occurs at the abstract level of internal features from the ID bottleneck instead of the output depth level, unlike other methods." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "type": "text", + "content": "Iterative routing is related to our \"transposed\" crossattention. The first approach of this kind was Capsule Networks and their variants [20, 42]. Some formulations [32, 46]" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "21478" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 68, + 547, + 250 + ], + "blocks": [ + { + "bbox": [ + 47, + 68, + 547, + 250 + ], + "lines": [ + { + "bbox": [ + 47, + 68, + 547, + 250 + ], + "spans": [ + { + "bbox": [ + 47, + 68, + 547, + 250 + ], + "type": "image", + "image_path": "7b815d251a046cb5082a7bd385affd5a0d6db5a9092c6b80fa9ff6953390a6f6.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "lines": [ + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "spans": [ + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "type": "text", + "content": "Figure 2. Model Architecture. The Internal Discretization Module imposes an information bottleneck via two consecutive stages: continuous-to-discrete (C2D) and discrete-to-continuous (D2C). The module processes multiple resolutions, i.e., " + }, + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "type": "inline_equation", + "content": "l \\in \\{1, 2, 3\\}" + }, + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "type": "text", + "content": ", independently in parallel. The bottleneck embodies our assumption that a scene can be represented as a set of patterns. 
The C2D stage aggregates information, given a learnable prior (" + }, + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "type": "inline_equation", + "content": "\\mathbf{H}_{\\text{prior}}^l" + }, + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "type": "text", + "content": "), from the " + }, + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "type": "text", + "content": "-th resolution feature maps (" + }, + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "type": "inline_equation", + "content": "\\mathbf{F}^l" + }, + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "type": "text", + "content": ") to a finite set of IDRs (" + }, + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "type": "inline_equation", + "content": "\\mathbf{H}^l" + }, + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "type": "text", + "content": "). In particular, it learns how to define a partition function that is dependent on the input " + }, + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "type": "inline_equation", + "content": "\\mathbf{F}^l" + }, + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "type": "text", + "content": " via transposed cross-attention, as in (1). The second stage (D2C) transfers the IDRs on the original continuous space using layers of cross-attention as in (2), for sake of simplicity, we depict only a generic " + }, + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "type": "text", + "content": "-th layer. Cross-attention is guided by the similarity between decoded pixel embeddings (" + }, + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "type": "inline_equation", + "content": "\\mathbf{P}^l" + }, + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "type": "text", + "content": ") and " + }, + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "type": "inline_equation", + "content": "\\mathbf{H}^l" + }, + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "type": "text", + "content": ". The final prediction (" + }, + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{D}}" + }, + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "type": "text", + "content": ") is the fusion, i.e., mean, of the intermediate representations " + }, + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "type": "inline_equation", + "content": "\\{\\hat{\\mathbf{D}}^l\\}_{l=1}^3" + }, + { + "bbox": [ + 45, + 254, + 548, + 344 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 350, + 290, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 350, + 290, + 446 + ], + "spans": [ + { + "bbox": [ + 46, + 350, + 290, + 446 + ], + "type": "text", + "content": "employ different kinds of attention mechanisms. Our attention mechanism draws connections with [32]. However, we do not allow permutation invariance, since our assumption is that each discrete representation internally describes a particular kind of pattern. In addition, we do not introduce any other architectural components such as gated recurrent units (GRU). In contrast to other methods, our attention is employed at a higher abstraction level, namely in the decoder." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 459, + 104, + 471 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 459, + 104, + 471 + ], + "spans": [ + { + "bbox": [ + 47, + 459, + 104, + 471 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 479, + 289, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 479, + 289, + 635 + ], + "spans": [ + { + "bbox": [ + 46, + 479, + 289, + 635 + ], + "type": "text", + "content": "We propose an Internal Discretization (ID) module, to discretize the internal feature representation of encoder-decoder network architectures. We hypothesize that the module can break down the scenes into coherent concepts without semantic supervision. This section will first describe the module design and then discuss the network architecture. Sec. 3.1.1 defines the formulation of \"transposed\" cross-attention outlined in Sec. 1 and describes the main difference with previous formulations from Sec. 2. Moreover, we derive in Sec. 3.1.2 how the iDisc formulation can be interpreted as a generalization of SotA ordinal regression methods by reframing their original formulation. Eventually, Sec. 3.2 presents the optimization problem and the overall architecture." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 646, + 216, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 646, + 216, + 658 + ], + "spans": [ + { + "bbox": [ + 47, + 646, + 216, + 658 + ], + "type": "text", + "content": "3.1. Internal Discretization Module" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": "The ID module involves a continuous-discrete-continuous bottleneck composed of two main consecutive stages. The overall module is based on our hypothesis that scenes can be represented as a finite set of patterns. The first stage" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 350, + 548, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 350, + 548, + 529 + ], + "spans": [ + { + "bbox": [ + 304, + 350, + 548, + 529 + ], + "type": "text", + "content": "consists in a continuous-to-discrete component, namely soft-exclusive discretization of the feature space. More specifically, it enforces an input-dependent soft clustering on the feature maps in an image-to-set fashion. The second stage completes the internal scene discretization by mapping the learned IDRs onto the continuous output space. IDRs are not bounded to focus exclusively on depth planes but are allowed to represent any high-level pattern or concept, such as objects, relative locations, and planes in the 3D space. In contrast with SotA ordinal regression methods [3,4,13], the IDRs are neither explicitly tied to depth values nor directly tied to the output. Moreover, our module operates at multiple intermediate resolutions and merges them only in the last layer. The overall architecture of iDisc, particularly our ID module, is shown in Fig. 2." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 549, + 468, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 549, + 468, + 562 + ], + "spans": [ + { + "bbox": [ + 306, + 549, + 468, + 562 + ], + "type": "text", + "content": "3.1.1 Adaptive Feature Partitioning" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": "The first stage of our ID module, Adaptive Feature Partitioning (AFP), generates proper discrete representations " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "(\\mathcal{H} \\coloneqq \\{\\mathbf{H}^l\\}_{l=1}^3)" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": " that quantize the feature space " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "(\\mathcal{F} \\coloneqq \\{\\mathbf{F}^l\\}_{l=1}^3)" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": " at each resolution " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": ". We drop the resolution superscript " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": " since resolutions are independently processed and only one generic resolution is treated here. iDisc does not simply learn fixed centroids, as in standard clustering, but rather learns how to define a partition function in an input-dependent fashion. More specifically, an iterative transposed cross-attention module is utilized. Given the specific input feature maps " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "(\\mathbf{F})" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": ", the iteration process refines (learnable) IDR priors " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "(\\mathbf{H}_{\\mathrm{prior}})" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": " over " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": " iterations." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "21479" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 169 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 169 + ], + "type": "text", + "content": "More specifically, the term \"transposed\" refers to the different axis along which the softmax operation is applied, namely " + }, + { + "bbox": [ + 46, + 72, + 289, + 169 + ], + "type": "inline_equation", + "content": "\\left[\\mathrm{softmax}(\\mathbf{KQ}^T)\\right]^T\\mathbf{V}" + }, + { + "bbox": [ + 46, + 72, + 289, + 169 + ], + "type": "text", + "content": " instead of the canonical dot-product attention softmax(QK)V, with Q,K,V as query, key and value tensors, respectively. In particular, the tensors are obtained as projections of feature maps and IDR priors, " + }, + { + "bbox": [ + 46, + 72, + 289, + 169 + ], + "type": "inline_equation", + "content": "f_{\\mathbf{Q}}(\\mathbf{H}_{\\mathrm{prior}}),f_{\\mathbf{K}}(\\mathbf{F}),f_{\\mathbf{V}}(\\mathbf{F})" + }, + { + "bbox": [ + 46, + 72, + 289, + 169 + ], + "type": "text", + "content": " . The " + }, + { + "bbox": [ + 46, + 72, + 289, + 169 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 72, + 289, + 169 + ], + "type": "text", + "content": " -th iteration out of " + }, + { + "bbox": [ + 46, + 72, + 289, + 169 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 46, + 72, + 289, + 169 + ], + "type": "text", + "content": " can be formulated as follows:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 71, + 173, + 288, + 205 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 173, + 288, + 205 + ], + "spans": [ + { + "bbox": [ + 71, + 173, + 288, + 205 + ], + "type": "interline_equation", + "content": "W _ {i j} ^ {t} = \\frac {\\exp \\left(\\mathbf {k} _ {i} ^ {T} \\mathbf {q} _ {j} ^ {t}\\right)}{\\sum_ {k = 1} ^ {N} \\exp \\left(\\mathbf {k} _ {i} ^ {T} \\mathbf {q} _ {k} ^ {t}\\right)}, \\mathbf {q} _ {j} ^ {t + 1} = \\sum_ {i = 1} ^ {M} W _ {i j} ^ {t} \\mathbf {v} _ {i}, \\tag {1}", + "image_path": "a8a6f983fb0d08234f997bb3954942a7553e7b6a4d7a322dd1796d99295f4824.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 210, + 288, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 210, + 288, + 270 + ], + "spans": [ + { + "bbox": [ + 46, + 210, + 288, + 270 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 210, + 288, + 270 + ], + "type": "inline_equation", + "content": "\\mathbf{q}_j, \\mathbf{k}_i, \\mathbf{v}_i \\in \\mathbb{R}^C" + }, + { + "bbox": [ + 46, + 210, + 288, + 270 + ], + "type": "text", + "content": " are query, key and value respectively, " + }, + { + "bbox": [ + 46, + 210, + 288, + 270 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 210, + 288, + 270 + ], + "type": "text", + "content": " is the number of IDRs, nameley, clusters, and " + }, + { + "bbox": [ + 46, + 210, + 288, + 270 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 210, + 288, + 270 + ], 
+ "type": "text", + "content": " is the number of pixels. The weights " + }, + { + "bbox": [ + 46, + 210, + 288, + 270 + ], + "type": "inline_equation", + "content": "W_{ij}" + }, + { + "bbox": [ + 46, + 210, + 288, + 270 + ], + "type": "text", + "content": " may be normalized to 1 along the " + }, + { + "bbox": [ + 46, + 210, + 288, + 270 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 210, + 288, + 270 + ], + "type": "text", + "content": " dimension to avoid vanishing or exploding quantities due to the summation of un-normalized distribution." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 270, + 289, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 270, + 289, + 474 + ], + "spans": [ + { + "bbox": [ + 46, + 270, + 289, + 474 + ], + "type": "text", + "content": "The quantization stems from the inherent behavior of softmax. In particular, softmax forces competition among outputs: one output can be large only to the detriment of others. Therefore, when fixing " + }, + { + "bbox": [ + 46, + 270, + 289, + 474 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 270, + 289, + 474 + ], + "type": "text", + "content": ", namely, given a feature, only a few attention weights " + }, + { + "bbox": [ + 46, + 270, + 289, + 474 + ], + "type": "inline_equation", + "content": "(W_{ij})" + }, + { + "bbox": [ + 46, + 270, + 289, + 474 + ], + "type": "text", + "content": " may be significantly greater than zero. Hence, the content " + }, + { + "bbox": [ + 46, + 270, + 289, + 474 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_i" + }, + { + "bbox": [ + 46, + 270, + 289, + 474 + ], + "type": "text", + "content": " is routed only to a few IDRs at the successive iteration. Feature maps are fixed during the process and weights are shared by design, thus " + }, + { + "bbox": [ + 46, + 270, + 289, + 474 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{k}_i, \\mathbf{v}_i\\}_{i=1}^M" + }, + { + "bbox": [ + 46, + 270, + 289, + 474 + ], + "type": "text", + "content": " are the same across iterations. The induced competition enforces a soft clustering of the input feature space, where the last-iteration IDR represents the actual partition function " + }, + { + "bbox": [ + 46, + 270, + 289, + 474 + ], + "type": "inline_equation", + "content": "(\\mathbf{H} := \\mathbf{Q}^R)" + }, + { + "bbox": [ + 46, + 270, + 289, + 474 + ], + "type": "text", + "content": ". The probabilities of belonging to one partition are the attention weights, namely " + }, + { + "bbox": [ + 46, + 270, + 289, + 474 + ], + "type": "inline_equation", + "content": "W_{ij}^R" + }, + { + "bbox": [ + 46, + 270, + 289, + 474 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 46, + 270, + 289, + 474 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 46, + 270, + 289, + 474 + ], + "type": "text", + "content": "-th query fixed. Since attention weights are inherently dependent on the input, the specific partitioning also depends on the input and takes place at inference time. The entire process of AFP leads to (soft) mutually exclusive IDRs." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 474, + 288, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 474, + 288, + 642 + ], + "spans": [ + { + "bbox": [ + 46, + 474, + 288, + 642 + ], + "type": "text", + "content": "As far as the partitioning rationale is concerned, the proposed AFP draws connections with iterative routing methods described in Sec. 2. However, important distinctions apply. First, IDRs are not randomly initialized as the \"slots\" in Locatello et al. [32] but present a learnable prior. Priors can be seen as learnable positional embeddings in the attention context, thus we do not allow a permutation-invariant set of representations. Moreover, non-adaptive partitioning can still take place via the learnable priors if the iterations are zero. Second, the overall architecture differs noticeably as described in Sec. 2, and in addition, iDisc partitions feature space at the decoder level, corresponding to more abstract, high-level concepts, while the SotA formulations focus on clustering at an abstraction level close to the input image." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 642, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 288, + 714 + ], + "type": "text", + "content": "One possible alternative approach to obtaining the aforementioned IDRs is the well-known image-to-set proposed in DETR [7], namely via classic cross-attention between representations and image feature maps. However, the corresponding representations might redundantly aggregate features, where the extreme corresponds to each output being" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 547, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 180 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 180 + ], + "type": "text", + "content": "the mean of the input. Studies [15, 44] have shown that slow convergence in transformer-based architectures may be due to the non-localized context in cross-attention. The exclusiveness of the IDRs discourages the redundancy of information in different IDRs. We argue that exclusiveness allows the utilization of fewer representations (32 against the 256 utilized in [3] and [13]), and can improve both the interpretability of what IDRs are responsible for and training convergence." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 196, + 463, + 207 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 196, + 463, + 207 + ], + "spans": [ + { + "bbox": [ + 305, + 196, + 463, + 207 + ], + "type": "text", + "content": "3.1.2 Internal Scene Discretization" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 215, + 547, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 215, + 547, + 334 + ], + "spans": [ + { + "bbox": [ + 304, + 215, + 547, + 334 + ], + "type": "text", + "content": "In the second stage of the ID module, Internal Scene Discretization (ISD), the module ingests pixel embeddings " + }, + { + "bbox": [ + 304, + 215, + 547, + 334 + ], + "type": "inline_equation", + "content": "(\\mathcal{P} := \\{\\mathbf{P}^l\\}_{l=1}^3)" + }, + { + "bbox": [ + 304, + 215, + 547, + 334 + ], + "type": "text", + "content": " from the decoder and IDRs " + }, + { + "bbox": [ + 304, + 215, + 547, + 334 + ], + "type": "inline_equation", + "content": "\\mathcal{H}" + }, + { + "bbox": [ + 304, + 215, + 547, + 334 + ], + "type": "text", + "content": " from the first stage, both at different resolutions " + }, + { + "bbox": [ + 304, + 215, + 547, + 334 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 304, + 215, + 547, + 334 + ], + "type": "text", + "content": ", as shown in Fig. 2. Each discrete representation carries both the signature, as the key, and the output-related content, as the value, of the pattern it represents. The similarity between IDRs and pixel embeddings is computed in order to spatially localize in the continuous output space where to transfer the information of each IDR. We utilize the dot-product similarity function." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 335, + 547, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 335, + 547, + 562 + ], + "spans": [ + { + "bbox": [ + 304, + 335, + 547, + 562 + ], + "type": "text", + "content": "Furthermore, the kind of information to transfer onto the final prediction is not constrained, as we never explicitly handle depth values, usually called bins, until the final output. Thus, the IDRs are completely free to carry generic high-level concepts (such as object-ness, relative positioning, and geometric structures). This approach is in stark contrast with SotA methods [3,4, 13, 27], which explicitly constrain what the representations are about: scalar depth values. Instead, iDisc learns to generate unconstrained representations in an input-dependent fashion. The effective discretization of the scene occurs in the second stage thanks to the information transfer from the set of exclusive concepts " + }, + { + "bbox": [ + 304, + 335, + 547, + 562 + ], + "type": "inline_equation", + "content": "(\\mathcal{H})" + }, + { + "bbox": [ + 304, + 335, + 547, + 562 + ], + "type": "text", + "content": " from AFP to the continuous space defined by " + }, + { + "bbox": [ + 304, + 335, + 547, + 562 + ], + "type": "inline_equation", + "content": "\\mathcal{P}" + }, + { + "bbox": [ + 304, + 335, + 547, + 562 + ], + "type": "text", + "content": ". We show that our method is not bounded to depth estimation, but can be applied to generic continuous dense tasks, for instance, surface normal estimation. Consequently, we argue that the training signal of the task at hand determines how to internally discretize the scene, rendering our ID module general and usable in settings other than depth estimation." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 563, + 547, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 563, + 547, + 635 + ], + "spans": [ + { + "bbox": [ + 304, + 563, + 547, + 635 + ], + "type": "text", + "content": "From a practical point of view, the whole second stage consists in cross-attention layers applied to IDRs and pixel embeddings. As described in Sec. 3.1.1, we drop the resolution superscript " + }, + { + "bbox": [ + 304, + 563, + 547, + 635 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 304, + 563, + 547, + 635 + ], + "type": "text", + "content": ". After that, the final depth maps are projected onto the output space and the multi-resolution depth predictions are combined. The " + }, + { + "bbox": [ + 304, + 563, + 547, + 635 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 563, + 547, + 635 + ], + "type": "text", + "content": "-th layer is defined as:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 351, + 645, + 547, + 659 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 351, + 645, + 547, + 659 + ], + "spans": [ + { + "bbox": [ + 351, + 645, + 547, + 659 + ], + "type": "interline_equation", + "content": "\\mathbf {D} _ {i + 1} = \\operatorname {s o f t m a x} \\left(\\mathbf {Q} _ {i} \\mathbf {K} _ {i} ^ {T}\\right) \\mathbf {V} _ {i} + \\mathbf {D} _ {i}, \\tag {2}", + "image_path": "11d854b1b95af8cf9bf34a8287ff8c67e794b9cdf1fbba9a3392008bc36ccb1c.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 664, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 664, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 664, + 547, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 664, + 547, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_i = f_{Q_i}(\\mathbf{P})\\in \\mathbb{R}^{H\\times W\\times C}" + }, + { + "bbox": [ + 304, + 664, + 547, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 664, + 547, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{P}" + }, + { + "bbox": [ + 304, + 664, + 547, + 714 + ], + "type": "text", + "content": " are pixel embeddings with shape " + }, + { + "bbox": [ + 304, + 664, + 547, + 714 + ], + "type": "inline_equation", + "content": "(H,W)" + }, + { + "bbox": [ + 304, + 664, + 547, + 714 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 664, + 547, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_i" + }, + { + "bbox": [ + 304, + 664, + 547, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 664, + 547, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_i\\in \\mathbb{R}^{N\\times C}" + }, + { + "bbox": [ + 304, + 664, + 547, + 714 + ], + "type": "text", + "content": " are the " + }, + { + "bbox": [ + 304, + 664, + 547, + 714 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 664, + 547, + 714 + ], + "type": "text", + "content": " IDRs under linear transformations " + }, + { + "bbox": [ + 304, + 664, + 547, + 714 + ], + "type": "inline_equation", + "content": "f_{K_i}(\\mathbf{H})" + }, + { + "bbox": [ + 304, + 664, + 547, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 664, + 547, + 714 + ], + "type": "inline_equation", + "content": "f_{V_i}(\\mathbf{H})" + }, + { + "bbox": [ + 304, + 664, + 547, + 714 + ], + "type": 
"text", + "content": ". The term " + }, + { + "bbox": [ + 304, + 664, + 547, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_i\\mathbf{K}_i^T" + }, + { + "bbox": [ + 304, + 664, + 547, + 714 + ], + "type": "text", + "content": " determines the spatial location for which each" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "21480" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "type": "text", + "content": "specific IDR is responsible, while " + }, + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_i" + }, + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "type": "text", + "content": " carries the semantic content to be transferred in the proper spatial locations." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 96, + 289, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 96, + 289, + 204 + ], + "spans": [ + { + "bbox": [ + 46, + 96, + 289, + 204 + ], + "type": "text", + "content": "Our approach constitutes a generalization of depth estimation methods that involve (hybrid) ordinal regression. As described in Sec. 2, the common paradigm in ordinal regression methods is to explicitly discretize depth in a set of masks with a scalar depth value associated with it. Then, they predict the likelihood that each pixel belongs to such masks. Our change of paradigm stems from the reinterpretation of the mentioned ordinal regression pipeline which we translate into the following mathematical expression:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 109, + 209, + 287, + 223 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 209, + 287, + 223 + ], + "spans": [ + { + "bbox": [ + 109, + 209, + 287, + 223 + ], + "type": "interline_equation", + "content": "\\mathbf {D} = \\operatorname {s o f t m a x} \\left(\\mathbf {P R} ^ {T} / T\\right) \\mathbf {v}, \\tag {3}", + "image_path": "552df590256fac3d7b37093211b06835c71bd8bcc41fb11921aa8bf151fef58f.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "spans": [ + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "inline_equation", + "content": "\\mathbf{P}" + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "text", + "content": " are the pixel embeddings at maximum resolution and " + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "text", + "content": " is the softmax temperature. 
" + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "inline_equation", + "content": "\\mathbf{v} \\in \\mathbb{R}^{N \\times 1}" + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "text", + "content": " are " + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "text", + "content": " depth scalar values and " + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "inline_equation", + "content": "\\mathbf{R} \\in \\mathbb{R}^{N \\times (C - 1)}" + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "text", + "content": " are their hidden representations, both processed as a unique stacked tensor " + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "inline_equation", + "content": "(\\mathbf{R}||\\mathbf{v} \\in \\mathbb{R}^{N \\times C})" + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "text", + "content": ". From the reformulation in (3), one can observe that (3) is a degenerate case of (2). In particular, " + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "inline_equation", + "content": "f_{Q}" + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "text", + "content": " degenerates to the identity function. " + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "inline_equation", + "content": "f_{K}" + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "inline_equation", + "content": "f_{V}" + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "text", + "content": " degenerate to selector functions: the former function selects up to the " + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "inline_equation", + "content": "C - 1" + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "text", + "content": " dimensions and the latter selects the last dimension only. Moreover, the hidden representations are refined pixel embeddings " + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "inline_equation", + "content": "(f(\\mathbf{P}_i) = \\mathbf{H}_i = \\mathbf{R}||\\mathbf{v})" + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "inline_equation", + "content": "\\mathbf{D}" + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "text", + "content": " in (3) is the final output, namely no multiple iterations are performed as in (2). The explicit entanglement between the semantic content of the hidden representations and the final output is due to hard-coding " + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 46, + 229, + 289, + 408 + ], + "type": "text", + "content": " as depth scalar values." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 415, + 173, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 415, + 173, + 426 + ], + "spans": [ + { + "bbox": [ + 47, + 415, + 173, + 426 + ], + "type": "text", + "content": "3.2. Network Architecture" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 433, + 289, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 433, + 289, + 613 + ], + "spans": [ + { + "bbox": [ + 46, + 433, + 289, + 613 + ], + "type": "text", + "content": "Our network described in Fig. 
2 comprises first an encoder backbone, interchangeably convolutional or attention-based, producing features at different scales. The encoded features at different resolutions are refined, and information between resolutions is shared, both via four multi-scale deformable attention (MSDA) blocks [63]. The feature maps from MSDA at different scales are fed into the AFP module to extract IDRs " + }, + { + "bbox": [ + 46, + 433, + 289, + 613 + ], + "type": "inline_equation", + "content": "(\\mathcal{H})" + }, + { + "bbox": [ + 46, + 433, + 289, + 613 + ], + "type": "text", + "content": ", and into the decoder to extract pixel embeddings in the continuous space " + }, + { + "bbox": [ + 46, + 433, + 289, + 613 + ], + "type": "inline_equation", + "content": "(\\mathcal{P})" + }, + { + "bbox": [ + 46, + 433, + 289, + 613 + ], + "type": "text", + "content": ". Pixel embeddings at different resolutions are combined with the respective IDRs in the ISD stage of the ID module to extract the depth maps. The final depth prediction corresponds to the mean of the interpolated intermediate representations. The optimization process is guided only by the established " + }, + { + "bbox": [ + 46, + 433, + 289, + 613 + ], + "type": "inline_equation", + "content": "\\mathrm{SI}_{\\log}" + }, + { + "bbox": [ + 46, + 433, + 289, + 613 + ], + "type": "text", + "content": " loss defined in [12], and no other regularization is exploited. " + }, + { + "bbox": [ + 46, + 433, + 289, + 613 + ], + "type": "inline_equation", + "content": "\\mathrm{SI}_{\\log}" + }, + { + "bbox": [ + 46, + 433, + 289, + 613 + ], + "type": "text", + "content": " is defined as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 619, + 287, + 638 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 619, + 287, + 638 + ], + "spans": [ + { + "bbox": [ + 104, + 619, + 287, + 638 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {S I} _ {\\log}} (\\epsilon) = \\alpha \\sqrt {\\mathbb {V} [ \\epsilon ] + \\lambda \\mathbb {E} ^ {2} [ \\epsilon ]} \\tag {4}", + "image_path": "bc977e31fb1af8d8f34a64df58b2ae5931dcb61aca713ff5efd03339ba1e868d.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 635, + 216, + 648 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 635, + 216, + 648 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 216, + 648 + ], + "type": "interline_equation", + "content": "\\text {w i t h} \\epsilon = \\log (\\hat {y}) - \\log (y ^ {*}),", + "image_path": "ff4d7fb536b9ab12b250d01ece82b77e47437c9410b90d29c04ed4dc7c09773e.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 653, + 289, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 289, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 289, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 653, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\hat{y}" + }, + { + "bbox": [ + 46, + 653, + 289, + 714 + ], + "type": "text", + "content": " is the predicted depth and " + }, + { + "bbox": [ + 46, + 653, + 289, + 714 + ], + "type": "inline_equation", + "content": "y^{*}" + }, + { + "bbox": [ + 46, + 653, + 289, + 714 + ], + "type": "text", + "content": " is the ground-truth (GT) value. 
" + }, + { + "bbox": [ + 46, + 653, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\mathbb{V}[\\epsilon ]" + }, + { + "bbox": [ + 46, + 653, + 289, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 653, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\mathbb{E}[\\epsilon ]" + }, + { + "bbox": [ + 46, + 653, + 289, + 714 + ], + "type": "text", + "content": " are computed as the empirical variance and expected value over all pixels, namely, " + }, + { + "bbox": [ + 46, + 653, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\{\\epsilon_i\\}_{i = 1}^N" + }, + { + "bbox": [ + 46, + 653, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\mathbb{V}[\\epsilon ]" + }, + { + "bbox": [ + 46, + 653, + 289, + 714 + ], + "type": "text", + "content": " is the purely scale-invariant loss, while " + }, + { + "bbox": [ + 46, + 653, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\mathbb{E}^2 [\\epsilon ]" + }, + { + "bbox": [ + 46, + 653, + 289, + 714 + ], + "type": "text", + "content": " fosters a proper scale. " + }, + { + "bbox": [ + 46, + 653, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 46, + 653, + 289, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 653, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 46, + 653, + 289, + 714 + ], + "type": "text", + "content": " are set to 10 and 0.15, as customary." + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 307, + 71, + 545, + 332 + ], + "blocks": [ + { + "bbox": [ + 307, + 71, + 545, + 332 + ], + "lines": [ + { + "bbox": [ + 307, + 71, + 545, + 332 + ], + "spans": [ + { + "bbox": [ + 307, + 71, + 545, + 332 + ], + "type": "image", + "image_path": "f17c9cbe2353e72fa9a47066bbe82a251fe44385531a47ab4a393b4c33481f4f.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 335, + 547, + 402 + ], + "lines": [ + { + "bbox": [ + 305, + 335, + 547, + 402 + ], + "spans": [ + { + "bbox": [ + 305, + 335, + 547, + 402 + ], + "type": "text", + "content": "Figure 3. Qualitative results on NYU. Each pair of consecutive rows corresponds to one test sample. Each odd row shows the input RGB image and depth predictions for the selected methods. Each even row shows GT depth and the prediction errors of the selected methods clipped at 0.5 meters. The error color map is coolwarm: blue corresponds to lower error values and red to higher values." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 414, + 388, + 427 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 414, + 388, + 427 + ], + "spans": [ + { + "bbox": [ + 306, + 414, + 388, + 427 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 434, + 422, + 447 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 434, + 422, + 447 + ], + "spans": [ + { + "bbox": [ + 306, + 434, + 422, + 447 + ], + "type": "text", + "content": "4.1. 
Experimental Setup" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 453, + 375, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 453, + 375, + 464 + ], + "spans": [ + { + "bbox": [ + 306, + 453, + 375, + 464 + ], + "type": "text", + "content": "4.1.1 Datasets" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 472, + 547, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 472, + 547, + 628 + ], + "spans": [ + { + "bbox": [ + 304, + 472, + 547, + 628 + ], + "type": "text", + "content": "NYU-Depth V2. NYU-Depth V2 (NYU) [36] is a dataset consisting of 464 indoor scenes with RGB images and quasi-dense depth images with " + }, + { + "bbox": [ + 304, + 472, + 547, + 628 + ], + "type": "inline_equation", + "content": "640 \\times 480" + }, + { + "bbox": [ + 304, + 472, + 547, + 628 + ], + "type": "text", + "content": " resolution. Our models are trained on the train-test split proposed by previous methods [24], corresponding to 24,231 samples for training and 654 for testing. In addition to depth, the dataset provides surface normal data utilized for normal estimation. The train split used for normal estimation is the one proposed in [55]. Zero-shot testing datasets. We evaluate the generalizability of indoor models on two indoor datasets which are not seen during training. The selected datasets are SUN-RGBD [43] and DIODE-Indoor [47]. For both datasets, the resolution is reduced to match that of NYU, which is " + }, + { + "bbox": [ + 304, + 472, + 547, + 628 + ], + "type": "inline_equation", + "content": "640 \\times 480" + }, + { + "bbox": [ + 304, + 472, + 547, + 628 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 630, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 547, + 713 + ], + "type": "text", + "content": "KITTI. The KITTI dataset provides stereo images and corresponding Velodyne LiDAR scans of outdoor scenes captured from a moving vehicle [17]. RGB and depth images have (mean) resolution of " + }, + { + "bbox": [ + 304, + 630, + 547, + 713 + ], + "type": "inline_equation", + "content": "1241 \\times 376" + }, + { + "bbox": [ + 304, + 630, + 547, + 713 + ], + "type": "text", + "content": ". The split proposed by [12] (Eigen-split) with corrected depth is utilized as training and testing set, namely, 23,158 and 652 samples. The evaluation crop corresponds to the crop defined by [16]. 
All methods in" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "21481" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 71, + 107, + 160 + ], + "blocks": [ + { + "bbox": [ + 49, + 71, + 107, + 160 + ], + "lines": [ + { + "bbox": [ + 49, + 71, + 107, + 160 + ], + "spans": [ + { + "bbox": [ + 49, + 71, + 107, + 160 + ], + "type": "image", + "image_path": "ff6ee306e1cb02c3fe41324f2074246954ba091393b4961e92ac9170f0646dd3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 108, + 71, + 166, + 160 + ], + "blocks": [ + { + "bbox": [ + 108, + 71, + 166, + 160 + ], + "lines": [ + { + "bbox": [ + 108, + 71, + 166, + 160 + ], + "spans": [ + { + "bbox": [ + 108, + 71, + 166, + 160 + ], + "type": "image", + "image_path": "675d1465eb3306bd6beb3e5651aed532cfb3a7be85b64093f2b0912575370bfa.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 168, + 71, + 226, + 160 + ], + "blocks": [ + { + "bbox": [ + 168, + 71, + 226, + 160 + ], + "lines": [ + { + "bbox": [ + 168, + 71, + 226, + 160 + ], + "spans": [ + { + "bbox": [ + 168, + 71, + 226, + 160 + ], + "type": "image", + "image_path": "fb2b1b07b4ce47c510f43908dbd66678d2fe1185f1055193b1f7e3b03a45ecdf.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 227, + 71, + 286, + 160 + ], + "blocks": [ + { + "bbox": [ + 227, + 71, + 286, + 160 + ], + "lines": [ + { + "bbox": [ + 227, + 71, + 286, + 160 + ], + "spans": [ + { + "bbox": [ + 227, + 71, + 286, + 160 + ], + "type": "image", + "image_path": "f6e7cffc2765f3aa798183983e5ca1f370c86cb2488a29e108fa8c6126ce2456.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 49, + 162, + 107, + 208 + ], + "blocks": [ + { + "bbox": [ + 49, + 162, + 107, + 208 + ], + "lines": [ + { + "bbox": [ + 49, + 162, + 107, + 208 + ], + "spans": [ + { + "bbox": [ + 49, + 162, + 107, + 208 + ], + "type": "image", + "image_path": "904fc0d19d850dea0dadc74c8ace4454694f80b3d272c734cfaee9138136bc6d.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 209, + 287, + 273 + ], + "lines": [ + { + "bbox": [ + 47, + 209, + 287, + 273 + ], + "spans": [ + { + "bbox": [ + 47, + 209, + 287, + 273 + ], + "type": "text", + "content": "Figure 4. Attention maps on NYU for three different IDRs. Each row presents the attention map of a specific IDR for four test images. Each discrete representation focuses on a specific high-level concept. The first two rows pertain to IDRs at the lowest resolution while the last corresponds to the highest resolution. Best viewed on a screen and zoomed in." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 108, + 162, + 166, + 208 + ], + "blocks": [ + { + "bbox": [ + 108, + 162, + 166, + 208 + ], + "lines": [ + { + "bbox": [ + 108, + 162, + 166, + 208 + ], + "spans": [ + { + "bbox": [ + 108, + 162, + 166, + 208 + ], + "type": "image", + "image_path": "017c253393fdc1e27ae13869ab5c08dc90c5c32ab03993ec1a113d06fc076233.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 168, + 162, + 226, + 208 + ], + "blocks": [ + { + "bbox": [ + 168, + 162, + 226, + 208 + ], + "lines": [ + { + "bbox": [ + 168, + 162, + 226, + 208 + ], + "spans": [ + { + "bbox": [ + 168, + 162, + 226, + 208 + ], + "type": "image", + "image_path": "2c366c3f448c1840deb98ee2bd4e7c307ffbce404f27aa507b5515f4f1a85b32.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 227, + 162, + 286, + 208 + ], + "blocks": [ + { + "bbox": [ + 227, + 162, + 286, + 208 + ], + "lines": [ + { + "bbox": [ + 227, + 162, + 286, + 208 + ], + "spans": [ + { + "bbox": [ + 227, + 162, + 286, + 208 + ], + "type": "image", + "image_path": "fe820b1a19c9b104f02a279a1e79fe64e3f414d08c033ca8acb563ae78f3ca55.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 285, + 287, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 285, + 287, + 319 + ], + "spans": [ + { + "bbox": [ + 47, + 285, + 287, + 319 + ], + "type": "text", + "content": "Sec. 4.2 that have source code and pre-trained models available are re-evaluated on KITTI with the evaluation mask from [16] to have consistent results." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 321, + 288, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 321, + 288, + 573 + ], + "spans": [ + { + "bbox": [ + 47, + 321, + 288, + 573 + ], + "type": "text", + "content": "Argoverse1.1 and DDAD. We propose splits of two autonomous driving datasets, Argoverse1.1 (Argoverse) [8] and DDAD [18], for depth estimation. Argoverse and DDAD are both outdoor datasets that provide " + }, + { + "bbox": [ + 47, + 321, + 288, + 573 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 321, + 288, + 573 + ], + "type": "text", + "content": " HD images and the corresponding LiDAR scans from moving vehicles. We pre-process the original datasets to extract depth maps and avoid redundancy. Training set scenes are sampled when the vehicle has been displaced by at least 2 meters from the previous sample. For the testing set scenes, we increase this threshold to 50 meters to further diminish redundancy. Our Argoverse split accounts for 21,672 training samples and 476 test samples, while DDAD for 18,380 training and 860 testing samples. Samples in Argoverse are taken from the 6 cameras covering the full " + }, + { + "bbox": [ + 47, + 321, + 288, + 573 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 47, + 321, + 288, + 573 + ], + "type": "text", + "content": " panorama. 
For DDAD, we exclude 2 out of the 6 cameras since they have more than " + }, + { + "bbox": [ + 47, + 321, + 288, + 573 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 47, + 321, + 288, + 573 + ], + "type": "text", + "content": " pixels occluded by the camera capture system. We crop both RGB images and depth maps to have " + }, + { + "bbox": [ + 47, + 321, + 288, + 573 + ], + "type": "inline_equation", + "content": "1920 \\times 870" + }, + { + "bbox": [ + 47, + 321, + 288, + 573 + ], + "type": "text", + "content": " resolution that is 180px and 210px cropped from the top for Argoverse and DDAD, respectively, to crop out a large portion of the sky and regions occluded by the ego-vehicle. For both datasets, we clip the maximum depth at " + }, + { + "bbox": [ + 47, + 321, + 288, + 573 + ], + "type": "inline_equation", + "content": "150\\mathrm{m}" + }, + { + "bbox": [ + 47, + 321, + 288, + 573 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 586, + 180, + 598 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 586, + 180, + 598 + ], + "spans": [ + { + "bbox": [ + 47, + 586, + 180, + 598 + ], + "type": "text", + "content": "4.1.2 Implementation Details" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 605, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 605, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 605, + 287, + 714 + ], + "type": "text", + "content": "Evaluation Details. In all experiments, we do not exploit any test-time augmentations (TTA), camera parameters, or other tricks and regularizations, in contrast to many previous methods [3, 13, 24, 38, 59]. This provides a more challenging setup, which allows us to show the effectiveness of iDisc. As depth estimation metrics, we utilize root mean square error (RMS) and its log variant " + }, + { + "bbox": [ + 47, + 605, + 287, + 714 + ], + "type": "inline_equation", + "content": "(\\mathrm{RMS}_{\\log})" + }, + { + "bbox": [ + 47, + 605, + 287, + 714 + ], + "type": "text", + "content": ", absolute error in log-scale " + }, + { + "bbox": [ + 47, + 605, + 287, + 714 + ], + "type": "inline_equation", + "content": "(\\mathrm{Log}_{10})" + }, + { + "bbox": [ + 47, + 605, + 287, + 714 + ], + "type": "text", + "content": ", absolute (A.Rel) and squared (S.rel) mean relative error, the percentage of inlier pixels " + }, + { + "bbox": [ + 47, + 605, + 287, + 714 + ], + "type": "inline_equation", + "content": "(\\delta_{i})" + }, + { + "bbox": [ + 47, + 605, + 287, + 714 + ], + "type": "text", + "content": " with" + } + ] + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 309, + 148, + 545, + 308 + ], + "blocks": [ + { + "bbox": [ + 305, + 70, + 547, + 147 + ], + "lines": [ + { + "bbox": [ + 305, + 70, + 547, + 147 + ], + "spans": [ + { + "bbox": [ + 305, + 70, + 547, + 147 + ], + "type": "text", + "content": "Table 1. Comparison on NYU official test set. R101: ResNet-101 [19], D161: DenseNet-161 [21], EB5: EfficientNet-B5 [45], HR48: HRNet-48 [48], DD22: DRN-D-22 [56], ViTB: ViT-B/16+Resnet-50 [11], MViT: EfficientNet-B5-AP [50] + MiniViT, Swin{L, B, T}: Swin-{Large, Base, Tiny} [31]. 
" + }, + { + "bbox": [ + 305, + 70, + 547, + 147 + ], + "type": "inline_equation", + "content": "(\\dagger)" + }, + { + "bbox": [ + 305, + 70, + 547, + 147 + ], + "type": "text", + "content": ": ImageNet-22k [10] pretraining, " + }, + { + "bbox": [ + 305, + 70, + 547, + 147 + ], + "type": "inline_equation", + "content": "(\\ddagger)" + }, + { + "bbox": [ + 305, + 70, + 547, + 147 + ], + "type": "text", + "content": ": non-standard training set, " + }, + { + "bbox": [ + 305, + 70, + 547, + 147 + ], + "type": "inline_equation", + "content": "(\\ast)" + }, + { + "bbox": [ + 305, + 70, + 547, + 147 + ], + "type": "text", + "content": ": in-house dataset pretraining, " + }, + { + "bbox": [ + 305, + 70, + 547, + 147 + ], + "type": "inline_equation", + "content": "(\\S)" + }, + { + "bbox": [ + 305, + 70, + 547, + 147 + ], + "type": "text", + "content": ": re-evaluated without GT-based rescaling." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 148, + 545, + 308 + ], + "lines": [ + { + "bbox": [ + 309, + 148, + 545, + 308 + ], + "spans": [ + { + "bbox": [ + 309, + 148, + 545, + 308 + ], + "type": "table", + "html": "
MethodEncoderδ1δ2δ3RMSA.RelLog10
Higher is betterLower is better
Eigen et al. [12]-0.7690.9500.9880.6410.158-
DORN [13]R1010.8280.9650.9920.5090.1150.051
VNL [55]-0.8750.9760.9940.4160.1080.048
BTS [24]D1610.8850.9780.9940.3920.1100.047
AdaBins‡ [3]MViT0.9030.9840.9970.3640.1030.044
DAV [22]DD220.8820.9800.9960.4120.108-
Long et al. [33]HR480.8900.9820.9960.3770.1010.044
TransDepth [54]ViTB0.9000.9830.9960.3650.1060.045
DPT* [41]ViTB0.9040.9880.9980.3570.1100.045
P3Depth§ [38]R1010.8300.9710.9950.4500.1300.056
NeWCRF [59]SwinL†0.9220.9920.9980.3340.0950.041
LocalBins‡ [4]MViT0.9070.9870.9980.3570.0990.042
OursR1010.8920.9830.9950.3800.1090.046
EB50.9030.9860.9970.3690.1040.044
SwinT0.8940.9830.9960.3770.1090.045
SwinB0.9260.9890.9970.3270.0910.039
SwinL†0.9400.9930.9990.3130.0860.037
", + "image_path": "3a56e25eef3de38009bdbb69e40ba514b767ac4fa0641cd6a4a4db145aca3a5e.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 323, + 547, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 323, + 547, + 491 + ], + "spans": [ + { + "bbox": [ + 304, + 323, + 547, + 491 + ], + "type": "text", + "content": "threshold " + }, + { + "bbox": [ + 304, + 323, + 547, + 491 + ], + "type": "inline_equation", + "content": "1.25^{i}" + }, + { + "bbox": [ + 304, + 323, + 547, + 491 + ], + "type": "text", + "content": ", and scale-invariant error in log-scale " + }, + { + "bbox": [ + 304, + 323, + 547, + 491 + ], + "type": "inline_equation", + "content": "(\\mathrm{SI}_{\\log})" + }, + { + "bbox": [ + 304, + 323, + 547, + 491 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 304, + 323, + 547, + 491 + ], + "type": "inline_equation", + "content": "100\\sqrt{\\mathrm{Var}(\\epsilon_{\\log})}" + }, + { + "bbox": [ + 304, + 323, + 547, + 491 + ], + "type": "text", + "content": ". The maximum depth for NYU and all zero-shot testing in indoor datasets, specifically SUN-RGBD and Diode Indoor, is set to " + }, + { + "bbox": [ + 304, + 323, + 547, + 491 + ], + "type": "inline_equation", + "content": "10\\mathrm{m}" + }, + { + "bbox": [ + 304, + 323, + 547, + 491 + ], + "type": "text", + "content": " while for KITTI it is set to " + }, + { + "bbox": [ + 304, + 323, + 547, + 491 + ], + "type": "inline_equation", + "content": "80\\mathrm{m}" + }, + { + "bbox": [ + 304, + 323, + 547, + 491 + ], + "type": "text", + "content": " and for Argoverse and DDAD to " + }, + { + "bbox": [ + 304, + 323, + 547, + 491 + ], + "type": "inline_equation", + "content": "150\\mathrm{m}" + }, + { + "bbox": [ + 304, + 323, + 547, + 491 + ], + "type": "text", + "content": ". Zero-shot testing is performed by evaluating a model trained on either KITTI or NYU and tested on either outdoor or indoor datasets, respectively, without additional fine-tuning. For surface normals estimation, the metrics are mean (Mean) and median (Med) absolute error, RMS angular error, and percentages of inlier pixels with thresholds at " + }, + { + "bbox": [ + 304, + 323, + 547, + 491 + ], + "type": "inline_equation", + "content": "11.5^{\\circ}" + }, + { + "bbox": [ + 304, + 323, + 547, + 491 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 323, + 547, + 491 + ], + "type": "inline_equation", + "content": "22.5^{\\circ}" + }, + { + "bbox": [ + 304, + 323, + 547, + 491 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 323, + 547, + 491 + ], + "type": "inline_equation", + "content": "30^{\\circ}" + }, + { + "bbox": [ + 304, + 323, + 547, + 491 + ], + "type": "text", + "content": ". GT-based mean depth rescaling is applied only on Diode Indoor for all methods since the dataset presents largely scale-equivariant scenes, such as plain walls with tiny details." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 491, + 547, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 491, + 547, + 635 + ], + "spans": [ + { + "bbox": [ + 304, + 491, + 547, + 635 + ], + "type": "text", + "content": "Training Details. We implement iDisc in PyTorch [37]. 
For training, we use the AdamW [34] optimizer " + }, + { + "bbox": [ + 304, + 491, + 547, + 635 + ], + "type": "inline_equation", + "content": "(\\beta_{1} = 0.9, \\beta_{2} = 0.999)" + }, + { + "bbox": [ + 304, + 491, + 547, + 635 + ], + "type": "text", + "content": " with an initial learning rate of 0.0002 for every experiment, and weight decay set to 0.02. As a scheduler, we exploit Cosine Annealing starting from " + }, + { + "bbox": [ + 304, + 491, + 547, + 635 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 304, + 491, + 547, + 635 + ], + "type": "text", + "content": " of the training, with final learning rate of 0.00002. We run 45k optimization iterations with a batch size of 16. All backbones are initialized with weights from ImageNet-pretrained models. The augmentations include both geometric (random rotation and scale) and appearance (random brightness, gamma, saturation, hue shift) augmentations. The required training time amounts to 20 hours on 4 NVidia Titan RTX." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 646, + 503, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 646, + 503, + 658 + ], + "spans": [ + { + "bbox": [ + 305, + 646, + 503, + 658 + ], + "type": "text", + "content": "4.2. Comparison with the State of the Art" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "text", + "content": "Indoor Datasets. Results on NYU are presented in Table 1. The results show that we set the new state of the art on the benchmark, improving by more than " + }, + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "inline_equation", + "content": "6\\%" + }, + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "text", + "content": " on RMS and " + }, + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "inline_equation", + "content": "9\\%" + }, + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "text", + "content": " on A.Rel over the previous SotA. Moreover, results highlight" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "21482" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 103, + 287, + 223 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 288, + 102 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 288, + 102 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 288, + 102 + ], + "type": "text", + "content": "Table 2. Zero-shot testing of models trained on NYU. All methods are trained on NYU and tested without further fine-tuning on the official validation set of SUN-RGBD and Diode Indoor." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 103, + 287, + 223 + ], + "lines": [ + { + "bbox": [ + 50, + 103, + 287, + 223 + ], + "spans": [ + { + "bbox": [ + 50, + 103, + 287, + 223 + ], + "type": "table", + "html": "
Test setMethodδ1↑RMS ↓A.Rel ↓SIlog ↓
SUN-RGBDBTS [24]0.7450.5020.16814.25
AdaBins [3]0.7680.4760.15513.20
P3Depth [38]0.6980.5410.17815.02
NeWCRF [59]0.7990.4290.15011.27
Ours0.8380.3870.12810.91
DiodeBTS [24]0.7050.9650.21123.78
AdaBins [3]0.7330.8720.20922.54
P3Depth [38]0.7320.8770.20222.16
NeWCRF [59]0.7990.7690.16418.69
Ours0.8100.7210.15618.11
", + "image_path": "f88fc9362f0b2803734373b82754f4c8850feac95ec480196966f6422e7add67.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 235, + 287, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 235, + 287, + 376 + ], + "spans": [ + { + "bbox": [ + 46, + 235, + 287, + 376 + ], + "type": "text", + "content": "how iDisc is more sample-efficient than other transformer-based architectures [3,4,41,54,59] since we achieve better results even when employing smaller and less heavily pretrained backbone architectures. In addition, results show a significant improvement in performance with our model instantiated with a full-convolutional backbone over other full-convolutional-based models [12, 13, 22, 24, 38]. Table 2 presents zero-shot testing of NYU models on SUN-RGBD and Diode. In both cases, iDisc exhibits a compelling generalization performance, which we argue is due to implicitly learning the underlying patterns, namely, IDRs, of indoor scene structure via the ID module." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 379, + 288, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 379, + 288, + 653 + ], + "spans": [ + { + "bbox": [ + 46, + 379, + 288, + 653 + ], + "type": "text", + "content": "Qualitative results in Fig. 3 emphasize how the method excels in capturing the overall scene complexity. In particular, iDisc correctly captures discontinuities without depth over-excitation due to chromatic edges, such as the sink in row 1, and captures the right perspectivity between foreground and background depth planes such as between the bed (row 2) or sofa (row 3) and the walls behind. In addition, the model presents a reduced error around edges, even when compared to higher-resolution models such as [3]. We argue that iDisc actually reasons at the pattern level, thus capturing better the structure of the scene. This is particularly appreciable in indoor scenes, since these are usually populated by a multitude of objects. This behavior is displayed in the attention maps of Fig. 4. Fig. 4 shows how IDRs at lower resolution capture specific components, such as the relative position of the background (row 1) and foreground objects (row 2), while IDRs at higher resolution behave as depth refiners, attending typically to high-frequency features, such as upper (row 3) or lower borders of objects. It is worth noting that an IDR attends to the image borders when the particular concept it looks for is not present in the image. That is, the borders are the last resort in which the IDR tries to find its corresponding pattern (e.g., row 2, col. 1)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 654, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 654, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 654, + 288, + 714 + ], + "type": "text", + "content": "Outdoor Datasets. 
Results on KITTI in Table 3 demonstrate that iDisc sets the new SotA for this primary outdoor dataset, improving by more than " + }, + { + "bbox": [ + 46, + 654, + 288, + 714 + ], + "type": "inline_equation", + "content": "3\\%" + }, + { + "bbox": [ + 46, + 654, + 288, + 714 + ], + "type": "text", + "content": " in RMS and by " + }, + { + "bbox": [ + 46, + 654, + 288, + 714 + ], + "type": "inline_equation", + "content": "0.9\\%" + }, + { + "bbox": [ + 46, + 654, + 288, + 714 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 46, + 654, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\delta_{0.5}" + }, + { + "bbox": [ + 46, + 654, + 288, + 714 + ], + "type": "text", + "content": " over the previous SotA. However, KITTI results present saturated metrics. For instance, " + }, + { + "bbox": [ + 46, + 654, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\delta_{3}" + }, + { + "bbox": [ + 46, + 654, + 288, + 714 + ], + "type": "text", + "content": " is not reported since ev" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 70, + 547, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 70, + 547, + 158 + ], + "spans": [ + { + "bbox": [ + 304, + 70, + 547, + 158 + ], + "type": "text", + "content": "Table 3. Comparison on KITTI Eigen-split test set. Models without " + }, + { + "bbox": [ + 304, + 70, + 547, + 158 + ], + "type": "inline_equation", + "content": "\\delta_{0.5}" + }, + { + "bbox": [ + 304, + 70, + 547, + 158 + ], + "type": "text", + "content": " have implementation (partially) unavailable. R101: ResNet-101 [19], D161: DenseNet-161 [21], EB5: EfficientNet-B5 [45], ViTB: ViT-B/16+Resnet-50 [11], MViT: EfficientNet-B5-AP [50] + MiniViT, Swin{L, B, T}: Swin-{Large, Base, Tiny} [31]. (" + }, + { + "bbox": [ + 304, + 70, + 547, + 158 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 304, + 70, + 547, + 158 + ], + "type": "text", + "content": "): ImageNet-22k [10] pretraining, (" + }, + { + "bbox": [ + 304, + 70, + 547, + 158 + ], + "type": "inline_equation", + "content": "\\ddagger" + }, + { + "bbox": [ + 304, + 70, + 547, + 158 + ], + "type": "text", + "content": "): non-standard training set, (*): in-house dataset pretraining, (" + }, + { + "bbox": [ + 304, + 70, + 547, + 158 + ], + "type": "inline_equation", + "content": "\\S" + }, + { + "bbox": [ + 304, + 70, + 547, + 158 + ], + "type": "text", + "content": "): re-evaluated without GT-based rescaling." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 308, + 158, + 547, + 274 + ], + "blocks": [ + { + "bbox": [ + 308, + 158, + 547, + 274 + ], + "lines": [ + { + "bbox": [ + 308, + 158, + 547, + 274 + ], + "spans": [ + { + "bbox": [ + 308, + 158, + 547, + 274 + ], + "type": "table", + "html": "
MethodEncoderδ0.5δ1δ2RMSRMSlogA.RelS.Rel
Higher is betterLower is better
Eigen et al. [12]--0.6920.8997.1560.2700.1901.515
DORN [13]R101-0.9320.9842.7270.1200.0720.307
BTS [24]D1610.8700.9640.9952.4590.0900.0570.199
AdaBins‡ [3]MViT0.8680.9640.9952.3600.0880.0580.199
TransDepth [54]ViTB-0.9560.9942.7550.0980.0640.252
DPT* [41]ViTB0.8650.9650.9962.3150.0880.0590.190
P3Depth§ [38]R1010.8520.9590.9942.5190.0950.0600.206
NeWCRF [59]SwinL†0.8870.9740.9972.1290.0790.0520.155
OursR1010.8600.9650.9962.3620.0900.0590.197
EB50.8520.9630.9942.5100.0940.0630.223
SwinT0.8700.9680.9962.2910.0870.0580.184
SwinB0.8850.9740.9972.1490.0810.0540.159
SwinL†0.8960.9770.9972.0670.0770.0500.145
", + "image_path": "a40dd424b450428ebab2f5663dd3d48490f74d1aad73d98322f53901131fac1b.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 308, + 318, + 547, + 403 + ], + "blocks": [ + { + "bbox": [ + 304, + 285, + 547, + 317 + ], + "lines": [ + { + "bbox": [ + 304, + 285, + 547, + 317 + ], + "spans": [ + { + "bbox": [ + 304, + 285, + 547, + 317 + ], + "type": "text", + "content": "Table 4. Comparison on Argoverse and DDAD proposed splits. Comparison of performance of methods trained on either Argoverse or DDAD and tested on the same dataset." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 308, + 318, + 547, + 403 + ], + "lines": [ + { + "bbox": [ + 308, + 318, + 547, + 403 + ], + "spans": [ + { + "bbox": [ + 308, + 318, + 547, + 403 + ], + "type": "table", + "html": "
DatasetMethodδ1δ2δ3RMSRMSlogA.RelS.Rel
Higher is betterLower is better
ArgoverseBTS [24]0.7800.9080.9548.3190.2670.1862.56
AdaBins [3]0.7500.9010.9528.6860.2780.1952.36
NeWCRF [59]0.7070.8710.9399.4370.3210.2323.23
Ours0.8210.9230.9607.5670.2430.1632.22
DDADBTS [24]0.7570.9130.96210.110.2510.1862.27
AdaBins [3]0.7480.9120.96210.240.2550.2012.30
NeWCRF [59]0.7020.8810.95110.980.2710.2192.83
Ours0.8090.9340.9718.9890.2210.1631.85
", + "image_path": "531494df91ae6dac1f9b36e16d81d4127311ea43d8a1d62f6d6b512d8fa259a3.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 415, + 547, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 415, + 547, + 486 + ], + "spans": [ + { + "bbox": [ + 304, + 415, + 547, + 486 + ], + "type": "text", + "content": "ery method scores " + }, + { + "bbox": [ + 304, + 415, + 547, + 486 + ], + "type": "inline_equation", + "content": ">0.99" + }, + { + "bbox": [ + 304, + 415, + 547, + 486 + ], + "type": "text", + "content": ", with recent ones scoring 0.999. Therefore, we propose to utilize the metric " + }, + { + "bbox": [ + 304, + 415, + 547, + 486 + ], + "type": "inline_equation", + "content": "\\delta_{0.5}" + }, + { + "bbox": [ + 304, + 415, + 547, + 486 + ], + "type": "text", + "content": ", to better convey meaningful evaluation information. In addition, iDisc performs remarkably well on the highly competitive official KITTI benchmark, ranking " + }, + { + "bbox": [ + 304, + 415, + 547, + 486 + ], + "type": "inline_equation", + "content": "3^{\\mathrm{rd}}" + }, + { + "bbox": [ + 304, + 415, + 547, + 486 + ], + "type": "text", + "content": " among all methods and " + }, + { + "bbox": [ + 304, + 415, + 547, + 486 + ], + "type": "inline_equation", + "content": "1^{\\mathrm{st}}" + }, + { + "bbox": [ + 304, + 415, + 547, + 486 + ], + "type": "text", + "content": " among all published MDE methods." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 486, + 548, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 486, + 548, + 665 + ], + "spans": [ + { + "bbox": [ + 304, + 486, + 548, + 665 + ], + "type": "text", + "content": "Moreover, Table 4 shows the results of methods trained and evaluated on the splits from Argoverse and DDAD proposed in this work. All methods have been trained with the same architecture and pipeline utilized for training on KITTI. We argue that the high degree of sparseness in GT of the two proposed datasets, in contrast to KITTI, deeply affects windowed methods such as [3, 59]. Qualitative results in Fig. 5 suggest that the scene level discretization leads to retaining small objects and sharp transitions between foreground objects and background: background in row 1, and boxes in row 2. These results show the better ability of iDisc to capture fine-grained depth variations on close-by and similar objects, including crowd in row 3. Zero-shot testing from KITTI to DDAD and Argoverse are presented in Supplement." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 665, + 548, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 548, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 548, + 713 + ], + "type": "text", + "content": "Surface Normals Estimation. We emphasize that the proposed method has more general applications by testing iDisc on a different continuous dense prediction task such as surface normals estimation. 
Results in Table 5 evidence that we" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "21483" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 70, + 171, + 202 + ], + "blocks": [ + { + "bbox": [ + 49, + 70, + 171, + 202 + ], + "lines": [ + { + "bbox": [ + 49, + 70, + 171, + 202 + ], + "spans": [ + { + "bbox": [ + 49, + 70, + 171, + 202 + ], + "type": "image", + "image_path": "f82b0babd1075fcb975a78fbf0e4a4b041b327437274f1d2c11d1b5647e1e348.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 207, + 547, + 231 + ], + "lines": [ + { + "bbox": [ + 46, + 207, + 547, + 231 + ], + "spans": [ + { + "bbox": [ + 46, + 207, + 547, + 231 + ], + "type": "text", + "content": "Figure 5. Qualitative results on KITTI. Three zoomed-in crops of different test images are shown. The comparisons show the ability of iDisc to capture small details, proper background transition, and fine-grained variations in, e.g., crowded scenes. Best viewed on a screen." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 173, + 70, + 294, + 202 + ], + "blocks": [ + { + "bbox": [ + 173, + 70, + 294, + 202 + ], + "lines": [ + { + "bbox": [ + 173, + 70, + 294, + 202 + ], + "spans": [ + { + "bbox": [ + 173, + 70, + 294, + 202 + ], + "type": "image", + "image_path": "41ded1307a8edf0d1920bc3db437e4594b87fa46abb3465d079ee4ff03c674a2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 296, + 70, + 419, + 202 + ], + "blocks": [ + { + "bbox": [ + 296, + 70, + 419, + 202 + ], + "lines": [ + { + "bbox": [ + 296, + 70, + 419, + 202 + ], + "spans": [ + { + "bbox": [ + 296, + 70, + 419, + 202 + ], + "type": "image", + "image_path": "54bdec52e72f6686cee7da9a7e096a354a8073ae592f317a8588dac5c29e1794.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 420, + 70, + 545, + 202 + ], + "blocks": [ + { + "bbox": [ + 420, + 70, + 545, + 202 + ], + "lines": [ + { + "bbox": [ + 420, + 70, + 545, + 202 + ], + "spans": [ + { + "bbox": [ + 420, + 70, + 545, + 202 + ], + "type": "image", + "image_path": "8eab2abe2f01cf49224ff05f658f93e28df1652ef233a7d1deba6c06ed77b7b6.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 49, + 272, + 287, + 362 + ], + "blocks": [ + { + "bbox": [ + 47, + 239, + 287, + 270 + ], + "lines": [ + { + "bbox": [ + 47, + 239, + 287, + 270 + ], + "spans": [ + { + "bbox": [ + 47, + 239, + 287, + 270 + ], + "type": "text", + "content": "Table 5. Comparison of surface normals estimation methods on NYU official test set. iDisc architecture and training pipeline is the same as the one utilized for indoor depth estimation." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 49, + 272, + 287, + 362 + ], + "lines": [ + { + "bbox": [ + 49, + 272, + 287, + 362 + ], + "spans": [ + { + "bbox": [ + 49, + 272, + 287, + 362 + ], + "type": "table", + "html": "
Method11.5°22.5°30°RMSMeanMed
Higher is betterLower is better
SURGE [49]0.4730.6890.766-20.612.2
GeoNet [39]0.4840.4840.79526.919.011.8
PAP [61]0.4880.7220.79825.518.611.7
GeoNet++ [40]0.5020.7320.80726.718.511.2
Bae et al. [1]0.6220.7930.85223.514.97.5
Ours0.6380.7980.85622.814.67.3
", + "image_path": "9c5be2e3a32de141f8fa8143e07bbe6bd8f5c79b236f7ebaa795f7c4157bbe42.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 373, + 287, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 373, + 287, + 422 + ], + "spans": [ + { + "bbox": [ + 46, + 373, + 287, + 422 + ], + "type": "text", + "content": "set the new state of the art on surface normals estimation. It is worth mentioning that all other methods are specifically designed for normals estimation, while we keep the same architecture and framework from indoor depth estimation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 430, + 139, + 443 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 430, + 139, + 443 + ], + "spans": [ + { + "bbox": [ + 47, + 430, + 139, + 443 + ], + "type": "text", + "content": "4.3. Ablation study" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 449, + 287, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 449, + 287, + 472 + ], + "spans": [ + { + "bbox": [ + 46, + 449, + 287, + 472 + ], + "type": "text", + "content": "The importance of each component introduced in iDisc is evaluated by ablating the method in Table 6." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 474, + 287, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 474, + 287, + 569 + ], + "spans": [ + { + "bbox": [ + 46, + 474, + 287, + 569 + ], + "type": "text", + "content": "Depth Discretization. Internal scene discretization provides a clear improvement over its explicit counterpart (row 3 vs. 2), which is already beneficial in terms of robustness. Adding the MSDA module on top of explicit discretization (row 5) recovers part of the performance gap between the latter and our full method (row 8). We argue that MSDA recovers a better scene scale by refining feature maps at different scales at once, which is helpful for higher-resolution feature maps." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "text", + "content": "Component Interactions. Using either the MSDA module or the AFP module together with internal scene discretization results in similar performance (rows 4 and 6). We argue that the two modules are complementary, and they synergize when combined (row 8). The complementarity can be explained as follows: in the former scenario (row 4), MSDA preemptively refines feature maps to be partitioned by the non-adaptive clustering, that is, by the IDR priors described in Sec. 3, while on latter one (row 6), AFP allows the IDRs to adapt themselves to partition the unrefined feature space properly. Row 7 shows that the architecture closer to the one in [32], particularly random initialization, hurts perfor" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 239, + 545, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 239, + 545, + 316 + ], + "spans": [ + { + "bbox": [ + 305, + 239, + 545, + 316 + ], + "type": "text", + "content": "Table 6. Ablation of iDisc. EDD: Explicit Depth Discretization [3, 13], ISD: Internal Scene discretization, AFP: Adaptive Feature Partitioning, MSDA: MultiScale Deformable Attention. 
The EDD module, used in SotA methods, and our ISD module are mutually exclusive. AFP with " + }, + { + "bbox": [ + 305, + 239, + 545, + 316 + ], + "type": "inline_equation", + "content": "(\\checkmark_{\\mathbf{R}})" + }, + { + "bbox": [ + 305, + 239, + 545, + 316 + ], + "type": "text", + "content": " refers to random initialization of IDRs and architecture similar to [32]. The last row corresponds to our complete iDisc model." + } + ] + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 308, + 316, + 544, + 423 + ], + "blocks": [ + { + "bbox": [ + 308, + 316, + 544, + 423 + ], + "lines": [ + { + "bbox": [ + 308, + 316, + 544, + 423 + ], + "spans": [ + { + "bbox": [ + 308, + 316, + 544, + 423 + ], + "type": "table", + "html": "
EDDISDAFPMSDAδ1↑RMS ↓A.Rel ↓
1XXXX0.8900.3700.104
2✓XXX0.9050.3670.102
3X✓XX0.9190.3400.096
4X✓X✓0.9310.3190.091
5✓XX✓0.9310.3260.091
6X✓✓X0.9340.3190.088
7X✓✓R✓0.9300.3190.089
8X✓✓✓0.9400.3130.086
", + "image_path": "7bdb7e522195c96f329e79574ce4951b328c3feae85d2667f44e72cf96810a80.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 434, + 545, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 434, + 545, + 458 + ], + "spans": [ + { + "bbox": [ + 305, + 434, + 545, + 458 + ], + "type": "text", + "content": "mance since the internal representations do not embody any domain-specific prior information." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 468, + 378, + 481 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 468, + 378, + 481 + ], + "spans": [ + { + "bbox": [ + 306, + 468, + 378, + 481 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 488, + 547, + 680 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 488, + 547, + 680 + ], + "spans": [ + { + "bbox": [ + 304, + 488, + 547, + 680 + ], + "type": "text", + "content": "We have introduced a new module, called Internal Discretization, for MDE. The module represents the assumption that scenes can be represented as a finite set of patterns. Hence, iDisc leverages an internally discretized representation of the scene that is enforced via a continuous-discrete-continuous bottleneck, namely ID module. We have validated the proposed method, without any TTA or tricks, on the primary indoor and outdoor benchmarks for MDE, and have set the new state of the art among supervised approaches. Results showed that learning the underlying patterns, while not imposing any explicit constraints or regularization on the output, is beneficial for performance and generalization. iDisc also works out-of-the-box for normal estimation, beating all specialized SotA methods. In addition, we propose two new challenging outdoor dataset splits, aiming to benefit the community with more general and diverse benchmarks." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "type": "text", + "content": "Acknowledgment. This work is funded by Toyota Motor Europe via the research project TRACE-Zürich." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "21484" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 53, + 91, + 288, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 288, + 144 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 288, + 144 + ], + "type": "text", + "content": "[1] Gwangbin Bae, Ignas Budvytis, and Roberto Cipolla. 
Estimating and exploiting the aleatoric uncertainty in surface normal estimation. Proceedings of the IEEE International Conference on Computer Vision, pages 13117-13126, 9 2021. 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 147, + 288, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 147, + 288, + 190 + ], + "spans": [ + { + "bbox": [ + 53, + 147, + 288, + 190 + ], + "type": "text", + "content": "[2] Gwangbin Bae, Ignas Budvytis, and Roberto Cipolla. Irondepth: Iterative refinement of single-view depth using surface normal and its uncertainty. In *British Machine Vision Conference (BMVC)*, 2022. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 191, + 288, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 191, + 288, + 246 + ], + "spans": [ + { + "bbox": [ + 53, + 191, + 288, + 246 + ], + "type": "text", + "content": "[3] Shariq Farooq Bhat, Ibrahim Alhashim, and Peter Wonka. Adabins: Depth estimation using adaptive bins. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 4008-4017, 11 2020. 1, 2, 3, 4, 5, 6, 7, 8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 247, + 288, + 291 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 247, + 288, + 291 + ], + "spans": [ + { + "bbox": [ + 53, + 247, + 288, + 291 + ], + "type": "text", + "content": "[4] Shariq Farooq Bhat, Ibraheem Alhashim, and Peter Wonka. Localbins: Improving depth estimation by learning local distributions. In European Conference Computer Vision (ECCV), pages 480-496, 2022. 1, 2, 3, 4, 6, 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 292, + 288, + 356 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 292, + 288, + 356 + ], + "spans": [ + { + "bbox": [ + 53, + 292, + 288, + 356 + ], + "type": "text", + "content": "[5] András Bódis-Szomóru, Hayko Riemenschneider, and Luc Van Gool. Fast, approximate piecewise-planar modeling based on sparse structure-from-motion and superpixels. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 469-476, 9 2014. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 358, + 288, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 358, + 288, + 411 + ], + "spans": [ + { + "bbox": [ + 53, + 358, + 288, + 411 + ], + "type": "text", + "content": "[6] Yuanzhouhan Cao, Zifeng Wu, and Chunhua Shen. Estimating depth from monocular images as classification using deep fully convolutional residual networks. IEEE Transactions on Circuits and Systems for Video Technology, 28:3174-3182, 5 2016. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 414, + 288, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 414, + 288, + 479 + ], + "spans": [ + { + "bbox": [ + 53, + 414, + 288, + 479 + ], + "type": "text", + "content": "[7] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), 12346 LNCS:213-229, 5 2020. 
4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 480, + 288, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 480, + 288, + 555 + ], + "spans": [ + { + "bbox": [ + 53, + 480, + 288, + 555 + ], + "type": "text", + "content": "[8] Ming Fang Chang, John Lambert, Patsorn Sangkloy, Jagjeet Singh, Slawomir Bak, Andrew Hartnett, De Wang, Peter Carr, Simon Lucey, Deva Ramanan, and James Hays. Argoverse: 3d tracking and forecasting with rich maps. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2019-June:8740-8749, 11 2019. 2, 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 559, + 288, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 559, + 288, + 612 + ], + "spans": [ + { + "bbox": [ + 53, + 559, + 288, + 612 + ], + "type": "text", + "content": "[9] Anne Laure Chauve, Patrick Labatut, and Jean Philippe Pons. Robust piecewise-planar 3d reconstruction and completion from large-scale unstructured point data. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 1261-1268, 2010. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 614, + 288, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 614, + 288, + 657 + ], + "spans": [ + { + "bbox": [ + 48, + 614, + 288, + 657 + ], + "type": "text", + "content": "[10] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248–255, 2009. 6, 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 658, + 288, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 288, + 713 + ], + "type": "text", + "content": "[11] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 326, + 73, + 547, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 547, + 106 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 547, + 106 + ], + "type": "text", + "content": "scale. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. 1, 6, 7" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 107, + 547, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 107, + 547, + 149 + ], + "spans": [ + { + "bbox": [ + 308, + 107, + 547, + 149 + ], + "type": "text", + "content": "[12] David Eigen, Christian Puhrsch, and Rob Fergus. Depth map prediction from a single image using a multi-scale deep network. Advances in Neural Information Processing Systems, 3:2366-2374, 6 2014. 
1, 2, 5, 6, 7" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 151, + 547, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 151, + 547, + 215 + ], + "spans": [ + { + "bbox": [ + 308, + 151, + 547, + 215 + ], + "type": "text", + "content": "[13] Huan Fu, Mingming Gong, Chaohui Wang, Kayhan Bat-manghelich, and Dacheng Tao. Deep ordinal regression network for monocular depth estimation. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 2002-2011, 6 2018. 1, 2, 3, 4, 6, 7, 8" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 217, + 547, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 217, + 547, + 270 + ], + "spans": [ + { + "bbox": [ + 308, + 217, + 547, + 270 + ], + "type": "text", + "content": "[14] David Gallup, Jan Michael Frahm, and Marc Pollefeys. Piecewise planar and non-planar stereo for urban scene reconstruction. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 1418-1425, 2010. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 272, + 547, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 272, + 547, + 316 + ], + "spans": [ + { + "bbox": [ + 308, + 272, + 547, + 316 + ], + "type": "text", + "content": "[15] Peng Gao, Minghang Zheng, Xiaogang Wang, Jifeng Dai, and Hongsheng Li. Fast convergence of detr with spatially modulated co-attention. Proceedings of the IEEE International Conference on Computer Vision, pages 3601-3610, 8 2021. 4" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 317, + 547, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 317, + 547, + 360 + ], + "spans": [ + { + "bbox": [ + 308, + 317, + 547, + 360 + ], + "type": "text", + "content": "[16] Ravi Garg, BG Vijay Kumar, Gustavo Carneiro, and Ian Reid. Unsupervised cnn for single view depth estimation: Geometry to the rescue. In European Conference on Computer Vision, pages 740-756. Springer, 2016. 5, 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 361, + 547, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 361, + 547, + 403 + ], + "spans": [ + { + "bbox": [ + 308, + 361, + 547, + 403 + ], + "type": "text", + "content": "[17] Andreas Geiger, Philip Lenz, and Raquel Urtasun. Are we ready for autonomous driving? the kitti vision benchmark suite. In Conference on Computer Vision and Pattern Recognition (CVPR), 2012. 2, 5" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 405, + 547, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 405, + 547, + 448 + ], + "spans": [ + { + "bbox": [ + 308, + 405, + 547, + 448 + ], + "type": "text", + "content": "[18] Vitor Guizilini, Rares Ambrus, Sudeep Pillai, Allan Raventos, and Adrien Gaidon. 3d packing for self-supervised monocular depth estimation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 449, + 547, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 449, + 547, + 502 + ], + "spans": [ + { + "bbox": [ + 308, + 449, + 547, + 502 + ], + "type": "text", + "content": "[19] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. 
Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2016-December:770-778, 12 2015. 6, 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 504, + 547, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 504, + 547, + 536 + ], + "spans": [ + { + "bbox": [ + 308, + 504, + 547, + 536 + ], + "type": "text", + "content": "[20] Geoffrey E. Hinton, Sara Sabour, and Nicholas Frosst. Matrix capsules with EM routing. In 6th International Conference on Learning Representations, ICLR, 2018. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 537, + 547, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 537, + 547, + 591 + ], + "spans": [ + { + "bbox": [ + 308, + 537, + 547, + 591 + ], + "type": "text", + "content": "[21] Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q. Weinberger. Densely connected convolutional networks. Proceedings - 30th IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, 2017-January:2261-2269, 8 2016. 6, 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 593, + 547, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 593, + 547, + 657 + ], + "spans": [ + { + "bbox": [ + 308, + 593, + 547, + 657 + ], + "type": "text", + "content": "[22] Lam Huynh, Phong Nguyen-Ha, Jiri Matas, Esa Rahtu, and Janne Heikkilä. Guiding monocular depth estimation using depth-attention volume. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), 12371 LNCS:581-597, 4 2020. 1, 6, 7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 658, + 547, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 658, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 658, + 547, + 713 + ], + "type": "text", + "content": "[23] Iro Laina, Christian Rupprecht, Vasileios Belagiannis, Federico Tombari, and Nassir Navab. Deeper depth prediction with fully convolutional residual networks. Proceedings - 2016 4th International Conference on 3D Vision, 3DV 2016, pages 239-248, 6 2016. 2" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "21485" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[24] Jin Han Lee, Myung-Kyu Han, Dong Wook Ko, and Il Hong Suh. From big to small: Multi-scale local planar guidance for monocular depth estimation. arXiv e-prints, abs/1907.10326, 7 2019. 
1, 2, 5, 6, 7" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 119, + 288, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 119, + 288, + 174 + ], + "spans": [ + { + "bbox": [ + 48, + 119, + 288, + 174 + ], + "type": "text", + "content": "[25] Jae Han Lee, Minhyeok Heo, Kyung Rae Kim, and Chang Su Kim. Single-image depth estimation based on fourier domain analysis. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 330-339, 12 2018. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 177, + 288, + 231 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 177, + 288, + 231 + ], + "spans": [ + { + "bbox": [ + 48, + 177, + 288, + 231 + ], + "type": "text", + "content": "[26] Boying Li, Yuan Huang, Zeyu Liu, Danping Zou, and Wenxian Yu. Structdepth: Leveraging the structural regularities for self-supervised indoor depth estimation. Proceedings of the IEEE International Conference on Computer Vision, pages 12643-12653, 8 2021. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 234, + 288, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 234, + 288, + 277 + ], + "spans": [ + { + "bbox": [ + 48, + 234, + 288, + 277 + ], + "type": "text", + "content": "[27] Zhenyu Li, Zehui Chen, Xianming Liu, and Junjun Jiang. Depthformer: Exploiting long-range correlation and local information for accurate monocular depth estimation. arXiv e-prints, abs/2203.14211, 3 2022. 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 280, + 288, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 280, + 288, + 334 + ], + "spans": [ + { + "bbox": [ + 48, + 280, + 288, + 334 + ], + "type": "text", + "content": "[28] Chen Liu, Kihwan Kim, Jinwei Gu, Yasutaka Furukawa, and Jan Kautz. Planercnn: 3d plane detection and reconstruction from a single image. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2019-June:4445-4454, 12 2018. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 337, + 288, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 337, + 288, + 392 + ], + "spans": [ + { + "bbox": [ + 48, + 337, + 288, + 392 + ], + "type": "text", + "content": "[29] Chen Liu, Jimei Yang, Duygu Ceylan, Ersin Yumer, and Yasutaka Furukawa. Planenet: Piece-wise planar reconstruction from a single rgb image. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 2579-2588, 4 2018. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 394, + 288, + 438 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 394, + 288, + 438 + ], + "spans": [ + { + "bbox": [ + 48, + 394, + 288, + 438 + ], + "type": "text", + "content": "[30] Fayao Liu, Chunhua Shen, Guosheng Lin, and Ian Reid. Learning depth from single monocular images using deep convolutional neural fields. IEEE Transactions on Pattern Analysis and Machine Intelligence, 38:2024-2039, 2 2015. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 441, + 288, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 441, + 288, + 495 + ], + "spans": [ + { + "bbox": [ + 48, + 441, + 288, + 495 + ], + "type": "text", + "content": "[31] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. 
Swin transformer: Hierarchical vision transformer using shifted windows. Proceedings of the IEEE International Conference on Computer Vision, pages 9992-10002, 3 2021. 6, 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 498, + 288, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 498, + 288, + 552 + ], + "spans": [ + { + "bbox": [ + 48, + 498, + 288, + 552 + ], + "type": "text", + "content": "[32] Francesco Locatello, Dirk Weissenborn, Thomas Unterthiner, Aravindh Mahendran, Georg Heigold, Jakob Uszkoreit, Alexey Dosovitskiy, and Thomas Kipf. Object-centric learning with slot attention. Advances in Neural Information Processing Systems, 2020-December, 6 2020. 2, 3, 4, 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 555, + 288, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 555, + 288, + 609 + ], + "spans": [ + { + "bbox": [ + 48, + 555, + 288, + 609 + ], + "type": "text", + "content": "[33] Xiaoxiao Long, Cheng Lin, Lingjie Liu, Wei Li, Christian Theobalt, Ruigang Yang, and Wenping Wang. Adaptive surface normal constraint for depth estimation. Proceedings of the IEEE International Conference on Computer Vision, pages 12829-12838, 3 2021. 1, 2, 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 612, + 288, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 612, + 288, + 644 + ], + "spans": [ + { + "bbox": [ + 48, + 612, + 288, + 644 + ], + "type": "text", + "content": "[34] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. 7th International Conference on Learning Representations, ICLR 2019, 11 2017. 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 647, + 288, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 288, + 712 + ], + "type": "text", + "content": "[35] S. H. Mahdi Miangoleh, Sebastian Dille, Long Mai, Sylvain Paris, and Yagiz Aksoy. Boosting monocular depth estimation models to high-resolution via content-adaptive multi-resolution merging. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 9680-9689, 5 2021. 2" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "text", + "content": "[36] Pushmeet Kohli Nathan Silberman, Derek Hoiem and Rob Fergus. Indoor segmentation and support inference from rgbd images. In ECCV, 2012. 5" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 107, + 547, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 547, + 205 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 547, + 205 + ], + "type": "text", + "content": "[37] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. 
In Advances in Neural Information Processing Systems 32, pages 8024-8035. Curran Associates, Inc., 2019. 6" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 208, + 547, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 208, + 547, + 262 + ], + "spans": [ + { + "bbox": [ + 307, + 208, + 547, + 262 + ], + "type": "text", + "content": "[38] Vaishakh Patil, Christos Sakaridis, Alexander Liniger, and Luc Van Gool. P3Depth: Monocular depth estimation with a piecewise planarity prior. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR, pages 1600-1611. IEEE, 2022. 1, 2, 6, 7" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 264, + 547, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 264, + 547, + 340 + ], + "spans": [ + { + "bbox": [ + 308, + 264, + 547, + 340 + ], + "type": "text", + "content": "[39] Xiaojuan Qi, Renjie Liao, Zhengzhe Liu, Raquel Urtasun, and Jiaya Jia. Geonet: Geometric neural network for joint depth and surface normal estimation. In 2018 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2018, Salt Lake City, UT, USA, June 18-22, 2018, pages 283-291. Computer Vision Foundation / IEEE Computer Society, 2018. 2, 8" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 342, + 547, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 342, + 547, + 396 + ], + "spans": [ + { + "bbox": [ + 308, + 342, + 547, + 396 + ], + "type": "text", + "content": "[40] Xiaojuan Qi, Zhengzhe Liu, Renjie Liao, Philip H. S. Torr, Raquel Urtasun, and Jiaya Jia. Geonet++: Iterative geometric neural network with edge-aware refinement for joint depth and surface normal estimation. IEEE Trans. Pattern Anal. Mach. Intell., 44(2):969-984, 2022. 8" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 399, + 547, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 399, + 547, + 441 + ], + "spans": [ + { + "bbox": [ + 308, + 399, + 547, + 441 + ], + "type": "text", + "content": "[41] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. Proceedings of the IEEE International Conference on Computer Vision, pages 12159-12168, 3 2021. 1, 6, 7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 445, + 547, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 445, + 547, + 531 + ], + "spans": [ + { + "bbox": [ + 308, + 445, + 547, + 531 + ], + "type": "text", + "content": "[42] Sara Sabour, Nicholas Frosst, and Geoffrey E. Hinton. Dynamic routing between capsules. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett, editors, Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pages 3856-3866, 2017. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 533, + 547, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 533, + 547, + 586 + ], + "spans": [ + { + "bbox": [ + 308, + 533, + 547, + 586 + ], + "type": "text", + "content": "[43] Shuran Song, Samuel P. Lichtenberg, and Jianxiong Xiao. Sun rgb-d: A rgb-d scene understanding benchmark suite. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 07-12-June-2015:567-576, 10 2015. 
5" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 590, + 547, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 590, + 547, + 633 + ], + "spans": [ + { + "bbox": [ + 308, + 590, + 547, + 633 + ], + "type": "text", + "content": "[44] Zhiqing Sun, Shengcao Cao, Yiming Yang, and Kris Kitani. Rethinking transformer-based set prediction for object detection. Proceedings of the IEEE International Conference on Computer Vision, pages 3591-3600, 11 2020. 4" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 635, + 547, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 635, + 547, + 678 + ], + "spans": [ + { + "bbox": [ + 308, + 635, + 547, + 678 + ], + "type": "text", + "content": "[45] Mingxing Tan and Quoc V. Le. Efficientnet: Rethinking model scaling for convolutional neural networks. 36th International Conference on Machine Learning, ICML 2019, 2019-June:10691-10700, 5 2019. 6, 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 681, + 547, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 681, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 681, + 547, + 713 + ], + "type": "text", + "content": "[46] Yao-Hung Hubert Tsai, Nitish Srivastava, Hanlin Goh, and Ruslan Salakhutdinov. Capsules with inverted dot-product attention routing. arXiv e-prints, abs/2002.04764, 2020. 2" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "21486" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 715 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 288, + 137 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 288, + 137 + ], + "type": "text", + "content": "[47] Igor Vasiljevic, Nicholas I. Kolkin, Shanyi Zhang, Ruotian Luo, Haochen Wang, Falcon Z. Dai, Andrea F. Daniele, Mohammadreza Mostajabi, Steven Basart, Matthew R. Walter, and Gregory Shakhnarovich. DIODE: A dense indoor and outdoor depth dataset. arXiv e-prints, abs/1908.00463, 2019. 5" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 139, + 288, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 139, + 288, + 205 + ], + "spans": [ + { + "bbox": [ + 48, + 139, + 288, + 205 + ], + "type": "text", + "content": "[48] Jingdong Wang, Ke Sun, Tianheng Cheng, Borui Jiang, Chaorui Deng, Yang Zhao, Dong Liu, Yadong Mu, Mingkui Tan, Xinggang Wang, Wenyu Liu, and Bin Xiao. Deep high-resolution representation learning for visual recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43:3349-3364, 8 2019. 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 206, + 288, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 206, + 288, + 270 + ], + "spans": [ + { + "bbox": [ + 48, + 206, + 288, + 270 + ], + "type": "text", + "content": "[49] Peng Wang, Xiaohui Shen, Bryan C. Russell, Scott Cohen, Brian L. Price, and Alan L. Yuille. SURGE: surface regularized geometry estimation from a single image. 
In Daniel D. Lee, Masashi Sugiyama, Ulrike von Luxburg, Isabelle Guyon, and Roman Garnett, editors, Advances in Neural Information Processing Systems, pages 172-180, 2016. 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 272, + 288, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 272, + 288, + 326 + ], + "spans": [ + { + "bbox": [ + 48, + 272, + 288, + 326 + ], + "type": "text", + "content": "[50] Cihang Xie, Mingxing Tan, Boqing Gong, Jiang Wang, Alan L. Yuille, and Quoc V. Le. Adversarial examples improve image recognition. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 816-825, 11 2019. 6, 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 327, + 288, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 327, + 288, + 392 + ], + "spans": [ + { + "bbox": [ + 48, + 327, + 288, + 392 + ], + "type": "text", + "content": "[51] Dan Xu, Wanli Ouyang, Xiaogang Wang, and Nicu Sebe. Pad-net: Multi-tasks guided prediction-and-distillation network for simultaneous depth estimation and scene parsing. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 675-684, 5 2018. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 393, + 288, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 393, + 288, + 448 + ], + "spans": [ + { + "bbox": [ + 48, + 393, + 288, + 448 + ], + "type": "text", + "content": "[52] Dan Xu, Wei Wang, Hao Tang, Hong Liu, Nicu Sebe, and Elisa Ricci. Structured attention guided convolutional neural fields for monocular depth estimation. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 3917-3925, 3 2018. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 449, + 288, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 449, + 288, + 502 + ], + "spans": [ + { + "bbox": [ + 48, + 449, + 288, + 502 + ], + "type": "text", + "content": "[53] Fengting Yang and Zihan Zhou. Recovering 3d planes from a single image via convolutional neural networks. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), 11214 LNCS:87-103, 2018. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 504, + 288, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 504, + 288, + 557 + ], + "spans": [ + { + "bbox": [ + 48, + 504, + 288, + 557 + ], + "type": "text", + "content": "[54] Guanglei Yang, Hao Tang, Mingli Ding, Nicu Sebe, and Elisa Ricci. Transformer-based attention networks for continuous pixel-wise prediction. Proceedings of the IEEE International Conference on Computer Vision, pages 16249-16259, 3 2021. 1, 2, 6, 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 559, + 288, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 559, + 288, + 602 + ], + "spans": [ + { + "bbox": [ + 48, + 559, + 288, + 602 + ], + "type": "text", + "content": "[55] Wei Yin, Yifan Liu, Chunhua Shen, and Youliang Yan. Enforcing geometric constraints of virtual normal for depth prediction. Proceedings of the IEEE International Conference on Computer Vision, pages 5683-5692, 7 2019. 
1, 2, 5, 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 603, + 288, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 603, + 288, + 647 + ], + "spans": [ + { + "bbox": [ + 48, + 603, + 288, + 647 + ], + "type": "text", + "content": "[56] Fisher Yu, Vladlen Koltun, and Thomas Funkhouser. Dilated residual networks. Proceedings - 30th IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, 2017-January:636-644, 5 2017. 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 647, + 288, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 288, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 288, + 689 + ], + "type": "text", + "content": "[57] Zehao Yu, Lei Jin, and Shenghua Gao. " + }, + { + "bbox": [ + 48, + 647, + 288, + 689 + ], + "type": "inline_equation", + "content": "\\mathbf{P}^2" + }, + { + "bbox": [ + 48, + 647, + 288, + 689 + ], + "type": "text", + "content": " net: Patch-match and plane-regularization for unsupervised indoor depth estimation. In European Conference on Computer Vision, pages 206–222, 7 2020. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 691, + 288, + 715 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 288, + 715 + ], + "type": "text", + "content": "[58] Zehao Yu, Jia Zheng, Dongze Lian, Zihan Zhou, and Shenghua Gao. Single-image piece-wise planar 3d recon" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 547, + 352 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 327, + 73, + 547, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 547, + 106 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 547, + 106 + ], + "type": "text", + "content": "struction via associative embedding. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2019-June:1029-1037, 2 2019. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 107, + 547, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 107, + 547, + 162 + ], + "spans": [ + { + "bbox": [ + 308, + 107, + 547, + 162 + ], + "type": "text", + "content": "[59] Weihao Yuan, Xiaodong Gu, Zuozhuo Dai, Siyu Zhu, and Ping Tan. Neural window fully-connected crfs for monocular depth estimation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR, pages 3906-3915. IEEE, 2022. 1, 2, 5, 6, 7, 8" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 163, + 547, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 163, + 547, + 217 + ], + "spans": [ + { + "bbox": [ + 308, + 163, + 547, + 217 + ], + "type": "text", + "content": "[60] Weidong Zhang, Wei Zhang, and Yinda Zhang. Geolayout: Geometry driven room layout estimation based on depth maps of planes. In European Conference on Computer Vision, pages 632-648. Springer Science and Business Media Deutschland GmbH, 8 2020. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 219, + 547, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 219, + 547, + 273 + ], + "spans": [ + { + "bbox": [ + 308, + 219, + 547, + 273 + ], + "type": "text", + "content": "[61] Zhenyu Zhang, Zhen Cui, Chunyan Xu, Yan Yan, Nicu Sebe, and Jian Yang. 
Pattern-affinitive propagation across depth, surface normal and semantic segmentation. In IEEE Computer Society Conference on Computer Vision and Pattern Recognition CVPR, pages 4101-4110, 6 2019. 8" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 274, + 547, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 274, + 547, + 306 + ], + "spans": [ + { + "bbox": [ + 308, + 274, + 547, + 306 + ], + "type": "text", + "content": "[62] Brady Zhou, Philipp Krahenbuhl, and Vladlen Koltun. Does computer vision matter for action? Science Robotics, 4, 5 2019. 1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 308, + 547, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 308, + 547, + 352 + ], + "spans": [ + { + "bbox": [ + 308, + 308, + 547, + 352 + ], + "type": "text", + "content": "[63] Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, and Jifeng Dai. Deformable DETR: deformable transformers for end-to-end object detection. In 9th International Conference on Learning Representations ICLR, 2021. 5" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "21487" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/iQuery_ Instruments As Queries for Audio-Visual Sound Separation/79c43f28-c9f8-4b22-8dca-1b2b47c85f07_content_list.json b/2023/iQuery_ Instruments As Queries for Audio-Visual Sound Separation/79c43f28-c9f8-4b22-8dca-1b2b47c85f07_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..3aa75a81d9ba9fc3dca20c6718bd4554ca4be916 --- /dev/null +++ b/2023/iQuery_ Instruments As Queries for Audio-Visual Sound Separation/79c43f28-c9f8-4b22-8dca-1b2b47c85f07_content_list.json @@ -0,0 +1,1461 @@ +[ + { + "type": "text", + "text": "iQuery: Instruments as Queries for Audio-Visual Sound Separation", + "text_level": 1, + "bbox": [ + 143, + 130, + 825, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jiaben Chen $^{1}$ , Renrui Zhang $^{2}$ , Dongze Lian $^{3}$ , Jiaqi Yang $^{4}$ , Ziyao Zeng $^{4}$ , Jianbo Shi $^{5}$", + "bbox": [ + 151, + 179, + 815, + 199 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ UC San Diego $^{2}$ The Chinese University of Hong Kong $^{3}$ National University of Singapore $^{4}$ ShanghaiTech University $^{5}$ University of Pennsylvania", + "bbox": [ + 111, + 208, + 857, + 246 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 286, + 313, + 301 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Current audio-visual separation methods share a standard architecture design where an audio encoder-decoder network is fused with visual encoding features at the encoder bottleneck. This design confounds the learning of multi-modal feature encoding with robust sound decoding for audio separation. To generalize to a new instrument, one must fine-tune the entire visual and audio network for all musical instruments. 
We re-formulate the visual-sound separation task and propose Instruments as Queries (iQuery) with a flexible query expansion mechanism. Our approach ensures cross-modal consistency and cross-instrument disentanglement. We utilize \"visually named\" queries to initiate the learning of audio queries and use cross-modal attention to remove potential sound source interference at the estimated waveforms. To generalize to a new instrument or event class, drawing inspiration from the text-prompt design, we insert additional queries as audio prompts while freezing the attention mechanism. Experimental results on three benchmarks demonstrate that our iQuery improves audio-visual sound source separation performance. Code is available at https://github.com/JiabenChen/iQuery.", + "bbox": [ + 76, + 316, + 473, + 650 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 679, + 209, + 695 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Humans use multi-modal perception to understand complex activities. To mimic this skill, researchers have studied audio-visual learning [3, 17, 33] by exploiting the synchronization and correlation between auditory and visual information. In this paper, we focus on the sound source separation task, where we aim to identify and separate different sound components within a given sound mixture [60, 74]. Following the \"Mix-and-Separate\" framework [32, 34, 81], we learn to separate sounds by mixing multiple audio signals to generate an artificially complex auditory representation and then use it as a self-supervised task to separate individual sounds from the mixture. The works [26, 53, 89] showed that visually-guided sound separation is achievable", + "bbox": [ + 76, + 704, + 470, + 901 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "by leveraging visual information of the sound source.", + "bbox": [ + 498, + 287, + 852, + 303 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Prevalent architectures take a paradigm of a visual-conditioned encoder-decoder architecture [23, 26, 58, 88], where encoded features from audio and visual modalities are fused at the bottleneck for decoding to yield separated spectrogram masks. However, it is noticed that this design often creates a \"muddy\" sound and \"cross-talk\" that leaks from one instrument to another. To create a clean sound separation, one would like the audio-visual encoders to be (1) self-consistent within the music instrument and (2) contrasting across. One approach [27] added critic functions explicitly to enforce these properties. Another method [99] used a two-step process with the second motion-conditioned generation process to filter out unwanted cross-talks. We call these approaches decoder-centric.", + "bbox": [ + 496, + 305, + 892, + 517 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Most recent works focus on addressing the \"muddy\" and \"cross-talk\" issue by improving fine details of audio-visual feature extraction: for example, adding human motion encoding as in [23, 88, 99], or cross-modality representations [58] via self-supervised learning. Once the feature representations are learned, the standard encoder-decoder FCN style segmentation is used as an afterthought. We consider these methods feature-centric. The standard designs have two limitations. 
First, it is hard to balance decoder-centric and feature-centric approaches that enforce a common goal of cross-modality consistency and cross-instrument contrast. Second, to learn a new musical instrument, one has to retrain the entire network via self-supervision.", + "bbox": [ + 496, + 520, + 893, + 717 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To tackle these limitations, we propose a query-based sound separation framework, iQuery. We recast this problem from a query-based transformer segmentation view, where each query learns to segment one instrument, similar to visual segmentation [15, 16, 65, 78]. We treat each audio query as a learnable prototype that parametrically models one sound class. We fuse visual modality with audio by \"visually naming\" the audio query: using object detection to assign visual features to the corresponding audio query. Within the transformer decoder, the visually initialized queries interact with the audio features through cross-attention, thus ensuring cross-modality consistency. Self", + "bbox": [ + 496, + 719, + 893, + 901 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "14675", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/bebce95567aff6300ad0b46f8f5ac5744a1e9a11a1842bb242f0a58a0d86d530.jpg", + "image_caption": [ + "Figure 1. Pipeline of iQuery. Our system takes as input an audio mixture and its corresponding video frames, and disentangles separated sound sources for each video. Our pipeline consists of two main modules: an Audio-Visual Feature Extraction module which extracts audio, object, and motion features through three corresponding encoders, and an Audio-Visual Transformer module for sound separation. The query-based sound separation transformer has three key components: 1) \"visually-named\" audio queries are initialized by extracted object features, 2) cross-attention between the audio queries with static image features, dynamic motion features and audio features, 3) self-attention between the learned audio queries to ensure cross-instrument contrast." + ], + "image_footnote": [], + "bbox": [ + 119, + 90, + 851, + 349 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "attention across the audio queries for different instruments implements a soft version of the cross-instrument contrast objective. With this design, we unify the feature-centric with the decoder-centric approach.", + "bbox": [ + 75, + 472, + 468, + 531 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "How do we achieve generalizability? Motivated by recent success in fine-tuning domain transfer with the text-prompt [28] and visual-prompt designs [7, 35, 41, 86], we adaptively insert the additional queries as audio prompts to accommodate new instruments. With the audio-prompt design, we freeze most of the transformer network parameters and only fine-tune the newly added query embedding layer. 
We conjecture that the learned prototype queries are instrument-dependent, while the cross/self-attention mechanism in the transformer is instrument-independent.", + "bbox": [ + 75, + 532, + 468, + 681 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our main contributions are:", + "bbox": [ + 96, + 684, + 282, + 698 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- To the best of our knowledge, we are the first to study the audio-visual sound separation problem from a tunable query view to disentangle different sound sources explicitly through learnable audio prototypes in a mask transformer architecture.", + "- To generalize to a new sound class, we design an audio prompt for fine-tuning with most of the transformer architecture frozen.", + "- Extensive experiments and ablations verify the effectiveness of our core designs for disentangle-. ment, demonstrating performance gain for audiovisual sound source separation on three benchmarks." + ], + "bbox": [ + 94, + 704, + 468, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related work", + "text_level": 1, + "bbox": [ + 500, + 470, + 635, + 486 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Audio-Visual Sound Source Separation. Recent years have witnessed promising results of audiovisual multi-modality joint learning [49, 62, 67, 75, 83] in domains like audio-visual sound source localization [4, 5, 14, 36, 55, 61, 63, 93], audio-visual event localization [68, 76, 77, 95] and sound synthesis from videos [25, 52, 54, 80, 97]. Sound source separation, a challenging classical problem, has been researched extensively in the audio signal processing area [11, 22, 37, 40]. A well-known example is the cocktail party problem [31, 48] in speech domain [1, 21]. Works have been proposed recently for tasks like speech separation [2, 27, 39, 51, 70], active sound separation [45, 46] and on-screen sound separation [25, 53, 71, 72]. Our work focuses on audio-visual sound separation. Recent audio-visual sound separation methods could be classified generally into two categories: feature-centric and decoder-centric as discussed in Sec. 1. Feature-centric methods exploit various ways for visual feature extraction selection to aid this multi-modality task. Some works consider frame-based appearance features (static frame features [24, 79, 89] or detected object regions [26, 66]) for extracting visual semantic cues (e.g., instrument categories) to guide sound separation. [12, 13] adds embeddings from an audio-visual scene graph at the U-Net bottleneck to model the visual context of sound sources. Based on the assessment that motion signals", + "bbox": [ + 496, + 507, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "14676", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/e7d991a0c55c03ea373ee6ab8b3489166941f8aed60c891607dcf99071308c67.jpg", + "image_caption": [ + "Figure 2. Qualitative results on MUSIC test set. The first column shows the mixed video frames, the second to the fourth columns compare our predicted spectrogram masks against masks yielded by state-of-the-art algorithm [66] and ground truth masks, and the fifth to the seventh columns visualize separated spectrograms. [66] produces blurry masks and contains unseparated components from another sound source, while our system successfully generates accurate mask and clean spectrograms as the ground truth." 
+ ], + "image_footnote": [], + "bbox": [ + 202, + 88, + 772, + 329 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "could more tightly couple the moving sounding object with corresponding variations of sounds, recent approaches focus on including motion information into the pipeline (e.g., optical flow [88], and human pose [23,58]). Based on this, [94] proposes a framework to search for the optimal fusion strategy for multi-modal features. Decoder-centric methods explore prevention of \"cross-talk\" between the audio sources in the decoder stage. [99] designs a two-stage pipeline, where the second stage conducts a counterfactual synthesis through motion features to remove potentially leaked sound. The approach of [27] added critic functions explicitly to enforce cross-modal consistency and cross-instrument contrast.", + "bbox": [ + 75, + 431, + 472, + 628 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Vision Transformers. Motivated by transformer's success in natural language processing [73], transformers were first introduced in computer vision for image classification as ViT [20]. Given the superior long-range modeling capacity, many follow-up works [47, 69, 82] have upgraded ViT to achieve higher performance and widely surpassed convolutional neural networks. Further, transformer-based models are adopted for various downstream tasks, such as 2D object detection [9, 91, 100], semantic/instance segmentation [65, 78, 92], 3D object detection [50, 85], shape recognition [84, 90] and video understanding [6, 42]. Particularly, following the pipeline from DETR [9], MaskFormer [16] and Mask2Former [15] represent each mask candidate as a learnable query and conduct parallel decoding for instance-level segmentation. However, only few approaches [39, 58, 71, 72, 99] have extended transformer for audio-visual sound separation fields. [58] adopts a BERT", + "bbox": [ + 75, + 643, + 472, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "[18] architecture to learn visual, pose, and audio feature representations. [99] designs an audio-motion transformer to refine sound separation results through audio-motion feature fusion. These methods focus mainly on learning better contextualized multi-modality representations through an encoder transformer. In contrast, our mask transformer-based network focuses on the entire process of visual-audio separation task. We disentangle different sound sources through independent learnable query prototypes and segment each time-frequency region on the spectrogram via mask prediction in an end-to-end fashion.", + "bbox": [ + 496, + 431, + 893, + 599 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 500, + 613, + 591, + 628 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We first describe the formulation of the audio-visual sound separation task and introduce our pipeline iQuery briefly in Sec. 3.1. Then we introduce networks for learning representations from visual and audio modalities in Sec. 3.2 and our proposed cross-modality cross-attention transformer architecture for visual sound separation in Sec. 3.3. Finally, we introduce our adaptive query fine-tuning strategy through designs of flexible tunable queries in Sec. 3.4.", + "bbox": [ + 496, + 638, + 890, + 762 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. 
Overview", + "text_level": 1, + "bbox": [ + 500, + 770, + 609, + 786 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As mentioned before, our goal is to disentangle the audio mixture concerning its corresponding sound sources in the given mixture by using so-called queries. Following previous works [21, 89], we adopt a commonly used \"Mix-and-Separate\" self-supervised source separation procedure. Given $K$ video clips with accompanying audio signal: $\\{(V_k,s_k(t))\\}_{k\\in [1,K]}$ , we create a sound mixture:", + "bbox": [ + 496, + 794, + 893, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "14677", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "$s_{mix}(t) = \\sum_{k=1}^{K} s_k(t)$ as training data. Our disentanglement goal is to separate sounds $s_k(t)$ from $s_{mix}(t)$ for sound sources in $V_k$ , respectively. The pipeline, as illustrated in Fig. 1, is mainly composed of two components: an Audio-Visual Feature Extraction module and a Mask Transformer-based Sound Separation module. First, in the feature extraction module, the object detector & image encoder, and video encoder extract object-level visual features and motion features from video clip $V_k$ . The audio network yields an audio feature and an audio embedding from the given sound mixture $s_{mix}(t)$ . After that, a cross-modal transformer decoder attends to visual and audio features and outputs audio mask embeddings, which are further combined with audio embeddings for sound separation.", + "bbox": [ + 75, + 89, + 472, + 303 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Audio-Visual Feature Extraction", + "text_level": 1, + "bbox": [ + 76, + 315, + 364, + 330 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Object Detector & Image Encoder. To initialize learning of audio queries, we assign object-level visual appearance features to the corresponding queries, to create \"visually named\" queries. In the implementation, following [26], we use a Faster R-CNN object detector with ResNet-101 backbone. For frames in a given video clip $V_{k}$ , the object detector is utilized to acquire the detected objects set $O_{k}$ . After that, we adopt a pre-trained ResNet-18 similar to [66], followed by a linear layer and max pooling to yield object-level features $F_{O_k}\\in \\mathbb{R}^{C_O}$ , where $C_O$ denotes channel dimension of object features.", + "bbox": [ + 75, + 339, + 472, + 507 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Video Encoder. The video encoder maps the video frames from $V_{k} \\in \\mathbb{R}^{3 \\times T_{k} \\times H_{k} \\times W_{k}}$ into a motion feature representation. In contrast with previous motion representations [23, 58, 88, 99], we use self-supervised video representation obtained from a 3D video encoder of I3D [10] pre-trained by FAME [19]. The model is pre-trained contrastively to concentrate on moving foregrounds. Finally, a spatial pooling is applied to obtain motion embedding $F_{M_k} \\in \\mathbb{R}^{C_M \\times T_k'}$ , where $C_M$ denotes the dimension of the motion feature.", + "bbox": [ + 75, + 522, + 472, + 674 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Audio Network. The audio network takes the form of skip-connected U-Net style architectures [59] following [26, 66, 89]. 
Given the input audio mixture $s_{mix}(t)$ , we first apply a Short-Time Fourier Transform (STFT) [30] to convert the raw waveform to a 2D Time-Frequency spectrogram representation $S_{mix} \\in \\mathbb{R}^{F \\times T}$ , which is then fed into the U-Net encoder to obtain an audio feature map $F_A \\in \\mathbb{R}^{C_A \\times \\frac{F}{S} \\times \\frac{T}{S}}$ ( $C_A$ denotes the number of channels and $S$ denotes stride of audio feature map) at the bottleneck. A U-Net decoder gradually upsamples the audio features to yield audio embeddings $\\varepsilon_A \\in \\mathbb{R}^{C_\\varepsilon \\times F \\times T}$ ( $C_\\varepsilon$ denotes the dimension of audio embeddings), which is combined further with the transformer mask embeddings to generate the separated sound spectrogram mask $M_k$ .", + "bbox": [ + 75, + 688, + 472, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Audio-Visual Transformer", + "text_level": 1, + "bbox": [ + 498, + 90, + 740, + 106 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our cross-modality sound separation transformer contains the transformer decoder [73] with $N$ queries (i.e., learnable prototypes), and utilizes the extracted object features $F_{O_k}$ , motion embeddings $F_{M_k}$ and audio features $F_A$ to yield $N$ mask embeddings $\\varepsilon_{mask} \\in \\mathbb{R}^{C_{\\varepsilon} \\times N}$ for spectrogram mask prediction of separated sound $s_k(t)$ , where $N$ denotes maximum of the pre-defined instrument types.", + "bbox": [ + 496, + 113, + 893, + 236 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Audio query prototypes. We denote audio queries as $Q \\in \\mathbb{R}^{C_Q \\times N}$ to represent different instruments, which are initialized by \"visually naming\" audio queries. Specifically, \"visually naming\" means that we assign object features $F_{O_k}$ to the corresponding query in $Q$ with element-wise addition to yield \"visually-named\" queries $Q_v$ , which are then fed into the transformer decoder cross-attention layers.", + "bbox": [ + 496, + 250, + 893, + 356 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Cross-attention layers. In the decoder, we stack one motion-aware decoder layer and three audio-aware decoder layers. The \"visually-named\" queries $Q_{v}$ first interact temporally with motion features $F_{M_k}$ in the motion-aware decoder layer with motion cross-attention by Attention $(Q_{v},F_{M_{k}},F_{M_{k}})$ . This is followed by an FFN to generate the motion-decoded queries $Q^{\\prime}$ , which are then fed into three audio-aware decoder layers to adaptively interact with audio features $F_{A}$ , each of which consists of a self-attention, an audio cross-attention computed by Attention $(Q^{\\prime},F_{A},F_{A})$ , and an FFN. The output $N$ audio segmentation embeddings $\\varepsilon_{Q}\\in \\mathbb{R}^{C_{Q}\\times N}$ is computed by", + "bbox": [ + 496, + 369, + 893, + 551 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\varepsilon_ {Q} = \\operatorname {A u d i o D e c o d e r} _ {\\times 3} \\left(Q ^ {\\prime}, F _ {A}, F _ {A}\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 566, + 559, + 890, + 575 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where AudioDecoder stands for our audio-aware decoder layer. 
Similar to [9, 16], the decoder generates all audio segmentation embeddings parallelly.", + "bbox": [ + 496, + 582, + 893, + 628 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Separated mask prediction. Through the above decoder, the $N$ audio segmentation embeddings $\\varepsilon_{Q}$ are converted to $N$ mask embeddings $\\varepsilon_{mask} \\in \\mathbb{R}^{C_{\\varepsilon} \\times N}$ through a MLP with two hidden layers, where dimension $C_{\\varepsilon}$ is identical to dimension of audio embeddings $\\varepsilon_{A} \\in \\mathbb{R}^{C_{\\varepsilon} \\times F \\times T}$ . Then each predicted mask $M_{k} \\in \\mathbb{R}^{F \\times T}$ of the separated sound spectrogram is generated by a dot-product between the corresponding mask embedding in $\\varepsilon_{mask}$ and audio embedding $\\varepsilon_{A}$ from the audio decoder. Finally, we multiply the sound mixture spectrogram $S_{mix}$ and the predicted mask $M_{k}$ to disentangle sound spectrogram $S_{k}$ for sound $s_{k}(t)$ by", + "bbox": [ + 496, + 643, + 893, + 810 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nS _ {k} = S _ {\\text {m i x}} \\odot M _ {k}, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 630, + 816, + 890, + 833 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\odot$ denotes the element-wise multiplication operator. Ultimately, separated sound signal $s_k(t)$ is produced by applying inverse STFT to the separated spectrogram $S_{k}$ .", + "bbox": [ + 496, + 840, + 893, + 887 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "14678", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/1b63f6aea286d27ae18630fcc1832b89f90211b28f9777a416d1430a45904f23.jpg", + "image_caption": [ + "Figure 3. Audio prompts design. To generalize to new types of instruments/event classes, we propose to insert additional queries (audio prompts) to learn new audio prototypes for unseen classes. With this design, we only fine-tune the query embedding layer while keeping all the other parts of transformer backbone frozen." + ], + "image_footnote": [], + "bbox": [ + 78, + 89, + 467, + 167 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training objective. Following [26, 89], we set our training objective as optimizing spectrogram masks. The ground truth ratio mask $M_{k}^{GT}$ of $k$ -th video is calculated as follows,", + "bbox": [ + 75, + 281, + 468, + 328 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nM _ {k} ^ {G T} (t, f) = \\frac {S _ {k} (t , f)}{S _ {m i x} (t , f)}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 183, + 337, + 468, + 369 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $(t,f)$ denotes time-frequency coordinates. We adopt per-pixel $L1$ loss [87] to optimize the overall sound separation network, sound separation loss $L_{sep}$ is defined as,", + "bbox": [ + 75, + 377, + 468, + 422 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {s e p} = \\sum_ {k = 1} ^ {K} \\left| \\left| M _ {k} - M _ {k} ^ {G T} \\right| \\right| _ {1}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 174, + 435, + 468, + 476 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $K$ denotes number of mixed sounds in $S_{mix}$ .", + "bbox": [ + 76, + 487, + 418, + 503 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. 
Tunable Queries as Audio Prompts", + "text_level": 1, + "bbox": [ + 76, + 512, + 382, + 529 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "With the flexible design of tunable queries as learnable prototypes, our pipeline is more friendly to generalizing to new types of instruments. Unlike previous methods that need to finetune the entire mask generation U-Net, we could insert additional queries (i.e., audio prompts) for the new instruments. Such a method enables us only need to finetune the query embedding layer for learning new audio query prototypes in Sec. 3.3 of our transformer architecture while keeping all cross-attention layers frozen (see Fig.3). Specifically, we add $L$ new audio prompts $P \\in \\mathbb{R}^{C_Q \\times L}$ to original pre-trained audio queries $Q \\in \\mathbb{R}^{C_Q \\times N}$ , then the query embedding layer for the prompted learnable prototypes $Q_{prompted} \\in \\mathbb{R}^{C_Q \\times (N + L)}$ is the only layer learnable in our transformer decoder, while keeping the transformer backbone frozen.", + "bbox": [ + 75, + 536, + 468, + 762 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 776, + 209, + 792 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Experimental Settings", + "text_level": 1, + "bbox": [ + 76, + 801, + 284, + 816 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Datasets. We perform experiments on three widely-used datasets: MUSIC [89], MUSIC-21 [88], and Audio-Visual Event (AVE) [29, 68]. MUSIC dataset spans 11 musical instrument categories: accordion, acoustic guitar, cello, clarinet, erhu, flute, saxophone, trumpet, tuba, violin,", + "bbox": [ + 75, + 824, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "and xylophone. This dataset is relatively clean, and sound sources are always within the scene, collected for the audio-visual sound separation task. We utilize 503 online available solo videos and split them into training/validation/testing sets with 453/25/25 videos from 11 different categories, respectively, following same settings as [66]. MUSIC-21 dataset [88] is an enlarged version of MUSIC [89], which contains 10 more common instrument categories: bagpipe, banjo, bassoon, congas, drum, electric bass, guzheng, piano, pipa, and ukulele. We utilize 1,092 available solo videos and split them into train/test sets with 894/198 videos respectively from 21 different categories. Note that we follow the same training/testing split as [23, 99]. AVE dataset is a general audio-visual learning dataset, covering 28 event classes such as animal behaviors, vehicles, and human activities. We follow the same setting as [99], and utilize 4143 videos from AVE [68] dataset.", + "bbox": [ + 496, + 90, + 890, + 348 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Baselines. For MUSIC dataset, we compare our method with four recent methods for sound separation. NMF-MFCC [64] is a non-learnable audio-only method, we consider reporting this result from [26, 58] on MUSIC test set. We also compare with two representative audio-visual sound separation baselines: Sound-of-Pixels [89] and Co-Separation [26]. We retrained these two methods with the same training data and split them as ours for a fair comparison. Finally, we compare our approach with a most recent publicly-available baseline CCoL [66], which has the same training setting as ours. 
For MUSIC-21 dataset, we compare our method with six recently proposed approaches: Sound-of-Pixels [89], Co-Separation [26], Sound-of-Motions [88], Music Gesture [23], TriBERT [58] and AMnet [99]. For [58], since $12.27\\%$ of the training samples are missing in their given training split, we consider their reported result as a baseline comparison. Finally, for AVE dataset, we compare our method with six state-of-the-art methods. Since we conduct our experiments with the same setting as AMnet [99], we report results from [99] for Multisensory [53], Sound-of-Pixels [89], Sound-of-Motions [88], Minus-Plus [79], Cascaded Opponent Filter [98] as baseline comparisons.", + "bbox": [ + 496, + 364, + 890, + 712 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Evaluation metrics. The sound separation performance is evaluated by the popular adopted mir_eval library [57] in terms of standard metrics: Signal to Distortion Ratio (SDR), Signal to Interference Ratio (SIR), and Signal to Artifact Ratio (SAR). SDR measures the combination of interference and artifacts, SIR measures interference, and SAR measures artifacts. For all three metrics, a higher value indicates better results.", + "bbox": [ + 496, + 731, + 890, + 851 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Implementation Details. For MUSIC [89] and MUSIC-21 [88] datasets, we sub-sample the audio at $11\\mathrm{kHz}$ , and each", + "bbox": [ + 500, + 869, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "14679", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/b640fbc59bf991253b8dfd81f79418df5009c6672c85148a0c4da3a770a7e2a8.jpg", + "image_caption": [ + "Figure 4. Human evaluation results for sound source separation on mixtures of different instrument types. Our system is able to separate sounds with better actual perceptual quality." + ], + "image_footnote": [], + "bbox": [ + 205, + 88, + 364, + 176 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/9618d924d53041221de055cf5aecac86821b51bc0fc7db5f8d851afc54ed96f0.jpg", + "image_caption": [ + "Figure 5. Visualization of audio query embeddings with t-SNE, different instrument categories are color-coded. Our audio queries have learned to cluster by different classes of sound." + ], + "image_footnote": [], + "bbox": [ + 117, + 260, + 423, + 412 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "audio sample is approximately 6 seconds. STFT is applied using a Hann window size of 1022 and a hop length of 256, yielding a $512 \\times 256$ Time-Frequency audio representation. It is then re-sampled on a log-frequency scale to obtain a magnitude spectrogram with $T$ , $F = 256$ . Detected objects in frames are resized to $256 \\times 256$ and randomly cropped to the size of $224 \\times 224$ . We set the video frame rate as 1 FPS, and randomly-selected three frames as input for the object detector. While for AVE [68] dataset, audio signal is sub-sampled at $22\\mathrm{kHz}$ , and we use the full frame rate(29.97 FPS). Other settings are the same as MUSIC except STFT hop length is set as 184, following [99].", + "bbox": [ + 75, + 503, + 468, + 685 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For MUSIC dataset [89], we use the Faster R-CNN object detector pre-trained by [26] on Open Images [38]. 
For MUSIC-21 [88] and AVE [68] datasets, since additional musical and general classes are not covered for this object detector, we adopt a pre-trained Detic detector [96] based on CLIP [56] to detect the 10 more instruments in MUSIC-21 dataset [88] and 28 event classes in AVE dataset [68].", + "bbox": [ + 75, + 686, + 468, + 792 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We utilize 8 heads for all attention modules and select the maximum $N$ objects (number of queries) as 15, 25, and 30 for MUSIC, MUSIC-21 and AVE. The video encoder [19] and the object detector is pre-trained and kept frozen during training and inference. The multi-layer perception (MLP) for separated mask prediction has 2 hidden layers of 256 channels following [16]. Audio feature $F_{A}$ , motion feature", + "bbox": [ + 75, + 794, + 470, + 902 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/42a96aa078bdcd9978bcf18c671d5f06e38df317fe0ea9d895598e8f91ccf8c5.jpg", + "image_caption": [ + "Figure 6. Qualitative results on AVE test dataset. Beyond restricted musical instruments, our model is also able to handle general sound separation tasks (e.g. sounds of galloping race car and frying food on the first two rows; sounds of driving motorcycles and speeches on the last two rows)." + ], + "image_footnote": [], + "bbox": [ + 563, + 125, + 831, + 329 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "$F_{M}$ , object feature $F_{O}$ , and audio queries $Q$ have a channel dimension of 256. And we set the channel dimension of both audio embeddings $\\varepsilon_{A}$ and mask embeddings $\\varepsilon_{M}$ as 32. The epoch number is 80, and batch size is set to 8. We use AdamW [43] for the mask transformer with a weight decay of $10^{-4}$ and Adam for all other networks as optimizer selection. The learning rate of the transformer is set as $10^{-4}$ and decreases by multiplying 0.1 at 60-th epoch. We set the learning rate for other networks as $10^{-4}$ , decreased by multiplying 0.1 at 30-th and 50-th epoch, respectively. Training is conducted on 8 NVIDIA Titan V GPUs.", + "bbox": [ + 498, + 441, + 892, + 609 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Audio-Visual Sound Source Separation", + "text_level": 1, + "bbox": [ + 500, + 619, + 834, + 636 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Quantitative evaluation. Table. 1 demonstrates quantitative results for sound separation results against state-of-the-art methods on MUSIC dataset [89]. Our method outperforms baseline models in separation accuracy measured by all evaluation metrics. Our method outperforms the most recent publicly available state-ofthe-art algorithm [66] by $3.43\\mathrm{dB}$ in terms of SDR score. Regarding quantitative results on MUSIC21 dataset [88], we demonstrate the performance comparison in Table. 2. Again, our method outperforms baseline models in terms of SDR metric. Performance on the previous two datasets demonstrate our model's ability to disentangle musical sounds. To further verify the scalability of our proposed method to general audio-source separation problems, we perform quantitative comparisons on AVE dataset in Table. 3. As is demonstrated, we surpass the state-of-the-art algorithm [99] by $1.31\\mathrm{dB}$ in terms of SDR score. 
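The SDR/SIR/SAR scores discussed in this section are standard BSS-Eval metrics; a typical way to obtain them with the mir_eval library cited in the evaluation-metrics paragraph is sketched below. The array shapes and random data are placeholders, not values from the paper.

```python
# Sketch: computing SDR / SIR / SAR with mir_eval (higher is better for all three).
import numpy as np
from mir_eval.separation import bss_eval_sources

num_sources, num_samples = 2, 11000 * 6
reference = np.random.randn(num_sources, num_samples)                      # ground-truth waveforms s_k(t)
estimated = reference + 0.1 * np.random.randn(num_sources, num_samples)    # separated estimates

sdr, sir, sar, perm = bss_eval_sources(reference, estimated)
print(sdr.mean(), sir.mean(), sar.mean())
```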
AVE is", + "bbox": [ + 496, + 643, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "14680", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/4d50ec237999731a25a363d661b0d9be33de93dd58d7b6423817469bb374d00b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Methods | SDR↑ | SIR↑ | SAR↑
NMF-MFCC [64] | 0.92 | 5.68 | 6.84
Sound-of-Pixels [89] | 4.23 | 9.39 | 9.85
Co-Separation [26] | 6.54 | 11.37 | 9.46
CCoL [66] | 7.74 | 13.22 | 11.54
iQuery (Ours) | 11.17 | 15.84 | 14.27
", + "bbox": [ + 102, + 88, + 439, + 200 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/bf47437b2911661dc98823f1b0ea170b970efdfd371f7e80cea9108783103a63.jpg", + "table_caption": [ + "Table 1. Audio-visual sound separation results on MUSIC. Best results in bold and second-best results in Blue." + ], + "table_footnote": [], + "table_body": "
Methods | SDR↑ | SIR↑ | SAR↑
Sound-of-Pixels [89]* | 7.52 | 13.01 | 11.53
Co-Separation [26]* | 7.64 | 13.80 | 11.30
Sound-of-Motions [88]* | 8.31 | 14.82 | 13.11
Music Gesture [23]* | 10.12 | 15.81 | -
TriBERT [58] | 10.09 | 17.45 | 12.80
AMnet [99]* | 11.08 | 18.00 | 13.22
iQuery (Ours) | 11.12 | 15.98 | 14.16
", + "bbox": [ + 91, + 251, + 450, + 395 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "a general dataset containing scenes like male and female speeches, animal sounds, and vehicle sounds. This clearly shows our model's adaptivity to more general problems of sound source separation.", + "bbox": [ + 75, + 458, + 468, + 518 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Qualitative evaluation. Fig. 2 illustrates qualitative sound separation results on MUSIC dataset. It can be seen that our method disentangles sound sources cleaner and more accurately, with less \"muddy\" sound. Fig. 6 provides additional qualitative examples on AVE dataset, and this again illustrates our model's good performance on general sound source separation cases. Both qualitative and quantitative results verify the superiority of our designed sound query-based segmentation pipeline iQuery.", + "bbox": [ + 75, + 536, + 468, + 672 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Human evaluation. Our quantitative evaluation shows the superiority of our model compared with baseline models, however, studies [8] have shown that audio separation quality could not be truthfully determined purely by the widely used mir_eval [57] metrics. Due to this reason, we further conduct a subjective human evaluation to study the actual perceptual quality of sound-separation results. Specifically, we compare the sound separation result of our model and the publicly available best baseline model [66] on MUSIC [89]. We collected 50 testing samples for all 11 classes from the test set, and each testing sample contains separated sounds with a length of 6 seconds predicted by our model and baseline [66] for the same sound mixture. Ground truth sound is also provided for each sample as a", + "bbox": [ + 75, + 688, + 468, + 900 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/74494a1ab496ba3a5437483c3d6a37a7644e450b06d2d92a8843c6699843afb1.jpg", + "table_caption": [ + "Table 2. Audio-visual sound separation results on MUSIC-21. The results noted by * are obtained from [23, 99]." + ], + "table_footnote": [], + "table_body": "
Methods | SDR↑ | SIR↑ | SAR↑
Multisensory [53]* | 0.84 | 3.44 | 6.69
Sound-of-Pixels [89]* | 1.21 | 7.08 | 6.84
Sound-of-Motions [88]* | 1.48 | 7.41 | 7.39
Minus-Plus [79]* | 1.96 | 7.95 | 8.08
Cascaded Filter [98]* | 2.68 | 8.18 | 8.48
AMnet [99]* | 3.71 | 9.15 | 11.00
iQuery (Ours) | 5.02 | 8.21 | 12.32
", + "bbox": [ + 517, + 88, + 874, + 232 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/9324a1a91a701b0c13ff39d526fe07f1fd9c8657deec2e4d7dde2117cb718617.jpg", + "table_caption": [ + "Table 3. Audio-visual sound separation results on AVE. The results noted by * are obtained from [99]." + ], + "table_footnote": [], + "table_body": "
Methods | SDR↑ | SIR↑ | SAR↑
Sound-of-Pixels [89] | 4.11 | 8.17 | 9.84
Co-Separation [26] | 5.37 | 9.85 | 8.72
CCoL [66] | 6.74 | 11.94 | 10.22
iQuery (Ours) | 8.04 | 11.60 | 13.21
", + "bbox": [ + 526, + 277, + 862, + 375 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 4. Fine-tuning sound separation performance comparison. All methods are pretrained on MUSIC dataset without one particular instrument and then fine-tuned on this new data. Baseline models are tuned with whole network unfrozen, and we keep our transformer backbone frozen.", + "bbox": [ + 498, + 378, + 890, + 448 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "reference. The experiment is conducted by 40 participants separately. For each participant, the orders of our model and baseline [66] are randomly shuffled, and we ask the participant to answer \"Which sound separation result is more close to the ground truth audio?\" for each sample. Statistical results are shown in Fig. 4. Notably, our method significantly surpasses the compared baseline with a winning rate of $72.45\\%$ . This additionally demonstrate the better actual perceptual performance of our model.", + "bbox": [ + 496, + 474, + 890, + 612 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Learned Query Embedding. To visualize that our proposed model has indeed learned to disentangle different sound sources through learnable queries, we show t-SNE embeddings of our learnable queries in MUSIC test set [89]. As is shown in Fig. 5, our queries tend to cluster by different instrument classes, learning representative prototypes.", + "bbox": [ + 496, + 626, + 890, + 717 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Extendable Audio Prompt Fine-tuning", + "text_level": 1, + "bbox": [ + 500, + 726, + 831, + 742 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table. 4 evaluates our approach's generalization ability compared with previous methods. We conduct fine-tuning experiments by leave-one-out cross-validation. Baseline models are fine-tuned on the new instrument with all the networks structure unfrozen. With the design of audio prompts discussed in Sec. 3.4, we keep most of our transformer parameters frozen, only fine-tuning the query embedding layer, which has much fewer parameters (0.048% of the total parameters in Transformer).", + "bbox": [ + 496, + 750, + 890, + 883 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Fig. 7 (a) shows our performance with a varying num", + "bbox": [ + 517, + 885, + 890, + 901 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "14681", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d1d29e0ea15b66cf5b7bbdbecab26f17454c6e4b80b4481fecbcfaf7ed81ca3d.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 86, + 98, + 259, + 200 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/885f6b465a1146a5887214e466db127fab6c7dd2d817c50b512258edd4f63142.jpg", + "image_caption": [ + "(b)", + "Figure 7. Fine-tuning curves of sound separation. (a) Fine-tuning with different number of unseen instrument classes on MUSIC. (b) Fine-tuning with different number of unseen event classes on AVE." + ], + "image_footnote": [], + "bbox": [ + 279, + 98, + 450, + 200 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ber of new instrument classes for fine-tuning on MUSIC dataset. We hold out 1, 2, 4, and 6 instrument classes in the pre-training stage and fine-tune our method on these new classes with only the query embedding layer unfrozen. MUSIC dataset contains in total of 11 instruments. 
Notably, our method still yields good results when the network is only pre-trained on 5 instrument types, even fewer than the unseen classes. Fig. 7 (b) shows our model's fine-tuning performance on AVE dataset with a varying number of new event classes for fine-tuning. We follow the experimental setup on MUSIC, and hold out 2, 4, 6, 8, and 12 event classes for fine-tuning. This demonstrates our model's adaptivity in general sound separation cases.", + "bbox": [ + 75, + 290, + 468, + 486 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4. Contrastive Verification", + "text_level": 1, + "bbox": [ + 76, + 498, + 295, + 512 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Our learnable query-prototypes network is designed to ensure cross-modality consistency and cross-instrument contrast. We assume these prototypes to draw samples of each particular sound class sample close and push away the different prototypes. The question is whether our network design with \"visually-named\" query trained in the \"Mix-and and-Separate\" can already achieve this goal? As an alternative, we design an auxiliary contrastive loss for verification: to maximize the cosine similarity of separated audio embedding $\\varepsilon_{A_k} = \\varepsilon_A \\odot M_k$ and the corresponding query embedding $Q_k$ in $Q$ , while minimizing the cosine similarity of separated audio embedding and other query embeddings $Q_n$ (where $n \\in [1, N], n \\neq k$ ). We optimize the cross-entropy losses of the cosine similarity scores to obtain contrastive loss $L_{contras}$ . To ensure the qualities of audio embedding $\\varepsilon_A$ and predicted mask $M_i$ are accurate enough, we use a hierarchical task learning strategy [44] to control weights for $L_{sep}$ and $L_{contras}$ at each epoch. The verification loss $L_{verify}$ is: $L_{verify} = w_{sep}(e) \\cdot L_{sep} + w_{contras}(e) \\cdot L_{contras}$ where $e$ denotes training epoch and $w(e)$ denotes loss weight.", + "bbox": [ + 75, + 522, + 470, + 840 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ablations of auxiliary contrastive loss, shown in Table. 5, demonstrates that our existing design achieves better results without using explicit contrastive loss. This answers the", + "bbox": [ + 75, + 854, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/089b8d163d490edbcdc85c1970abf5b6493e6724bfb3a8b2bffad262bccd97c0.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Architecture | SDR↑ | SIR↑ | SAR↑
w/o lrn. | 10.05 | 14.27 | 13.71
w/o adpt. | 10.89 | 15.51 | 14.14
w/ con. best | 11.02 | 15.91 | 14.10
Ours (w/o con) | 11.17 | 15.84 | 14.27
", + "bbox": [ + 557, + 89, + 834, + 179 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/009100f0961cfd4ba307198bfa4541bfaa8935c878d285d378a536e10f389171.jpg", + "table_caption": [ + "Table 5. Ablations on the auxiliary contrastive loss on MUSIC dataset. \"w/o lrn.\" denotes without learnable linear layer added to queries produced by Transformer decoder; \"w/o adpt.\" denotes that we use a fixed weight for auxiliary contrastive loss without the Hierarchical Task Learning strategy; \"w/ con. best\" denotes our best model design using auxiliary contrastive loss." + ], + "table_footnote": [], + "table_body": "
Architecture | SDR↑ | SIR↑ | SAR↑
Random | 6.58 | 10.79 | 12.77
Self-audio | 10.54 | 14.81 | 14.23
Self-motion-audio | 10.65 | 15.37 | 13.96
Dual-stream | 10.46 | 15.25 | 13.79
Motion-self-audio | 11.17 | 15.84 | 14.27
", + "bbox": [ + 547, + 282, + 841, + 388 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 6. Ablations on the design of Transformer decoder.", + "bbox": [ + 516, + 392, + 874, + 405 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "question we raised, that our \"visually-named\" queries are already contrastive enough for sound disentanglement.", + "bbox": [ + 498, + 433, + 890, + 463 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5. Ablations of Transformer decoder design", + "text_level": 1, + "bbox": [ + 500, + 474, + 849, + 489 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ablation results of Transformer decoder design on $MUSIC$ dataset is shown in Table. 6. \"Random\" denotes randomly assigning object features to queries, its poor separation result verifies the importance of our \"visually-named\" queries. \"Self-audio\" means removing the motion cross attention layer, which confirms the effectiveness of adding the motion feature. We tried two baseline designs against our final selection \"Motion-self-audio\", as stated in Sec. 3.3. \"Self-motion-audio\" is a design that puts self-, motion cross-, and audio cross-attention in a single decoder layer. \"Dual-stream\" means we conduct motion and audio cross-attention in parallel then fuse in the decoder layer. Specific details are in the Supplemental material.", + "bbox": [ + 496, + 497, + 890, + 694 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 709, + 617, + 724 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We proposed an audio-visual separation method using an adaptable query-based audio mask transformer network. Our network disentangles different sound sources explicitly through learnable audio prototypes initiated by \"visually naming\". We demonstrate cross-modal consistency and cross-instrument contrast via a multi-modal cross-attention mechanism. When generalizing to new unseen classes, our method can be adapted by inserting additional queries as audio prompts while freezing the attention mechanism. Experiments on both musical and general sound datasets demonstrate performance gain by our iQuery.", + "bbox": [ + 496, + 734, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "14682", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisserman. The conversation: Deep audio-visual speech enhancement. arXiv preprint arXiv:1804.04121, 2018. 2", + "[2] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisserman. My lips are concealed: Audio-visual speech enhancement through obstructions. arXiv preprint arXiv:1907.04975, 2019. 2", + "[3] Triantafyllos Afouras, Andrew Owens, Joon Son Chung, and Andrew Zisserman. Self-supervised learning of audiovisual objects from video. In European Conference on Computer Vision (ECCV), pages 208-224, 2020. 1", + "[4] Relja Arandjelovic and Andrew Zisserman. Look, listen and learn. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 609-617, 2017. 2", + "[5] Relja Arandjelovic and Andrew Zisserman. Objects that sound. In Proceedings of the European conference on computer vision (ECCV), pages 435-451, 2018. 
2", + "[6] Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lucic, and Cordelia Schmid. Vivit: A video vision transformer. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 6836-6846, 2021. 3", + "[7] Hyojin Bahng, Ali Jahanian, Swami Sankaranarayanan, and Phillip Isola. Visual prompting: Modifying pixel space to adapt pre-trained models. arXiv preprint arXiv:2203.17274, 2022. 2", + "[8] Estefanía Cano, Derry FitzGerald, and Karlheinz Brandenburg. Evaluation of quality of sound source separation algorithms: Human perception vs quantitative metrics. In 2016 24th European Signal Processing Conference (EUSIPCO), pages 1758-1762. IEEE, 2016. 7", + "[9] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European Conference on Computer Vision (ECCV), pages 213-229. Springer, 2020. 3, 4", + "[10] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6299-6308, 2017. 4", + "[11] British Chandna, Marius Miron, Jordi Janer, and Emilia Gómez. Monoaural audio source separation using deep convolutional neural networks. In International conference on latent variable analysis and signal separation, pages 258-266. Springer, 2017. 2", + "[12] Moitreya Chatterjee, Narendra Ahuja, and Anoop Cherian. Learning audio-visual dynamics using scene graphs for audio source separation. In Advances in Neural Information Processing Systems (NeurIPS), 2022. 2", + "[13] Moitreya Chatterjee, Jonathan Le Roux, Narendra Ahuja, and Anoop Cherian. Visual scene graphs for audio source separation. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 1204-1213, 2021. 2", + "[14] Honglie Chen, Weidi Xie, Triantafyllos Afouras, Arsha Nagrani, Andrea Vedaldi, and Andrew Zisserman. Localizing visual sounds the hard way. In IEEE/CVF Conference on" + ], + "bbox": [ + 86, + 116, + 468, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Computer Vision and Pattern Recognition (CVPR), pages 16867-16876, 2021. 2", + "[15] Bowen Cheng, Ishan Misra, Alexander G Schwing, Alexander Kirillov, and Rohit Girdhar. Masked-attention mask transformer for universal image segmentation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1290–1299, 2022. 1, 3", + "[16] Bowen Cheng, Alex Schwing, and Alexander Kirillov. Per-pixel classification is not all you need for semantic segmentation. Advances in Neural Information Processing Systems (NeurIPS), 34, 2021. 1, 3, 4, 6", + "[17] Ying Cheng, Ruize Wang, Zhihao Pan, Rui Feng, and Yuejie Zhang. Look, listen, and attend: Co-attention network for self-supervised audio-visual representation learning. In Proceedings of the 28th ACM International Conference on Multimedia, pages 3884–3892, 2020. 1", + "[18] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.3", + "[19] Shuangrui Ding, Maomao Li, Tianyu Yang, Rui Qian, Haohang Xu, Qingyi Chen, Jue Wang, and Hongkai Xiong. Motion-aware contrastive video representation learning via foreground-background merging. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9716-9726, 2022. 
4, 6", + "[20] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020.3", + "[21] Ariel Ephrat, Inbar Mosseri, Oran Lang, Tali Dekel, Kevin Wilson, Avinatan Hassidim, William T Freeman, and Michael Rubinstein. Looking to listen at the cocktail party: A speaker-independent audio-visual model for speech separation. arXiv preprint arXiv:1804.03619, 2018. 2, 3", + "[22] Cédric Févotte, Nancy Bertin, and Jean-Louis Durrieu. Nonnegative matrix factorization with the itakura-saito divergence: With application to music analysis. Neural computation, 21(3):793-830, 2009. 2", + "[23] Chuang Gan, Deng Huang, Hang Zhao, Joshua B Tenenbaum, and Antonio Torralba. Music gesture for visual sound separation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10478-10487, 2020. 1, 3, 4, 5, 7", + "[24] Ruohan Gao, Rogerio Feris, and Kristen Grauman. Learning to separate object sounds by watching unlabeled video. In European Conference on Computer Vision (ECCV), pages 35-53, 2018. 2", + "[25] Ruohan Gao and Kristen Grauman. 2.5 d visual sound. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 324-333, 2019. 2", + "[26] Ruohan Gao and Kristen Grauman. Co-separating sounds of visual objects. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 3879-3888, 2019. 1, 2, 4, 5, 6, 7" + ], + "bbox": [ + 509, + 93, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "14683", + "bbox": [ + 480, + 945, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[27] Ruohan Gao and Kristen Grauman. Visualvoice: Audiovisual speech separation with cross-modal consistency. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15490-15500. IEEE, 2021. 1, 2, 3", + "[28] Tianyu Gao, Adam Fisch, and Danqi Chen. Making pretrained language models better few-shot learners. arXiv preprint arXiv:2012.15723, 2020. 2", + "[29] Jort F Gemmeke, Daniel PW Ellis, Dylan Freedman, Aren Jansen, Wade Lawrence, R Channing Moore, Manoj Plakal, and Marvin Ritter. Audio set: An ontology and human-labeled dataset for audio events. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 776-780. IEEE, 2017. 5", + "[30] Daniel Griffin and Jae Lim. Signal estimation from modified short-time fourier transform. IEEE Transactions on acoustics, speech, and signal processing, 32(2):236-243, 1984. 4", + "[31] Simon Haykin and Zhe Chen. The cocktail party problem. Neural computation, 17(9):1875-1902, 2005. 2", + "[32] John R Hershey, Zhuo Chen, Jonathan Le Roux, and Shinji Watanabe. Deep clustering: Discriminative embeddings for segmentation and separation. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 31-35. IEEE, 2016. 1", + "[33] Di Hu, Feiping Nie, and Xuelong Li. Deep multimodal clustering for unsupervised audiovisual learning. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9248-9257, 2019. 1", + "[34] Po-Sen Huang, Minje Kim, Mark Hasegawa-Johnson, and Paris Smaragdis. Joint optimization of masks and deep recurrent neural networks for monaural source separation. 
IEEE/ACM Transactions on Audio, Speech, and Language Processing, 23(12):2136-2147, 2015. 1", + "[35] Menglin Jia, Luming Tang, Bor-Chun Chen, Claire Cardie, Serge Belongie, Bharath Hariharan, and Ser-Nam Lim. Visual prompt tuning. In European Conference on Computer Vision (ECCV), pages 709-727. Springer, 2022. 2", + "[36] Einat Kidron, Yoav Y Schechner, and Michael Elad. Pixels that sound. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), volume 1, pages 88-95. IEEE, 2005. 2", + "[37] Kevin Kilgour, Beat Gfeller, Qingqing Huang, Aren Jansen, Scott Wisdom, and Marco Tagliasacchi. Text-driven separation of arbitrary sounds. arXiv preprint arXiv:2204.05738, 2022. 2", + "[38] Ivan Krasin, Tom Duerig, Neil Alldrin, Vittorio Ferrari, Sami Abu-El-Haija, Alina Kuznetsova, Hassan Rom, Jasper Uijlings, Stefan Popov, Andreas Veit, et al. Openimages: A public dataset for large-scale multi-label and multi-class image classification. Dataset available from https://github.com/openimages, 2(3):18, 2017. 6", + "[39] Jiyoung Lee, Soo-Whan Chung, Sunok Kim, Hong-Goo Kang, and Kwanghoon Sohn. Looking into your speech: Learning cross-modal affinity for audio-visual speech separation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1336–1345, 2021. 2, 3" + ], + "bbox": [ + 86, + 92, + 470, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[40] Jie Hwan Lee, Hyeong-Seok Choi, and Kyogu Lee. Audio query-based music source separation. arXiv preprint arXiv:1908.06593, 2019. 2", + "[41] Dongze Lian, Daquan Zhou, Jiashi Feng, and Xinchao Wang. Scaling & shifting your features: A new baseline for efficient model tuning. In Advances in Neural Information Processing Systems (NeurIPS), 2022. 2", + "[42] Ze Liu, Jia Ning, Yue Cao, Yixuan Wei, Zheng Zhang, Stephen Lin, and Han Hu. Video swim transformer. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3202-3211, 2022. 3", + "[43] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 6", + "[44] Yan Lu, Xinzhu Ma, Lei Yang, Tianzhu Zhang, Yating Liu, Qi Chu, Junjie Yan, and Wanli Ouyang. Geometry uncertainty projection network for monocular 3d object detection. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 3111-3121, 2021. 8", + "[45] Sagnik Majumder, Ziad Al-Halah, and Kristen Grauman. Move2hear: Active audio-visual source separation. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 275–285, 2021. 2", + "[46] Sagnik Majumder and Kristen Grauman. Active audiovisual separation of dynamic sound sources. In European Conference on Computer Vision (ECCV), pages 551-569. Springer, 2022. 2", + "[47] Mingyuan Mao, Renrui Zhang, Honghui Zheng, Teli Ma, Yan Peng, Errui Ding, Baochang Zhang, Shumin Han, et al. Dual-stream network for visual recognition. Advances in Neural Information Processing Systems (NeurIPS), 34:25346-25358, 2021. 3", + "[48] Josh H McDermott. The cocktail party problem. Current Biology, 19(22):R1024-R1027, 2009. 2", + "[49] Otniel-Bogdan Mercea, Thomas Hummel, A Koepke, and Zeynep Akata. Temporal and cross-modal attention for audio-visual zero-shot learning. In European Conference on Computer Vision (ECCV), pages 488–505. Springer, 2022. 2", + "[50] Ishan Misra, Rohit Girdhar, and Armand Joulin. An end-to-end transformer model for 3d object detection. 
In IEEE/CVF International Conference on Computer Vision (ICCV), pages 2906-2917, 2021. 3", + "[51] Juan F Montesinos, Venkatesh S Kadandale, and Gloria Haro. Vovit: Low latency graph-based audio-visual voice separation transformer. In European Conference on Computer Vision (ECCV), pages 310–326. Springer, 2022. 2", + "[52] Pedro Morgado, Nuno Nvasconcelos, Timothy Langlois, and Oliver Wang. Self-supervised generation of spatial audio for 360 video. Advances in Neural Information Processing Systems (NeurIPS), 31, 2018. 2", + "[53] Andrew Owens and Alexei A Efros. Audio-visual scene analysis with self-supervised multisensory features. In European Conference on Computer Vision (ECCV), pages 631–648, 2018. 1, 2, 5, 7", + "[54] Andrew Owens, Phillip Isola, Josh McDermott, Antonio Torralba, Edward H Adelson, and William T Freeman. Visually indicated sounds. In IEEE/CVF Conference on Com" + ], + "bbox": [ + 509, + 92, + 890, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "14684", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "puter Vision and Pattern Recognition (CVPR), pages 2405-2413, 2016. 2", + "[55] Rui Qian, Di Hu, Heinrich Dinkel, Mengyue Wu, Ning Xu, and Weiyao Lin. Multiple sound sources localization from coarse to fine. In European Conference on Computer Vision (ECCV), pages 292-308. Springer, 2020. 2", + "[56] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning (ICML), pages 8748-8763. PMLR, 2021. 6", + "[57] Colin Raffel, Brian McFee, Eric J Humphrey, Justin Salamon, Oriol Nieto, Dawen Liang, Daniel PW Ellis, and C Colin Raffel. mir.eval: A transparent implementation of common mir metrics. In *In Proceedings of the 15th International Society for Music Information Retrieval Conference*, ISMIR. CiteSeer, 2014. 5, 7", + "[58] Tanzila Rahman, Mengyu Yang, and Leonid Sigal. Tribert: Full-body human-centric audio-visual representation learning for visual sound separation. arXiv preprint arXiv:2110.13412, 2021. 1, 3, 4, 5, 7", + "[59] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In International Conference on Medical image computing and computer-assisted intervention, pages 234-241. Springer, 2015. 4", + "[60] Sam Roweis. One microphone source separation. Advances in Neural Information Processing Systems (NeurIPS), 13, 2000. 1", + "[61] Arda Senocak, Tae-Hyun Oh, Junsik Kim, Ming-Hsuan Yang, and In So Kweon. Learning to localize sound source in visual scenes. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4358-4366, 2018. 2", + "[62] Bowen Shi, Wei-Ning Hsu, Kushal Lakhotia, and Abdelrahman Mohamed. Learning audio-visual speech representation by masked multimodal cluster prediction. In International Conference on Learning Representations (ICLR), 2022. 2", + "[63] Zengjie Song, Yuxi Wang, Junsong Fan, Tieniu Tan, and Zhaoxiang Zhang. Self-supervised predictive learning: A negative-free method for sound source localization in visual scenes. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3222-3231, 2022. 2", + "[64] Martin Spiertz and Volker Gnann. Source-filter based clustering for monaural blind source separation. 
In Proceedings of the 12th International Conference on Digital Audio Effects, volume 4, 2009. 5, 7", + "[65] Robin Strudel, Ricardo Garcia, Ivan Laptev, and Cordelia Schmid. Segmenter: Transformer for semantic segmentation. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 7262-7272, 2021. 1, 3", + "[66] Yapeng Tian, Di Hu, and Chenliang Xu. Cyclic co-learning of sounding object visual grounding and sound separation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2745-2754, 2021. 2, 3, 4, 5, 6, 7" + ], + "bbox": [ + 86, + 92, + 468, + 898 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[67] Yapeng Tian, Dingzeyu Li, and Chenliang Xu. Unified multisensory perception: Weakly-supervised audio-visual video parsing. In European Conference on Computer Vision (ECCV), pages 436–454. Springer, 2020. 2", + "[68] Yapeng Tian, Jing Shi, Bochen Li, Zhiyao Duan, and Chenliang Xu. Audio-visual event localization in unconstrained videos. In European Conference on Computer Vision (ECCV), pages 247–263, 2018. 2, 5, 6", + "[69] Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Hervé Jégou. Training data-efficient image transformers & distillation through attention. In International Conference on Machine Learning (ICML), pages 10347-10357. PMLR, 2021. 3", + "[70] Thanh-Dat Truong, Chi Nhan Duong, Hoang Anh Pham, Bhiksha Raj, Ngan Le, Khoa Luu, et al. The right to talk: An audio-visual transformer approach. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 1105–1114, 2021. 2", + "[71] Efthymios Tzinis, Scott Wisdom, Aren Jansen, Shawn Hershey, Tal Remez, Dan Ellis, and John R Hershey. Into the wild with audioscope: Unsupervised audio-visual separation of on-screen sounds. In International Conference on Learning Representations (ICLR), 2020. 2, 3", + "[72] Efthymios Tzinis, Scott Wisdom, Tal Remez, and John R Hershey. Audioscopev2: Audio-visual attention architectures for calibrated open-domain on-screen sound separation. In European Conference on Computer Vision (ECCV), pages 368–385. Springer, 2022. 2, 3", + "[73] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in Neural Information Processing Systems (NeurIPS), 30, 2017. 3, 4", + "[74] Tuomas Virtanen. Monaural sound source separation by nonnegative matrix factorization with temporal continuity and sparseness criteria. IEEE transactions on audio, speech, and language processing, 15(3):1066-1074, 2007. 1", + "[75] Ho-Hsiang Wu, Prem Seetharaman, Kundan Kumar, and Juan Pablo Bello. Wav2clip: Learning robust audio representations from clip. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 4563-4567. IEEE, 2022. 2", + "[76] Yu Wu, Linchao Zhu, Yan Yan, and Yi Yang. Dual attention matching for audio-visual event localization. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 6292–6300, 2019. 2", + "[77] Yan Xia and Zhou Zhao. Cross-modal background suppression for audio-visual event localization. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19989-19998, 2022. 2", + "[78] Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M Alvarez, and Ping Luo. Segformer: Simple and efficient design for semantic segmentation with transformers. 
Advances in Neural Information Processing Systems (NeurIPS), 34, 2021. 1, 3", + "[79] Xudong Xu, Bo Dai, and Dahua Lin. Recursive visual sound separation using minus-plus net. In IEEE/CVF In" + ], + "bbox": [ + 509, + 92, + 890, + 900 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "14685", + "bbox": [ + 480, + 945, + 517, + 955 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ternational Conference on Computer Vision (ICCV), pages 882-891, 2019. 2, 5, 7", + "[80] Xudong Xu, Hang Zhou, Ziwei Liu, Bo Dai, Xiaogang Wang, and Dahua Lin. Visually informed binaural audio generation without binaural audios. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15485-15494, 2021. 2", + "[81] Dong Yu, Morten Kolbaek, Zheng-Hua Tan, and Jesper Jensen. Permutation invariant training of deep models for speaker-independent multi-talker speech separation. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 241-245. IEEE, 2017. 1", + "[82] Li Yuan, Yunpeng Chen, Tao Wang, Weihao Yu, Yujun Shi, Zi-Hang Jiang, Francis EH Tay, Jiashi Feng, and Shuicheng Yan. Tokens-to-token vit: Training vision transformers from scratch onImagenet. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 558-567, 2021. 3", + "[83] Rowan Zellers, Jiasen Lu, Ximing Lu, Youngjae Yu, Yanpeng Zhao, Mohammadreza Salehi, Aditya Kusupati, Jack Hessel, Ali Farhadi, and Yejin Choi. Merlot reserve: Neural script knowledge through vision and language and sound. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 16375-16387, 2022. 2", + "[84] Renrui Zhang, Ziyu Guo, Peng Gao, Rongyao Fang, Bin Zhao, Dong Wang, Yu Qiao, and Hongsheng Li. Pointm2ae: Multi-scale masked autoencoders for hierarchical point cloud pre-training. Advances in Neural Information Processing Systems (NeurIPS), 2022. 3", + "[85] Renrui Zhang, Han Qiu, Tai Wang, Xuanzhuo Xu, Ziyu Guo, Yu Qiao, Peng Gao, and Hongsheng Li. Monodetr: Depth-aware transformer for monocular 3d object detection. arXiv preprint arXiv:2203.13310, 2022. 3", + "[86] Yuanhan Zhang, Kaiyang Zhou, and Ziwei Liu. Neural prompt search. arXiv preprint arXiv:2206.04673, 2022. 2", + "[87] Hang Zhao, Orazio Gallo, Iuri Frosio, and Jan Kautz. Loss functions for image restoration with neural networks. IEEE Transactions on computational imaging, 3(1):47-57, 2016. 5", + "[88] Hang Zhao, Chuang Gan, Wei-Chiu Ma, and Antonio Torralba. The sound of motions. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 1735-1744, 2019. 1, 3, 4, 5, 6, 7", + "[89] Hang Zhao, Chuang Gan, Andrew Rouditchenko, Carl Vondrick, Josh McDermott, and Antonio Torralba. The sound of pixels. In European Conference on Computer Vision (ECCV), pages 570-586, 2018. 1, 2, 3, 4, 5, 6, 7", + "[90] Hengshuang Zhao, Li Jiang, Jiaya Jia, Philip HS Torr, and Vladlen Koltun. Point transformer. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 16259-16268, 2021. 3", + "[91] Minghang Zheng, Peng Gao, Renrui Zhang, Kunchang Li, Xiaogang Wang, Hongsheng Li, and Hao Dong. End-to-end object detection with adaptive clustering transformer. arXiv preprint arXiv:2011.09315, 2020. 
3", + "[92] Sixiao Zheng, Jiachen Lu, Hengshuang Zhao, Xiatian Zhu, Zekun Luo, Yabiao Wang, Yanwei Fu, Jianfeng Feng, Tao" + ], + "bbox": [ + 86, + 92, + 470, + 900 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Xiang, Philip HS Torr, et al. Rethinking semantic segmentation from a sequence-to-sequence perspective with transformers. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6881-6890, 2021. 3", + "[93] Bolei Zhou, Aditya Khosla, Agata Lapedriza, Aude Oliva, and Antonio Torralba. Learning deep features for discriminative localization. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2921-2929, 2016. 2", + "[94] Dongzhan Zhou, Xinchi Zhou, Di Hu, Hang Zhou, Lei Bai, Ziwei Liu, and Wanli Ouyang. Sepfusion: Finding optimal fusion structures for visual sound separation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pages 3544-3552, 2022. 3", + "[95] Jinxing Zhou, Liang Zheng, Yiran Zhong, Shijie Hao, and Meng Wang. Positive sample propagation along the audiovisual event line. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8436-8444, 2021. 2", + "[96] Xingyi Zhou, Rohit Girdhar, Armand Joulin, Philipp Krähenbuhl, and Ishan Misra. Detecting twenty-thousand classes using image-level supervision. In European Conference on Computer Vision (ECCV), pages 350-368. Springer, 2022. 6", + "[97] Yipin Zhou, Zhaowen Wang, Chen Fang, Trung Bui, and Tamara L Berg. Visual to sound: Generating natural sound for videos in the wild. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3550-3558, 2018. 2", + "[98] Lingyu Zhu and Esa Rahtu. Visually guided sound source separation using cascaded opponent filter network. In Proceedings of the Asian Conference on Computer Vision, 2020. 5, 7", + "[99] Lingyu Zhu and Esa Rahtu. Visually guided sound source separation and localization using self-supervised motion representations. In IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 1289-1299, 2022. 1, 3, 4, 5, 6, 7", + "[100] Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, and Jifeng Dai. Deformable detr: Deformable transformers for end-to-end object detection. arXiv preprint arXiv:2010.04159, 2020. 3" + ], + "bbox": [ + 503, + 92, + 890, + 681 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "14686", + "bbox": [ + 480, + 945, + 519, + 955 + ], + "page_idx": 11 + } +] \ No newline at end of file diff --git a/2023/iQuery_ Instruments As Queries for Audio-Visual Sound Separation/79c43f28-c9f8-4b22-8dca-1b2b47c85f07_model.json b/2023/iQuery_ Instruments As Queries for Audio-Visual Sound Separation/79c43f28-c9f8-4b22-8dca-1b2b47c85f07_model.json new file mode 100644 index 0000000000000000000000000000000000000000..9cae5d7a0133f725ac24370c7eb63b28205b7c1d --- /dev/null +++ b/2023/iQuery_ Instruments As Queries for Audio-Visual Sound Separation/79c43f28-c9f8-4b22-8dca-1b2b47c85f07_model.json @@ -0,0 +1,2600 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.144, + 0.131, + 0.826, + 0.154 + ], + "angle": 0, + "content": "iQuery: Instruments as Queries for Audio-Visual Sound Separation" + }, + { + "type": "text", + "bbox": [ + 0.153, + 0.18, + 0.816, + 0.2 + ], + "angle": 0, + "content": "Jiaben Chen\\(^{1}\\), Renrui Zhang\\(^{2}\\), Dongze Lian\\(^{3}\\), Jiaqi Yang\\(^{4}\\), Ziyao Zeng\\(^{4}\\), Jianbo Shi\\(^{5}\\)" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.209, + 0.859, + 0.247 + ], + "angle": 0, + "content": "\\(^{1}\\)UC San Diego \\(^{2}\\)The Chinese University of Hong Kong \\(^{3}\\)National University of Singapore \\(^{4}\\)ShanghaiTech University \\(^{5}\\)University of Pennsylvania" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.287, + 0.314, + 0.303 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.318, + 0.474, + 0.651 + ], + "angle": 0, + "content": "Current audio-visual separation methods share a standard architecture design where an audio encoder-decoder network is fused with visual encoding features at the encoder bottleneck. This design confounds the learning of multi-modal feature encoding with robust sound decoding for audio separation. To generalize to a new instrument, one must fine-tune the entire visual and audio network for all musical instruments. We re-formulate the visual-sound separation task and propose Instruments as Queries (iQuery) with a flexible query expansion mechanism. Our approach ensures cross-modal consistency and cross-instrument disentanglement. We utilize \"visually named\" queries to initiate the learning of audio queries and use cross-modal attention to remove potential sound source interference at the estimated waveforms. To generalize to a new instrument or event class, drawing inspiration from the text-prompt design, we insert additional queries as audio prompts while freezing the attention mechanism. Experimental results on three benchmarks demonstrate that our iQuery improves audio-visual sound source separation performance. Code is available at https://github.com/JiabenChen/iQuery." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.68, + 0.21, + 0.696 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.705, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Humans use multi-modal perception to understand complex activities. To mimic this skill, researchers have studied audio-visual learning [3, 17, 33] by exploiting the synchronization and correlation between auditory and visual information. In this paper, we focus on the sound source separation task, where we aim to identify and separate different sound components within a given sound mixture [60, 74]. Following the \"Mix-and-Separate\" framework [32, 34, 81], we learn to separate sounds by mixing multiple audio signals to generate an artificially complex auditory representation and then use it as a self-supervised task to separate individual sounds from the mixture. The works [26, 53, 89] showed that visually-guided sound separation is achievable" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.289, + 0.854, + 0.304 + ], + "angle": 0, + "content": "by leveraging visual information of the sound source." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.306, + 0.893, + 0.518 + ], + "angle": 0, + "content": "Prevalent architectures take a paradigm of a visual-conditioned encoder-decoder architecture [23, 26, 58, 88], where encoded features from audio and visual modalities are fused at the bottleneck for decoding to yield separated spectrogram masks. However, it is noticed that this design often creates a \"muddy\" sound and \"cross-talk\" that leaks from one instrument to another. To create a clean sound separation, one would like the audio-visual encoders to be (1) self-consistent within the music instrument and (2) contrasting across. One approach [27] added critic functions explicitly to enforce these properties. Another method [99] used a two-step process with the second motion-conditioned generation process to filter out unwanted cross-talks. We call these approaches decoder-centric." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.521, + 0.895, + 0.718 + ], + "angle": 0, + "content": "Most recent works focus on addressing the \"muddy\" and \"cross-talk\" issue by improving fine details of audio-visual feature extraction: for example, adding human motion encoding as in [23, 88, 99], or cross-modality representations [58] via self-supervised learning. Once the feature representations are learned, the standard encoder-decoder FCN style segmentation is used as an afterthought. We consider these methods feature-centric. The standard designs have two limitations. First, it is hard to balance decoder-centric and feature-centric approaches that enforce a common goal of cross-modality consistency and cross-instrument contrast. Second, to learn a new musical instrument, one has to retrain the entire network via self-supervision." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.895, + 0.902 + ], + "angle": 0, + "content": "To tackle these limitations, we propose a query-based sound separation framework, iQuery. We recast this problem from a query-based transformer segmentation view, where each query learns to segment one instrument, similar to visual segmentation [15, 16, 65, 78]. We treat each audio query as a learnable prototype that parametrically models one sound class. We fuse visual modality with audio by \"visually naming\" the audio query: using object detection to assign visual features to the corresponding audio query. Within the transformer decoder, the visually initialized queries interact with the audio features through cross-attention, thus ensuring cross-modality consistency. Self" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "14675" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.12, + 0.092, + 0.852, + 0.351 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.362, + 0.892, + 0.446 + ], + "angle": 0, + "content": "Figure 1. Pipeline of iQuery. Our system takes as input an audio mixture and its corresponding video frames, and disentangles separated sound sources for each video. Our pipeline consists of two main modules: an Audio-Visual Feature Extraction module which extracts audio, object, and motion features through three corresponding encoders, and an Audio-Visual Transformer module for sound separation. 
The query-based sound separation transformer has three key components: 1) \"visually-named\" audio queries are initialized by extracted object features, 2) cross-attention between the audio queries with static image features, dynamic motion features and audio features, 3) self-attention between the learned audio queries to ensure cross-instrument contrast." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.473, + 0.469, + 0.532 + ], + "angle": 0, + "content": "attention across the audio queries for different instruments implements a soft version of the cross-instrument contrast objective. With this design, we unify the feature-centric with the decoder-centric approach." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.534, + 0.469, + 0.683 + ], + "angle": 0, + "content": "How do we achieve generalizability? Motivated by recent success in fine-tuning domain transfer with the text-prompt [28] and visual-prompt designs [7, 35, 41, 86], we adaptively insert the additional queries as audio prompts to accommodate new instruments. With the audio-prompt design, we freeze most of the transformer network parameters and only fine-tune the newly added query embedding layer. We conjecture that the learned prototype queries are instrument-dependent, while the cross/self-attention mechanism in the transformer is instrument-independent." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.685, + 0.283, + 0.699 + ], + "angle": 0, + "content": "Our main contributions are:" + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.705, + 0.469, + 0.78 + ], + "angle": 0, + "content": "- To the best of our knowledge, we are the first to study the audio-visual sound separation problem from a tunable query view to disentangle different sound sources explicitly through learnable audio prototypes in a mask transformer architecture." + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.788, + 0.469, + 0.832 + ], + "angle": 0, + "content": "- To generalize to a new sound class, we design an audio prompt for fine-tuning with most of the transformer architecture frozen." + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.841, + 0.469, + 0.901 + ], + "angle": 0, + "content": "- Extensive experiments and ablations verify the effectiveness of our core designs for disentangle-. ment, demonstrating performance gain for audiovisual sound source separation on three benchmarks." + }, + { + "type": "list", + "bbox": [ + 0.096, + 0.705, + 0.469, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.471, + 0.637, + 0.487 + ], + "angle": 0, + "content": "2. Related work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.508, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Audio-Visual Sound Source Separation. Recent years have witnessed promising results of audiovisual multi-modality joint learning [49, 62, 67, 75, 83] in domains like audio-visual sound source localization [4, 5, 14, 36, 55, 61, 63, 93], audio-visual event localization [68, 76, 77, 95] and sound synthesis from videos [25, 52, 54, 80, 97]. Sound source separation, a challenging classical problem, has been researched extensively in the audio signal processing area [11, 22, 37, 40]. A well-known example is the cocktail party problem [31, 48] in speech domain [1, 21]. Works have been proposed recently for tasks like speech separation [2, 27, 39, 51, 70], active sound separation [45, 46] and on-screen sound separation [25, 53, 71, 72]. Our work focuses on audio-visual sound separation. 
Recent audio-visual sound separation methods could be classified generally into two categories: feature-centric and decoder-centric as discussed in Sec. 1. Feature-centric methods exploit various ways for visual feature extraction selection to aid this multi-modality task. Some works consider frame-based appearance features (static frame features [24, 79, 89] or detected object regions [26, 66]) for extracting visual semantic cues (e.g., instrument categories) to guide sound separation. [12, 13] adds embeddings from an audio-visual scene graph at the U-Net bottleneck to model the visual context of sound sources. Based on the assessment that motion signals" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "14676" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.203, + 0.089, + 0.773, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.35, + 0.895, + 0.409 + ], + "angle": 0, + "content": "Figure 2. Qualitative results on MUSIC test set. The first column shows the mixed video frames, the second to the fourth columns compare our predicted spectrogram masks against masks yielded by state-of-the-art algorithm [66] and ground truth masks, and the fifth to the seventh columns visualize separated spectrograms. [66] produces blurry masks and contains unseparated components from another sound source, while our system successfully generates accurate mask and clean spectrograms as the ground truth." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.433, + 0.473, + 0.63 + ], + "angle": 0, + "content": "could more tightly couple the moving sounding object with corresponding variations of sounds, recent approaches focus on including motion information into the pipeline (e.g., optical flow [88], and human pose [23,58]). Based on this, [94] proposes a framework to search for the optimal fusion strategy for multi-modal features. Decoder-centric methods explore prevention of \"cross-talk\" between the audio sources in the decoder stage. [99] designs a two-stage pipeline, where the second stage conducts a counterfactual synthesis through motion features to remove potentially leaked sound. The approach of [27] added critic functions explicitly to enforce cross-modal consistency and cross-instrument contrast." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.645, + 0.473, + 0.903 + ], + "angle": 0, + "content": "Vision Transformers. Motivated by transformer's success in natural language processing [73], transformers were first introduced in computer vision for image classification as ViT [20]. Given the superior long-range modeling capacity, many follow-up works [47, 69, 82] have upgraded ViT to achieve higher performance and widely surpassed convolutional neural networks. Further, transformer-based models are adopted for various downstream tasks, such as 2D object detection [9, 91, 100], semantic/instance segmentation [65, 78, 92], 3D object detection [50, 85], shape recognition [84, 90] and video understanding [6, 42]. Particularly, following the pipeline from DETR [9], MaskFormer [16] and Mask2Former [15] represent each mask candidate as a learnable query and conduct parallel decoding for instance-level segmentation. However, only few approaches [39, 58, 71, 72, 99] have extended transformer for audio-visual sound separation fields. 
[58] adopts a BERT" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.433, + 0.895, + 0.6 + ], + "angle": 0, + "content": "[18] architecture to learn visual, pose, and audio feature representations. [99] designs an audio-motion transformer to refine sound separation results through audio-motion feature fusion. These methods focus mainly on learning better contextualized multi-modality representations through an encoder transformer. In contrast, our mask transformer-based network focuses on the entire process of visual-audio separation task. We disentangle different sound sources through independent learnable query prototypes and segment each time-frequency region on the spectrogram via mask prediction in an end-to-end fashion." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.614, + 0.593, + 0.63 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.639, + 0.892, + 0.763 + ], + "angle": 0, + "content": "We first describe the formulation of the audio-visual sound separation task and introduce our pipeline iQuery briefly in Sec. 3.1. Then we introduce networks for learning representations from visual and audio modalities in Sec. 3.2 and our proposed cross-modality cross-attention transformer architecture for visual sound separation in Sec. 3.3. Finally, we introduce our adaptive query fine-tuning strategy through designs of flexible tunable queries in Sec. 3.4." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.771, + 0.611, + 0.787 + ], + "angle": 0, + "content": "3.1. Overview" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.795, + 0.895, + 0.904 + ], + "angle": 0, + "content": "As mentioned before, our goal is to disentangle the audio mixture concerning its corresponding sound sources in the given mixture by using so-called queries. Following previous works [21, 89], we adopt a commonly used \"Mix-and-Separate\" self-supervised source separation procedure. Given \\(K\\) video clips with accompanying audio signal: \\(\\{(V_k,s_k(t))\\}_{k\\in [1,K]}\\), we create a sound mixture:" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "14677" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.09, + 0.473, + 0.304 + ], + "angle": 0, + "content": "\\(s_{mix}(t) = \\sum_{k=1}^{K} s_k(t)\\) as training data. Our disentanglement goal is to separate sounds \\(s_k(t)\\) from \\(s_{mix}(t)\\) for sound sources in \\(V_k\\), respectively. The pipeline, as illustrated in Fig. 1, is mainly composed of two components: an Audio-Visual Feature Extraction module and a Mask Transformer-based Sound Separation module. First, in the feature extraction module, the object detector & image encoder, and video encoder extract object-level visual features and motion features from video clip \\(V_k\\). The audio network yields an audio feature and an audio embedding from the given sound mixture \\(s_{mix}(t)\\). After that, a cross-modal transformer decoder attends to visual and audio features and outputs audio mask embeddings, which are further combined with audio embeddings for sound separation." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.316, + 0.366, + 0.331 + ], + "angle": 0, + "content": "3.2. Audio-Visual Feature Extraction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.34, + 0.473, + 0.508 + ], + "angle": 0, + "content": "Object Detector & Image Encoder. 
To initialize the learning of audio queries, we assign object-level visual appearance features to the corresponding queries to create \"visually named\" queries. In our implementation, following [26], we use a Faster R-CNN object detector with a ResNet-101 backbone. For the frames of a given video clip \\( V_{k} \\), the detector is applied to obtain the set of detected objects \\( O_{k} \\). We then adopt a pre-trained ResNet-18, similar to [66], followed by a linear layer and max pooling to yield object-level features \\( F_{O_k}\\in \\mathbb{R}^{C_O} \\), where \\( C_O \\) denotes the channel dimension of the object features."
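},
{
"type": "text",
"content": "As an illustration of the step above, a minimal PyTorch-style sketch (not the authors' released code) of how the object-level features could be computed from detected object crops; the module name ObjectEncoder, the crop resolution and the exact pooling order are assumptions, since the paper only states that a pre-trained ResNet-18 is followed by a linear layer and max pooling.

```python
import torch
import torch.nn as nn
import torchvision

class ObjectEncoder(nn.Module):
    # Illustrative sketch: detected object crops -> per-object appearance features F_O.
    # Detections are assumed to come from an external Faster R-CNN detector.
    def __init__(self, out_dim: int = 256):
        super().__init__()
        backbone = torchvision.models.resnet18(weights=None)
        self.cnn = nn.Sequential(*list(backbone.children())[:-1])  # drop the classifier head
        self.proj = nn.Linear(512, out_dim)                        # linear layer to C_O dims

    def forward(self, crops: torch.Tensor) -> torch.Tensor:
        # crops: (num_frames, num_objects, 3, 224, 224) object regions cropped per frame
        t, o = crops.shape[:2]
        x = self.cnn(crops.flatten(0, 1)).flatten(1)   # (t * o, 512) pooled CNN features
        x = self.proj(x).view(t, o, -1)                # (t, o, C_O)
        # Max pooling over frames keeps one feature vector per detected object.
        return x.max(dim=0).values                     # (num_objects, C_O), i.e. F_O
```

In the full pipeline, the resulting per-object vectors play the role of \( F_{O_k} \) for the detected sounding objects of clip \( V_k \)."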
Specifically, \"visually naming\" means that we add the object features \( F_{O_k} \) to the corresponding query in \( Q \) via element-wise addition, yielding the \"visually-named\" queries \( Q_v \), which are then fed into the cross-attention layers of the transformer decoder."
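},
{
"type": "text",
"content": "A compact sketch of the visually-naming step (illustrative, not the authors' code), assuming each detected object has already been mapped to a query index, e.g. via its detected instrument category, and that its feature has been projected to the query dimension; the names AudioQueries and obj_to_query are placeholders.

```python
import torch
import torch.nn as nn

class AudioQueries(nn.Module):
    # Illustrative sketch of visually-named audio queries: Q_v = Q + F_O.
    def __init__(self, num_queries: int = 15, dim: int = 256):
        super().__init__()
        # One learnable prototype per pre-defined instrument type (N queries of dim C_Q).
        self.query_embed = nn.Embedding(num_queries, dim)

    def forward(self, obj_feats: torch.Tensor, obj_to_query: torch.Tensor) -> torch.Tensor:
        # obj_feats: (num_objects, dim) object features projected to the query dimension
        # obj_to_query: (num_objects,) long tensor, index of the query for each detected object
        q_v = self.query_embed.weight.clone()          # (N, dim) learnable prototypes Q
        # Element-wise addition of each object feature onto its corresponding query.
        q_v.index_add_(0, obj_to_query, obj_feats)
        return q_v                                      # Q_v, fed to the decoder cross-attention
```

Queries that receive no detection simply keep their purely learned prototype in this sketch."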
To generalize to new instrument/event classes, we propose to insert additional queries (audio prompts) that learn new audio prototypes for the unseen classes. With this design, we fine-tune only the query embedding layer while keeping all other parts of the transformer backbone frozen."
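},
{
"type": "text",
"content": "A sketch of how the audio-prompt fine-tuning described in this caption could be set up (illustrative, not the authors' code), assuming the model exposes its query embedding layer as model.query_embed, an nn.Embedding with one row per pre-trained prototype; the attribute name and the initialization of the new prompts are assumptions.

```python
import torch
import torch.nn as nn

def add_audio_prompts(model: nn.Module, num_new_classes: int) -> None:
    # Extend the pre-trained query embedding with num_new_classes audio prompts
    # and freeze everything except the (extended) query embedding layer.
    old = model.query_embed                        # assumed nn.Embedding of shape (N, C_Q)
    n, dim = old.weight.shape
    extended = nn.Embedding(n + num_new_classes, dim)
    with torch.no_grad():
        extended.weight[:n] = old.weight           # keep the pre-trained prototypes
        nn.init.normal_(extended.weight[n:], std=0.02)  # new prompts for unseen classes
    model.query_embed = extended

    for p in model.parameters():                   # freeze the transformer backbone ...
        p.requires_grad = False
    for p in model.query_embed.parameters():       # ... and tune only the query embeddings
        p.requires_grad = True
```

A fine-tuning optimizer would then be built only from the parameters that still require gradients."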
This dataset, collected for the audio-visual sound separation task, is relatively clean and its sound sources are always within the scene. We use 503 solo videos available online and split them into training/validation/testing sets of 453/25/25 videos from the 11 categories, following the same settings as [66]. The MUSIC-21 dataset [88] is an enlarged version of MUSIC [89] that adds 10 more common instrument categories: bagpipe, banjo, bassoon, congas, drum, electric bass, guzheng, piano, pipa, and ukulele. We use 1,092 available solo videos and split them into train/test sets of 894/198 videos from the 21 categories, following the same training/testing split as [23, 99]. The AVE dataset is a general audio-visual learning dataset covering 28 event classes such as animal behaviors, vehicles, and human activities. We follow the same setting as [99] and use 4143 videos from the AVE dataset [68]."
Human evaluation results for sound source separation on mixtures of different instrument types. Our system separates sounds with better perceptual quality as judged by human listeners."
The learning rate of the transformer is set to \(10^{-4}\) and is multiplied by 0.1 at the 60th epoch. The learning rate of the other networks is also set to \(10^{-4}\) and is multiplied by 0.1 at the 30th and 50th epochs, respectively. Training is conducted on 8 NVIDIA Titan V GPUs."
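},
{
"type": "text",
"content": "The optimization settings above written out as a short sketch (illustrative); partitioning the parameters into transformer_params and other_params is assumed to be done elsewhere.

```python
import torch

def build_optimizers(transformer_params, other_params):
    # AdamW for the mask transformer with weight decay 1e-4; Adam for all other networks.
    opt_transformer = torch.optim.AdamW(transformer_params, lr=1e-4, weight_decay=1e-4)
    opt_other = torch.optim.Adam(other_params, lr=1e-4)
    # Transformer LR is multiplied by 0.1 at epoch 60; the other LRs at epochs 30 and 50.
    sched_transformer = torch.optim.lr_scheduler.MultiStepLR(opt_transformer, milestones=[60], gamma=0.1)
    sched_other = torch.optim.lr_scheduler.MultiStepLR(opt_other, milestones=[30, 50], gamma=0.1)
    return (opt_transformer, sched_transformer), (opt_other, sched_other)
```"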
MethodsSDR↑SIR↑SAR↑
NMF-MFCC [64]0.925.686.84
Sound-of-Pixels [89]4.239.399.85
Co-Separation [26]6.5411.379.46
CCoL [66]7.7413.2211.54
iQuery (Ours)11.1715.8414.27
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.206, + 0.47, + 0.234 + ], + "angle": 0, + "content": "Table 1. Audio-visual sound separation results on MUSIC. Best results in bold and second-best results in Blue." + }, + { + "type": "table", + "bbox": [ + 0.093, + 0.252, + 0.451, + 0.396 + ], + "angle": 0, + "content": "
MethodsSDR↑SIR↑SAR↑
Sound-of-Pixels [89]*7.5213.0111.53
Co-Separation [26]*7.6413.8011.30
Sound-of-Motions [88]*8.3114.8213.11
Music Gesture [23]*10.1215.81-
TriBERT [58]10.0917.4512.80
AMnet [99]*11.0818.0013.22
iQuery (Ours)11.1215.9814.16
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.4, + 0.468, + 0.428 + ], + "angle": 0, + "content": "Table 2. Audio-visual sound separation results on MUSIC-21. The results noted by * are obtained from [23, 99]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.459, + 0.47, + 0.52 + ], + "angle": 0, + "content": "a general dataset containing scenes like male and female speeches, animal sounds, and vehicle sounds. This clearly shows our model's adaptivity to more general problems of sound source separation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.537, + 0.47, + 0.673 + ], + "angle": 0, + "content": "Qualitative evaluation. Fig. 2 illustrates qualitative sound separation results on MUSIC dataset. It can be seen that our method disentangles sound sources cleaner and more accurately, with less \"muddy\" sound. Fig. 6 provides additional qualitative examples on AVE dataset, and this again illustrates our model's good performance on general sound source separation cases. Both qualitative and quantitative results verify the superiority of our designed sound query-based segmentation pipeline iQuery." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.689, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Human evaluation. Our quantitative evaluation shows the superiority of our model compared with baseline models, however, studies [8] have shown that audio separation quality could not be truthfully determined purely by the widely used mir_eval [57] metrics. Due to this reason, we further conduct a subjective human evaluation to study the actual perceptual quality of sound-separation results. Specifically, we compare the sound separation result of our model and the publicly available best baseline model [66] on MUSIC [89]. We collected 50 testing samples for all 11 classes from the test set, and each testing sample contains separated sounds with a length of 6 seconds predicted by our model and baseline [66] for the same sound mixture. Ground truth sound is also provided for each sample as a" + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.089, + 0.875, + 0.233 + ], + "angle": 0, + "content": "
MethodsSDR↑SIR↑SAR↑
Multisensory [53]*0.843.446.69
Sound-of-Pixels [89]*1.217.086.84
Sound-of-Motions [88]*1.487.417.39
Minus-Plus [79]*1.967.958.08
Cascaded Filter [98]*2.688.188.48
AMnet [99]*3.719.1511.00
iQuery (Ours)5.028.2112.32
" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.236, + 0.892, + 0.265 + ], + "angle": 0, + "content": "Table 3. Audio-visual sound separation results on AVE. The results noted by * are obtained from [99]." + }, + { + "type": "table", + "bbox": [ + 0.527, + 0.278, + 0.863, + 0.376 + ], + "angle": 0, + "content": "
MethodsSDR↑SIR↑SAR↑
Sound-of-Pixels [89]4.118.179.84
Co-Separation [26]5.379.858.72
CCoL [66]6.7411.9410.22
iQuery (Ours)8.0411.6013.21
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.38, + 0.892, + 0.449 + ], + "angle": 0, + "content": "Table 4. Fine-tuning sound separation performance comparison. All methods are pretrained on MUSIC dataset without one particular instrument and then fine-tuned on this new data. Baseline models are tuned with whole network unfrozen, and we keep our transformer backbone frozen." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.476, + 0.892, + 0.613 + ], + "angle": 0, + "content": "reference. The experiment is conducted by 40 participants separately. For each participant, the orders of our model and baseline [66] are randomly shuffled, and we ask the participant to answer \"Which sound separation result is more close to the ground truth audio?\" for each sample. Statistical results are shown in Fig. 4. Notably, our method significantly surpasses the compared baseline with a winning rate of \\(72.45\\%\\). This additionally demonstrate the better actual perceptual performance of our model." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.627, + 0.892, + 0.718 + ], + "angle": 0, + "content": "Learned Query Embedding. To visualize that our proposed model has indeed learned to disentangle different sound sources through learnable queries, we show t-SNE embeddings of our learnable queries in MUSIC test set [89]. As is shown in Fig. 5, our queries tend to cluster by different instrument classes, learning representative prototypes." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.727, + 0.833, + 0.743 + ], + "angle": 0, + "content": "4.3. Extendable Audio Prompt Fine-tuning" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.892, + 0.885 + ], + "angle": 0, + "content": "Table. 4 evaluates our approach's generalization ability compared with previous methods. We conduct fine-tuning experiments by leave-one-out cross-validation. Baseline models are fine-tuned on the new instrument with all the networks structure unfrozen. With the design of audio prompts discussed in Sec. 3.4, we keep most of our transformer parameters frozen, only fine-tuning the query embedding layer, which has much fewer parameters (0.048% of the total parameters in Transformer)." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.886, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Fig. 7 (a) shows our performance with a varying num" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "14681" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.087, + 0.099, + 0.26, + 0.201 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.168, + 0.204, + 0.184, + 0.215 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.28, + 0.099, + 0.452, + 0.201 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.361, + 0.203, + 0.378, + 0.215 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.22, + 0.47, + 0.262 + ], + "angle": 0, + "content": "Figure 7. Fine-tuning curves of sound separation. (a) Fine-tuning with different number of unseen instrument classes on MUSIC. (b) Fine-tuning with different number of unseen event classes on AVE." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.291, + 0.47, + 0.487 + ], + "angle": 0, + "content": "ber of new instrument classes for fine-tuning on MUSIC dataset. 
We hold out 1, 2, 4, and 6 instrument classes during the pre-training stage and fine-tune our method on these new classes with only the query embedding layer unfrozen. The MUSIC dataset contains 11 instruments in total. Notably, our method still yields good results when the network is pre-trained on only 5 instrument types, i.e., fewer than the number of unseen classes. Fig. 7 (b) shows our model's fine-tuning performance on the AVE dataset with a varying number of new event classes. We follow the experimental setup used for MUSIC and hold out 2, 4, 6, 8, and 12 event classes for fine-tuning. This demonstrates our model's adaptivity to general sound separation cases."
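},
{
"type": "text",
"content": "A sketch of the leave-classes-out protocol described above (illustrative); whether the held-out classes are sampled randomly or fixed per run is not specified here, so the seeded random choice below is an assumption.

```python
import random

# The 11 MUSIC instrument categories listed in Sec. 4.1.
MUSIC_CLASSES = [
    'accordion', 'acoustic guitar', 'cello', 'clarinet', 'erhu', 'flute',
    'saxophone', 'trumpet', 'tuba', 'violin', 'xylophone',
]

def split_classes(all_classes, num_heldout, seed=0):
    # Pre-train on the remaining classes; fine-tune (via audio prompts) on the held-out ones.
    rng = random.Random(seed)
    heldout = set(rng.sample(sorted(all_classes), num_heldout))
    pretrain = [c for c in all_classes if c not in heldout]
    return pretrain, sorted(heldout)

pretrain_classes, finetune_classes = split_classes(MUSIC_CLASSES, num_heldout=6)
```

For AVE, the same helper could be reused with the 28 event classes and the larger hold-out sizes."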
ArchitectureSDR↑SIR↑SAR↑
w/o lrn.10.0514.2713.71
w/o adpt.10.8915.5114.14
w/ con. best11.0215.9114.10
Ours (w/o con.)</td><td>11.17</td><td>15.84</td><td>14.27</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.184, + 0.894, + 0.268 + ], + "angle": 0, + "content": "Table 5. Ablations on the auxiliary contrastive loss on MUSIC dataset. \"w/o lrn.\" denotes without learnable linear layer added to queries produced by Transformer decoder; \"w/o adpt.\" denotes that we use a fixed weight for auxiliary contrastive loss without the Hierarchical Task Learning strategy; \"w/ con. best\" denotes our best model design using auxiliary contrastive loss." + }, + { + "type": "table", + "bbox": [ + 0.548, + 0.283, + 0.843, + 0.389 + ], + "angle": 0, + "content": "
ArchitectureSDR↑SIR↑SAR↑
Random6.5810.7912.77
Self-audio10.5414.8114.23
Self-motion-audio10.6515.3713.96
Dual-stream10.4615.2513.79
Motion-self-audio11.1715.8414.27
" + }, + { + "type": "table_caption", + "bbox": [ + 0.517, + 0.393, + 0.875, + 0.406 + ], + "angle": 0, + "content": "Table 6. Ablations on the design of Transformer decoder." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.434, + 0.892, + 0.464 + ], + "angle": 0, + "content": "question we raised, that our \"visually-named\" queries are already contrastive enough for sound disentanglement." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.475, + 0.851, + 0.491 + ], + "angle": 0, + "content": "4.5. Ablations of Transformer decoder design" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.498, + 0.892, + 0.695 + ], + "angle": 0, + "content": "Ablation results of Transformer decoder design on \\( MUSIC \\) dataset is shown in Table. 6. \"Random\" denotes randomly assigning object features to queries, its poor separation result verifies the importance of our \"visually-named\" queries. \"Self-audio\" means removing the motion cross attention layer, which confirms the effectiveness of adding the motion feature. We tried two baseline designs against our final selection \"Motion-self-audio\", as stated in Sec. 3.3. \"Self-motion-audio\" is a design that puts self-, motion cross-, and audio cross-attention in a single decoder layer. \"Dual-stream\" means we conduct motion and audio cross-attention in parallel then fuse in the decoder layer. Specific details are in the Supplemental material." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.71, + 0.619, + 0.725 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We proposed an audio-visual separation method using an adaptable query-based audio mask transformer network. Our network disentangles different sound sources explicitly through learnable audio prototypes initiated by \"visually naming\". We demonstrate cross-modal consistency and cross-instrument contrast via a multi-modal cross-attention mechanism. When generalizing to new unseen classes, our method can be adapted by inserting additional queries as audio prompts while freezing the attention mechanism. Experiments on both musical and general sound datasets demonstrate performance gain by our iQuery." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "14682" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.117, + 0.469, + 0.157 + ], + "angle": 0, + "content": "[1] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisserman. The conversation: Deep audio-visual speech enhancement. arXiv preprint arXiv:1804.04121, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.16, + 0.469, + 0.213 + ], + "angle": 0, + "content": "[2] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisserman. My lips are concealed: Audio-visual speech enhancement through obstructions. arXiv preprint arXiv:1907.04975, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.217, + 0.469, + 0.272 + ], + "angle": 0, + "content": "[3] Triantafyllos Afouras, Andrew Owens, Joon Son Chung, and Andrew Zisserman. Self-supervised learning of audiovisual objects from video. In European Conference on Computer Vision (ECCV), pages 208-224, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.275, + 0.469, + 0.316 + ], + "angle": 0, + "content": "[4] Relja Arandjelovic and Andrew Zisserman. 
Look, listen and learn. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 609-617, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.318, + 0.469, + 0.358 + ], + "angle": 0, + "content": "[5] Relja Arandjelovic and Andrew Zisserman. Objects that sound. In Proceedings of the European conference on computer vision (ECCV), pages 435-451, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.361, + 0.469, + 0.416 + ], + "angle": 0, + "content": "[6] Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lucic, and Cordelia Schmid. Vivit: A video vision transformer. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 6836-6846, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.419, + 0.469, + 0.471 + ], + "angle": 0, + "content": "[7] Hyojin Bahng, Ali Jahanian, Swami Sankaranarayanan, and Phillip Isola. Visual prompting: Modifying pixel space to adapt pre-trained models. arXiv preprint arXiv:2203.17274, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.476, + 0.469, + 0.544 + ], + "angle": 0, + "content": "[8] Estefanía Cano, Derry FitzGerald, and Karlheinz Brandenburg. Evaluation of quality of sound source separation algorithms: Human perception vs quantitative metrics. In 2016 24th European Signal Processing Conference (EUSIPCO), pages 1758-1762. IEEE, 2016. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.547, + 0.469, + 0.614 + ], + "angle": 0, + "content": "[9] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European Conference on Computer Vision (ECCV), pages 213-229. Springer, 2020. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.617, + 0.469, + 0.672 + ], + "angle": 0, + "content": "[10] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6299-6308, 2017. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.675, + 0.469, + 0.743 + ], + "angle": 0, + "content": "[11] British Chandna, Marius Miron, Jordi Janer, and Emilia Gómez. Monoaural audio source separation using deep convolutional neural networks. In International conference on latent variable analysis and signal separation, pages 258-266. Springer, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.746, + 0.469, + 0.8 + ], + "angle": 0, + "content": "[12] Moitreya Chatterjee, Narendra Ahuja, and Anoop Cherian. Learning audio-visual dynamics using scene graphs for audio source separation. In Advances in Neural Information Processing Systems (NeurIPS), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.803, + 0.469, + 0.857 + ], + "angle": 0, + "content": "[13] Moitreya Chatterjee, Jonathan Le Roux, Narendra Ahuja, and Anoop Cherian. Visual scene graphs for audio source separation. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 1204-1213, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.86, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[14] Honglie Chen, Weidi Xie, Triantafyllos Afouras, Arsha Nagrani, Andrea Vedaldi, and Andrew Zisserman. Localizing visual sounds the hard way. 
In IEEE/CVF Conference on" + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.117, + 0.469, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.541, + 0.094, + 0.891, + 0.119 + ], + "angle": 0, + "content": "Computer Vision and Pattern Recognition (CVPR), pages 16867-16876, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.123, + 0.892, + 0.191 + ], + "angle": 0, + "content": "[15] Bowen Cheng, Ishan Misra, Alexander G Schwing, Alexander Kirillov, and Rohit Girdhar. Masked-attention mask transformer for universal image segmentation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1290–1299, 2022. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.194, + 0.892, + 0.247 + ], + "angle": 0, + "content": "[16] Bowen Cheng, Alex Schwing, and Alexander Kirillov. Per-pixel classification is not all you need for semantic segmentation. Advances in Neural Information Processing Systems (NeurIPS), 34, 2021. 1, 3, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.251, + 0.892, + 0.318 + ], + "angle": 0, + "content": "[17] Ying Cheng, Ruize Wang, Zhihao Pan, Rui Feng, and Yuejie Zhang. Look, listen, and attend: Co-attention network for self-supervised audio-visual representation learning. In Proceedings of the 28th ACM International Conference on Multimedia, pages 3884–3892, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.322, + 0.892, + 0.374 + ], + "angle": 0, + "content": "[18] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.379, + 0.892, + 0.46 + ], + "angle": 0, + "content": "[19] Shuangrui Ding, Maomao Li, Tianyu Yang, Rui Qian, Haohang Xu, Qingyi Chen, Jue Wang, and Hongkai Xiong. Motion-aware contrastive video representation learning via foreground-background merging. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9716-9726, 2022. 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.463, + 0.892, + 0.544 + ], + "angle": 0, + "content": "[20] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.548, + 0.892, + 0.615 + ], + "angle": 0, + "content": "[21] Ariel Ephrat, Inbar Mosseri, Oran Lang, Tali Dekel, Kevin Wilson, Avinatan Hassidim, William T Freeman, and Michael Rubinstein. Looking to listen at the cocktail party: A speaker-independent audio-visual model for speech separation. arXiv preprint arXiv:1804.03619, 2018. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.618, + 0.892, + 0.673 + ], + "angle": 0, + "content": "[22] Cédric Févotte, Nancy Bertin, and Jean-Louis Durrieu. Nonnegative matrix factorization with the itakura-saito divergence: With application to music analysis. Neural computation, 21(3):793-830, 2009. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.676, + 0.892, + 0.743 + ], + "angle": 0, + "content": "[23] Chuang Gan, Deng Huang, Hang Zhao, Joshua B Tenenbaum, and Antonio Torralba. Music gesture for visual sound separation. 
In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10478-10487, 2020. 1, 3, 4, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.746, + 0.892, + 0.8 + ], + "angle": 0, + "content": "[24] Ruohan Gao, Rogerio Feris, and Kristen Grauman. Learning to separate object sounds by watching unlabeled video. In European Conference on Computer Vision (ECCV), pages 35-53, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.803, + 0.892, + 0.844 + ], + "angle": 0, + "content": "[25] Ruohan Gao and Kristen Grauman. 2.5 d visual sound. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 324-333, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.847, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[26] Ruohan Gao and Kristen Grauman. Co-separating sounds of visual objects. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 3879-3888, 2019. 1, 2, 4, 5, 6, 7" + }, + { + "type": "list", + "bbox": [ + 0.511, + 0.094, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.518, + 0.956 + ], + "angle": 0, + "content": "14683" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.093, + 0.47, + 0.161 + ], + "angle": 0, + "content": "[27] Ruohan Gao and Kristen Grauman. Visualvoice: Audiovisual speech separation with cross-modal consistency. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15490-15500. IEEE, 2021. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.164, + 0.47, + 0.207 + ], + "angle": 0, + "content": "[28] Tianyu Gao, Adam Fisch, and Danqi Chen. Making pretrained language models better few-shot learners. arXiv preprint arXiv:2012.15723, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.208, + 0.471, + 0.29 + ], + "angle": 0, + "content": "[29] Jort F Gemmeke, Daniel PW Ellis, Dylan Freedman, Aren Jansen, Wade Lawrence, R Channing Moore, Manoj Plakal, and Marvin Ritter. Audio set: An ontology and human-labeled dataset for audio events. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 776-780. IEEE, 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.292, + 0.471, + 0.346 + ], + "angle": 0, + "content": "[30] Daniel Griffin and Jae Lim. Signal estimation from modified short-time fourier transform. IEEE Transactions on acoustics, speech, and signal processing, 32(2):236-243, 1984. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.349, + 0.468, + 0.376 + ], + "angle": 0, + "content": "[31] Simon Haykin and Zhe Chen. The cocktail party problem. Neural computation, 17(9):1875-1902, 2005. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.378, + 0.47, + 0.447 + ], + "angle": 0, + "content": "[32] John R Hershey, Zhuo Chen, Jonathan Le Roux, and Shinji Watanabe. Deep clustering: Discriminative embeddings for segmentation and separation. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 31-35. IEEE, 2016. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.449, + 0.47, + 0.504 + ], + "angle": 0, + "content": "[33] Di Hu, Feiping Nie, and Xuelong Li. Deep multimodal clustering for unsupervised audiovisual learning. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9248-9257, 2019. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.506, + 0.471, + 0.575 + ], + "angle": 0, + "content": "[34] Po-Sen Huang, Minje Kim, Mark Hasegawa-Johnson, and Paris Smaragdis. Joint optimization of masks and deep recurrent neural networks for monaural source separation. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 23(12):2136-2147, 2015. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.577, + 0.47, + 0.632 + ], + "angle": 0, + "content": "[35] Menglin Jia, Luming Tang, Bor-Chun Chen, Claire Cardie, Serge Belongie, Bharath Hariharan, and Ser-Nam Lim. Visual prompt tuning. In European Conference on Computer Vision (ECCV), pages 709-727. Springer, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.634, + 0.47, + 0.688 + ], + "angle": 0, + "content": "[36] Einat Kidron, Yoav Y Schechner, and Michael Elad. Pixels that sound. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), volume 1, pages 88-95. IEEE, 2005. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.691, + 0.47, + 0.745 + ], + "angle": 0, + "content": "[37] Kevin Kilgour, Beat Gfeller, Qingqing Huang, Aren Jansen, Scott Wisdom, and Marco Tagliasacchi. Text-driven separation of arbitrary sounds. arXiv preprint arXiv:2204.05738, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.747, + 0.47, + 0.83 + ], + "angle": 0, + "content": "[38] Ivan Krasin, Tom Duerig, Neil Alldrin, Vittorio Ferrari, Sami Abu-El-Haija, Alina Kuznetsova, Hassan Rom, Jasper Uijlings, Stefan Popov, Andreas Veit, et al. Openimages: A public dataset for large-scale multi-label and multi-class image classification. Dataset available from https://github.com/openimages, 2(3):18, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.832, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[39] Jiyoung Lee, Soo-Whan Chung, Sunok Kim, Hong-Goo Kang, and Kwanghoon Sohn. Looking into your speech: Learning cross-modal affinity for audio-visual speech separation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1336–1345, 2021. 2, 3" + }, + { + "type": "list", + "bbox": [ + 0.087, + 0.093, + 0.471, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[40] Jie Hwan Lee, Hyeong-Seok Choi, and Kyogu Lee. Audio query-based music source separation. arXiv preprint arXiv:1908.06593, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.137, + 0.892, + 0.19 + ], + "angle": 0, + "content": "[41] Dongze Lian, Daquan Zhou, Jiashi Feng, and Xinchao Wang. Scaling & shifting your features: A new baseline for efficient model tuning. In Advances in Neural Information Processing Systems (NeurIPS), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.193, + 0.892, + 0.248 + ], + "angle": 0, + "content": "[42] Ze Liu, Jia Ning, Yue Cao, Yixuan Wei, Zheng Zhang, Stephen Lin, and Han Hu. Video swim transformer. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3202-3211, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.25, + 0.892, + 0.277 + ], + "angle": 0, + "content": "[43] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.279, + 0.892, + 0.348 + ], + "angle": 0, + "content": "[44] Yan Lu, Xinzhu Ma, Lei Yang, Tianzhu Zhang, Yating Liu, Qi Chu, Junjie Yan, and Wanli Ouyang. 
Geometry uncertainty projection network for monocular 3d object detection. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 3111-3121, 2021. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.35, + 0.892, + 0.404 + ], + "angle": 0, + "content": "[45] Sagnik Majumder, Ziad Al-Halah, and Kristen Grauman. Move2hear: Active audio-visual source separation. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 275–285, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.407, + 0.892, + 0.461 + ], + "angle": 0, + "content": "[46] Sagnik Majumder and Kristen Grauman. Active audiovisual separation of dynamic sound sources. In European Conference on Computer Vision (ECCV), pages 551-569. Springer, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.463, + 0.892, + 0.532 + ], + "angle": 0, + "content": "[47] Mingyuan Mao, Renrui Zhang, Honghui Zheng, Teli Ma, Yan Peng, Errui Ding, Baochang Zhang, Shumin Han, et al. Dual-stream network for visual recognition. Advances in Neural Information Processing Systems (NeurIPS), 34:25346-25358, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.534, + 0.892, + 0.56 + ], + "angle": 0, + "content": "[48] Josh H McDermott. The cocktail party problem. Current Biology, 19(22):R1024-R1027, 2009. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.563, + 0.892, + 0.63 + ], + "angle": 0, + "content": "[49] Otniel-Bogdan Mercea, Thomas Hummel, A Koepke, and Zeynep Akata. Temporal and cross-modal attention for audio-visual zero-shot learning. In European Conference on Computer Vision (ECCV), pages 488–505. Springer, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.633, + 0.892, + 0.688 + ], + "angle": 0, + "content": "[50] Ishan Misra, Rohit Girdhar, and Armand Joulin. An end-to-end transformer model for 3d object detection. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 2906-2917, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.69, + 0.892, + 0.745 + ], + "angle": 0, + "content": "[51] Juan F Montesinos, Venkatesh S Kadandale, and Gloria Haro. Vovit: Low latency graph-based audio-visual voice separation transformer. In European Conference on Computer Vision (ECCV), pages 310–326. Springer, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.747, + 0.892, + 0.801 + ], + "angle": 0, + "content": "[52] Pedro Morgado, Nuno Nvasconcelos, Timothy Langlois, and Oliver Wang. Self-supervised generation of spatial audio for 360 video. Advances in Neural Information Processing Systems (NeurIPS), 31, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.803, + 0.892, + 0.858 + ], + "angle": 0, + "content": "[53] Andrew Owens and Alexei A Efros. Audio-visual scene analysis with self-supervised multisensory features. In European Conference on Computer Vision (ECCV), pages 631–648, 2018. 1, 2, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.86, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[54] Andrew Owens, Phillip Isola, Josh McDermott, Antonio Torralba, Edward H Adelson, and William T Freeman. Visually indicated sounds. 
In IEEE/CVF Conference on Com" + }, + { + "type": "list", + "bbox": [ + 0.51, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "14684" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.093, + 0.47, + 0.119 + ], + "angle": 0, + "content": "puter Vision and Pattern Recognition (CVPR), pages 2405-2413, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.122, + 0.47, + 0.176 + ], + "angle": 0, + "content": "[55] Rui Qian, Di Hu, Heinrich Dinkel, Mengyue Wu, Ning Xu, and Weiyao Lin. Multiple sound sources localization from coarse to fine. In European Conference on Computer Vision (ECCV), pages 292-308. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.178, + 0.468, + 0.259 + ], + "angle": 0, + "content": "[56] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning (ICML), pages 8748-8763. PMLR, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.261, + 0.468, + 0.342 + ], + "angle": 0, + "content": "[57] Colin Raffel, Brian McFee, Eric J Humphrey, Justin Salamon, Oriol Nieto, Dawen Liang, Daniel PW Ellis, and C Colin Raffel. mir.eval: A transparent implementation of common mir metrics. In *In Proceedings of the 15th International Society for Music Information Retrieval Conference*, ISMIR. CiteSeer, 2014. 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.344, + 0.468, + 0.398 + ], + "angle": 0, + "content": "[58] Tanzila Rahman, Mengyu Yang, and Leonid Sigal. Tribert: Full-body human-centric audio-visual representation learning for visual sound separation. arXiv preprint arXiv:2110.13412, 2021. 1, 3, 4, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.4, + 0.468, + 0.468 + ], + "angle": 0, + "content": "[59] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In International Conference on Medical image computing and computer-assisted intervention, pages 234-241. Springer, 2015. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.47, + 0.468, + 0.509 + ], + "angle": 0, + "content": "[60] Sam Roweis. One microphone source separation. Advances in Neural Information Processing Systems (NeurIPS), 13, 2000. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.511, + 0.468, + 0.579 + ], + "angle": 0, + "content": "[61] Arda Senocak, Tae-Hyun Oh, Junsik Kim, Ming-Hsuan Yang, and In So Kweon. Learning to localize sound source in visual scenes. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4358-4366, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.581, + 0.468, + 0.648 + ], + "angle": 0, + "content": "[62] Bowen Shi, Wei-Ning Hsu, Kushal Lakhotia, and Abdelrahman Mohamed. Learning audio-visual speech representation by masked multimodal cluster prediction. In International Conference on Learning Representations (ICLR), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.651, + 0.468, + 0.719 + ], + "angle": 0, + "content": "[63] Zengjie Song, Yuxi Wang, Junsong Fan, Tieniu Tan, and Zhaoxiang Zhang. Self-supervised predictive learning: A negative-free method for sound source localization in visual scenes. 
In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3222-3231, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.721, + 0.468, + 0.774 + ], + "angle": 0, + "content": "[64] Martin Spiertz and Volker Gnann. Source-filter based clustering for monaural blind source separation. In Proceedings of the 12th International Conference on Digital Audio Effects, volume 4, 2009. 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.776, + 0.468, + 0.831 + ], + "angle": 0, + "content": "[65] Robin Strudel, Ricardo Garcia, Ivan Laptev, and Cordelia Schmid. Segmenter: Transformer for semantic segmentation. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 7262-7272, 2021. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.833, + 0.468, + 0.899 + ], + "angle": 0, + "content": "[66] Yapeng Tian, Di Hu, and Chenliang Xu. Cyclic co-learning of sounding object visual grounding and sound separation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2745-2754, 2021. 2, 3, 4, 5, 6, 7" + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.093, + 0.47, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[67] Yapeng Tian, Dingzeyu Li, and Chenliang Xu. Unified multisensory perception: Weakly-supervised audio-visual video parsing. In European Conference on Computer Vision (ECCV), pages 436–454. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.149, + 0.892, + 0.204 + ], + "angle": 0, + "content": "[68] Yapeng Tian, Jing Shi, Bochen Li, Zhiyao Duan, and Chenliang Xu. Audio-visual event localization in unconstrained videos. In European Conference on Computer Vision (ECCV), pages 247–263, 2018. 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.205, + 0.892, + 0.273 + ], + "angle": 0, + "content": "[69] Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Hervé Jégou. Training data-efficient image transformers & distillation through attention. In International Conference on Machine Learning (ICML), pages 10347-10357. PMLR, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.275, + 0.892, + 0.342 + ], + "angle": 0, + "content": "[70] Thanh-Dat Truong, Chi Nhan Duong, Hoang Anh Pham, Bhiksha Raj, Ngan Le, Khoa Luu, et al. The right to talk: An audio-visual transformer approach. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 1105–1114, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.344, + 0.892, + 0.412 + ], + "angle": 0, + "content": "[71] Efthymios Tzinis, Scott Wisdom, Aren Jansen, Shawn Hershey, Tal Remez, Dan Ellis, and John R Hershey. Into the wild with audioscope: Unsupervised audio-visual separation of on-screen sounds. In International Conference on Learning Representations (ICLR), 2020. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.414, + 0.892, + 0.482 + ], + "angle": 0, + "content": "[72] Efthymios Tzinis, Scott Wisdom, Tal Remez, and John R Hershey. Audioscopev2: Audio-visual attention architectures for calibrated open-domain on-screen sound separation. In European Conference on Computer Vision (ECCV), pages 368–385. Springer, 2022. 
2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.484, + 0.892, + 0.551 + ], + "angle": 0, + "content": "[73] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in Neural Information Processing Systems (NeurIPS), 30, 2017. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.553, + 0.892, + 0.62 + ], + "angle": 0, + "content": "[74] Tuomas Virtanen. Monaural sound source separation by nonnegative matrix factorization with temporal continuity and sparseness criteria. IEEE transactions on audio, speech, and language processing, 15(3):1066-1074, 2007. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.623, + 0.892, + 0.69 + ], + "angle": 0, + "content": "[75] Ho-Hsiang Wu, Prem Seetharaman, Kundan Kumar, and Juan Pablo Bello. Wav2clip: Learning robust audio representations from clip. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 4563-4567. IEEE, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.692, + 0.892, + 0.748 + ], + "angle": 0, + "content": "[76] Yu Wu, Linchao Zhu, Yan Yan, and Yi Yang. Dual attention matching for audio-visual event localization. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 6292–6300, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.749, + 0.892, + 0.803 + ], + "angle": 0, + "content": "[77] Yan Xia and Zhou Zhao. Cross-modal background suppression for audio-visual event localization. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19989-19998, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.804, + 0.892, + 0.872 + ], + "angle": 0, + "content": "[78] Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M Alvarez, and Ping Luo. Segformer: Simple and efficient design for semantic segmentation with transformers. Advances in Neural Information Processing Systems (NeurIPS), 34, 2021. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.874, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[79] Xudong Xu, Bo Dai, and Dahua Lin. Recursive visual sound separation using minus-plus net. In IEEE/CVF In" + }, + { + "type": "list", + "bbox": [ + 0.511, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.519, + 0.956 + ], + "angle": 0, + "content": "14685" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.093, + 0.468, + 0.119 + ], + "angle": 0, + "content": "ternational Conference on Computer Vision (ICCV), pages 882-891, 2019. 2, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.121, + 0.47, + 0.189 + ], + "angle": 0, + "content": "[80] Xudong Xu, Hang Zhou, Ziwei Liu, Bo Dai, Xiaogang Wang, and Dahua Lin. Visually informed binaural audio generation without binaural audios. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15485-15494, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.19, + 0.471, + 0.272 + ], + "angle": 0, + "content": "[81] Dong Yu, Morten Kolbaek, Zheng-Hua Tan, and Jesper Jensen. Permutation invariant training of deep models for speaker-independent multi-talker speech separation. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 241-245. IEEE, 2017. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.274, + 0.471, + 0.355 + ], + "angle": 0, + "content": "[82] Li Yuan, Yunpeng Chen, Tao Wang, Weihao Yu, Yujun Shi, Zi-Hang Jiang, Francis EH Tay, Jiashi Feng, and Shuicheng Yan. Tokens-to-token vit: Training vision transformers from scratch onImagenet. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 558-567, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.357, + 0.471, + 0.44 + ], + "angle": 0, + "content": "[83] Rowan Zellers, Jiasen Lu, Ximing Lu, Youngjae Yu, Yanpeng Zhao, Mohammadreza Salehi, Aditya Kusupati, Jack Hessel, Ali Farhadi, and Yejin Choi. Merlot reserve: Neural script knowledge through vision and language and sound. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 16375-16387, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.441, + 0.471, + 0.509 + ], + "angle": 0, + "content": "[84] Renrui Zhang, Ziyu Guo, Peng Gao, Rongyao Fang, Bin Zhao, Dong Wang, Yu Qiao, and Hongsheng Li. Pointm2ae: Multi-scale masked autoencoders for hierarchical point cloud pre-training. Advances in Neural Information Processing Systems (NeurIPS), 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.511, + 0.471, + 0.566 + ], + "angle": 0, + "content": "[85] Renrui Zhang, Han Qiu, Tai Wang, Xuanzhuo Xu, Ziyu Guo, Yu Qiao, Peng Gao, and Hongsheng Li. Monodetr: Depth-aware transformer for monocular 3d object detection. arXiv preprint arXiv:2203.13310, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.567, + 0.471, + 0.594 + ], + "angle": 0, + "content": "[86] Yuanhan Zhang, Kaiyang Zhou, and Ziwei Liu. Neural prompt search. arXiv preprint arXiv:2206.04673, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.595, + 0.471, + 0.648 + ], + "angle": 0, + "content": "[87] Hang Zhao, Orazio Gallo, Iuri Frosio, and Jan Kautz. Loss functions for image restoration with neural networks. IEEE Transactions on computational imaging, 3(1):47-57, 2016. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.65, + 0.471, + 0.704 + ], + "angle": 0, + "content": "[88] Hang Zhao, Chuang Gan, Wei-Chiu Ma, and Antonio Torralba. The sound of motions. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 1735-1744, 2019. 1, 3, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.705, + 0.471, + 0.761 + ], + "angle": 0, + "content": "[89] Hang Zhao, Chuang Gan, Andrew Rouditchenko, Carl Vondrick, Josh McDermott, and Antonio Torralba. The sound of pixels. In European Conference on Computer Vision (ECCV), pages 570-586, 2018. 1, 2, 3, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.762, + 0.471, + 0.816 + ], + "angle": 0, + "content": "[90] Hengshuang Zhao, Li Jiang, Jiaya Jia, Philip HS Torr, and Vladlen Koltun. Point transformer. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 16259-16268, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.818, + 0.471, + 0.872 + ], + "angle": 0, + "content": "[91] Minghang Zheng, Peng Gao, Renrui Zhang, Kunchang Li, Xiaogang Wang, Hongsheng Li, and Hao Dong. End-to-end object detection with adaptive clustering transformer. arXiv preprint arXiv:2011.09315, 2020. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.873, + 0.471, + 0.901 + ], + "angle": 0, + "content": "[92] Sixiao Zheng, Jiachen Lu, Hengshuang Zhao, Xiatian Zhu, Zekun Luo, Yabiao Wang, Yanwei Fu, Jianfeng Feng, Tao" + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.093, + 0.471, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.541, + 0.093, + 0.892, + 0.148 + ], + "angle": 0, + "content": "Xiang, Philip HS Torr, et al. Rethinking semantic segmentation from a sequence-to-sequence perspective with transformers. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6881-6890, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.149, + 0.892, + 0.217 + ], + "angle": 0, + "content": "[93] Bolei Zhou, Aditya Khosla, Agata Lapedriza, Aude Oliva, and Antonio Torralba. Learning deep features for discriminative localization. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2921-2929, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.219, + 0.892, + 0.289 + ], + "angle": 0, + "content": "[94] Dongzhan Zhou, Xinchi Zhou, Di Hu, Hang Zhou, Lei Bai, Ziwei Liu, and Wanli Ouyang. Sepfusion: Finding optimal fusion structures for visual sound separation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pages 3544-3552, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.29, + 0.892, + 0.358 + ], + "angle": 0, + "content": "[95] Jinxing Zhou, Liang Zheng, Yiran Zhong, Shijie Hao, and Meng Wang. Positive sample propagation along the audiovisual event line. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8436-8444, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.36, + 0.892, + 0.43 + ], + "angle": 0, + "content": "[96] Xingyi Zhou, Rohit Girdhar, Armand Joulin, Philipp Krähenbuhl, and Ishan Misra. Detecting twenty-thousand classes using image-level supervision. In European Conference on Computer Vision (ECCV), pages 350-368. Springer, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.431, + 0.892, + 0.5 + ], + "angle": 0, + "content": "[97] Yipin Zhou, Zhaowen Wang, Chen Fang, Trung Bui, and Tamara L Berg. Visual to sound: Generating natural sound for videos in the wild. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3550-3558, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.501, + 0.892, + 0.556 + ], + "angle": 0, + "content": "[98] Lingyu Zhu and Esa Rahtu. Visually guided sound source separation using cascaded opponent filter network. In Proceedings of the Asian Conference on Computer Vision, 2020. 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.558, + 0.892, + 0.627 + ], + "angle": 0, + "content": "[99] Lingyu Zhu and Esa Rahtu. Visually guided sound source separation and localization using self-supervised motion representations. In IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 1289-1299, 2022. 1, 3, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.628, + 0.892, + 0.683 + ], + "angle": 0, + "content": "[100] Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, and Jifeng Dai. Deformable detr: Deformable transformers for end-to-end object detection. arXiv preprint arXiv:2010.04159, 2020. 
3" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.683 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "14686" + } + ] +] \ No newline at end of file diff --git a/2023/iQuery_ Instruments As Queries for Audio-Visual Sound Separation/79c43f28-c9f8-4b22-8dca-1b2b47c85f07_origin.pdf b/2023/iQuery_ Instruments As Queries for Audio-Visual Sound Separation/79c43f28-c9f8-4b22-8dca-1b2b47c85f07_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..bf83f5639e56e17d82fd1027f518058562490c88 --- /dev/null +++ b/2023/iQuery_ Instruments As Queries for Audio-Visual Sound Separation/79c43f28-c9f8-4b22-8dca-1b2b47c85f07_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ca0c2117e198b887fa1f8912d0efac041988774b2295af58f0f4d973054b23d +size 2102368 diff --git a/2023/iQuery_ Instruments As Queries for Audio-Visual Sound Separation/full.md b/2023/iQuery_ Instruments As Queries for Audio-Visual Sound Separation/full.md new file mode 100644 index 0000000000000000000000000000000000000000..2ae43c96de1b1ca2f2017b01095701b0d23c4ff4 --- /dev/null +++ b/2023/iQuery_ Instruments As Queries for Audio-Visual Sound Separation/full.md @@ -0,0 +1,322 @@ +# iQuery: Instruments as Queries for Audio-Visual Sound Separation + +Jiaben Chen $^{1}$ , Renrui Zhang $^{2}$ , Dongze Lian $^{3}$ , Jiaqi Yang $^{4}$ , Ziyao Zeng $^{4}$ , Jianbo Shi $^{5}$ + +$^{1}$ UC San Diego $^{2}$ The Chinese University of Hong Kong $^{3}$ National University of Singapore $^{4}$ ShanghaiTech University $^{5}$ University of Pennsylvania + +# Abstract + +Current audio-visual separation methods share a standard architecture design where an audio encoder-decoder network is fused with visual encoding features at the encoder bottleneck. This design confounds the learning of multi-modal feature encoding with robust sound decoding for audio separation. To generalize to a new instrument, one must fine-tune the entire visual and audio network for all musical instruments. We re-formulate the visual-sound separation task and propose Instruments as Queries (iQuery) with a flexible query expansion mechanism. Our approach ensures cross-modal consistency and cross-instrument disentanglement. We utilize "visually named" queries to initiate the learning of audio queries and use cross-modal attention to remove potential sound source interference at the estimated waveforms. To generalize to a new instrument or event class, drawing inspiration from the text-prompt design, we insert additional queries as audio prompts while freezing the attention mechanism. Experimental results on three benchmarks demonstrate that our iQuery improves audio-visual sound source separation performance. Code is available at https://github.com/JiabenChen/iQuery. + +# 1. Introduction + +Humans use multi-modal perception to understand complex activities. To mimic this skill, researchers have studied audio-visual learning [3, 17, 33] by exploiting the synchronization and correlation between auditory and visual information. In this paper, we focus on the sound source separation task, where we aim to identify and separate different sound components within a given sound mixture [60, 74]. 
Following the "Mix-and-Separate" framework [32, 34, 81], we learn to separate sounds by mixing multiple audio signals to generate an artificially complex auditory representation and then use it as a self-supervised task to separate individual sounds from the mixture. The works [26, 53, 89] showed that visually-guided sound separation is achievable + +by leveraging visual information of the sound source. + +Prevalent architectures take a paradigm of a visual-conditioned encoder-decoder architecture [23, 26, 58, 88], where encoded features from audio and visual modalities are fused at the bottleneck for decoding to yield separated spectrogram masks. However, it is noticed that this design often creates a "muddy" sound and "cross-talk" that leaks from one instrument to another. To create a clean sound separation, one would like the audio-visual encoders to be (1) self-consistent within the music instrument and (2) contrasting across. One approach [27] added critic functions explicitly to enforce these properties. Another method [99] used a two-step process with the second motion-conditioned generation process to filter out unwanted cross-talks. We call these approaches decoder-centric. + +Most recent works focus on addressing the "muddy" and "cross-talk" issue by improving fine details of audio-visual feature extraction: for example, adding human motion encoding as in [23, 88, 99], or cross-modality representations [58] via self-supervised learning. Once the feature representations are learned, the standard encoder-decoder FCN style segmentation is used as an afterthought. We consider these methods feature-centric. The standard designs have two limitations. First, it is hard to balance decoder-centric and feature-centric approaches that enforce a common goal of cross-modality consistency and cross-instrument contrast. Second, to learn a new musical instrument, one has to retrain the entire network via self-supervision. + +To tackle these limitations, we propose a query-based sound separation framework, iQuery. We recast this problem from a query-based transformer segmentation view, where each query learns to segment one instrument, similar to visual segmentation [15, 16, 65, 78]. We treat each audio query as a learnable prototype that parametrically models one sound class. We fuse visual modality with audio by "visually naming" the audio query: using object detection to assign visual features to the corresponding audio query. Within the transformer decoder, the visually initialized queries interact with the audio features through cross-attention, thus ensuring cross-modality consistency. Self + +![](images/bebce95567aff6300ad0b46f8f5ac5744a1e9a11a1842bb242f0a58a0d86d530.jpg) +Figure 1. Pipeline of iQuery. Our system takes as input an audio mixture and its corresponding video frames, and disentangles separated sound sources for each video. Our pipeline consists of two main modules: an Audio-Visual Feature Extraction module which extracts audio, object, and motion features through three corresponding encoders, and an Audio-Visual Transformer module for sound separation. The query-based sound separation transformer has three key components: 1) "visually-named" audio queries are initialized by extracted object features, 2) cross-attention between the audio queries with static image features, dynamic motion features and audio features, 3) self-attention between the learned audio queries to ensure cross-instrument contrast. 
+ +attention across the audio queries for different instruments implements a soft version of the cross-instrument contrast objective. With this design, we unify the feature-centric with the decoder-centric approach. + +How do we achieve generalizability? Motivated by recent success in fine-tuning domain transfer with the text-prompt [28] and visual-prompt designs [7, 35, 41, 86], we adaptively insert the additional queries as audio prompts to accommodate new instruments. With the audio-prompt design, we freeze most of the transformer network parameters and only fine-tune the newly added query embedding layer. We conjecture that the learned prototype queries are instrument-dependent, while the cross/self-attention mechanism in the transformer is instrument-independent. + +Our main contributions are: + +- To the best of our knowledge, we are the first to study the audio-visual sound separation problem from a tunable query view to disentangle different sound sources explicitly through learnable audio prototypes in a mask transformer architecture. +- To generalize to a new sound class, we design an audio prompt for fine-tuning with most of the transformer architecture frozen. +- Extensive experiments and ablations verify the effectiveness of our core designs for disentangle-. ment, demonstrating performance gain for audiovisual sound source separation on three benchmarks. + +# 2. Related work + +Audio-Visual Sound Source Separation. Recent years have witnessed promising results of audiovisual multi-modality joint learning [49, 62, 67, 75, 83] in domains like audio-visual sound source localization [4, 5, 14, 36, 55, 61, 63, 93], audio-visual event localization [68, 76, 77, 95] and sound synthesis from videos [25, 52, 54, 80, 97]. Sound source separation, a challenging classical problem, has been researched extensively in the audio signal processing area [11, 22, 37, 40]. A well-known example is the cocktail party problem [31, 48] in speech domain [1, 21]. Works have been proposed recently for tasks like speech separation [2, 27, 39, 51, 70], active sound separation [45, 46] and on-screen sound separation [25, 53, 71, 72]. Our work focuses on audio-visual sound separation. Recent audio-visual sound separation methods could be classified generally into two categories: feature-centric and decoder-centric as discussed in Sec. 1. Feature-centric methods exploit various ways for visual feature extraction selection to aid this multi-modality task. Some works consider frame-based appearance features (static frame features [24, 79, 89] or detected object regions [26, 66]) for extracting visual semantic cues (e.g., instrument categories) to guide sound separation. [12, 13] adds embeddings from an audio-visual scene graph at the U-Net bottleneck to model the visual context of sound sources. Based on the assessment that motion signals + +![](images/e7d991a0c55c03ea373ee6ab8b3489166941f8aed60c891607dcf99071308c67.jpg) +Figure 2. Qualitative results on MUSIC test set. The first column shows the mixed video frames, the second to the fourth columns compare our predicted spectrogram masks against masks yielded by state-of-the-art algorithm [66] and ground truth masks, and the fifth to the seventh columns visualize separated spectrograms. [66] produces blurry masks and contains unseparated components from another sound source, while our system successfully generates accurate mask and clean spectrograms as the ground truth. 
+ +could more tightly couple the moving sounding object with corresponding variations of sounds, recent approaches focus on including motion information into the pipeline (e.g., optical flow [88], and human pose [23,58]). Based on this, [94] proposes a framework to search for the optimal fusion strategy for multi-modal features. Decoder-centric methods explore prevention of "cross-talk" between the audio sources in the decoder stage. [99] designs a two-stage pipeline, where the second stage conducts a counterfactual synthesis through motion features to remove potentially leaked sound. The approach of [27] added critic functions explicitly to enforce cross-modal consistency and cross-instrument contrast. + +Vision Transformers. Motivated by transformer's success in natural language processing [73], transformers were first introduced in computer vision for image classification as ViT [20]. Given the superior long-range modeling capacity, many follow-up works [47, 69, 82] have upgraded ViT to achieve higher performance and widely surpassed convolutional neural networks. Further, transformer-based models are adopted for various downstream tasks, such as 2D object detection [9, 91, 100], semantic/instance segmentation [65, 78, 92], 3D object detection [50, 85], shape recognition [84, 90] and video understanding [6, 42]. Particularly, following the pipeline from DETR [9], MaskFormer [16] and Mask2Former [15] represent each mask candidate as a learnable query and conduct parallel decoding for instance-level segmentation. However, only few approaches [39, 58, 71, 72, 99] have extended transformer for audio-visual sound separation fields. [58] adopts a BERT + +[18] architecture to learn visual, pose, and audio feature representations. [99] designs an audio-motion transformer to refine sound separation results through audio-motion feature fusion. These methods focus mainly on learning better contextualized multi-modality representations through an encoder transformer. In contrast, our mask transformer-based network focuses on the entire process of visual-audio separation task. We disentangle different sound sources through independent learnable query prototypes and segment each time-frequency region on the spectrogram via mask prediction in an end-to-end fashion. + +# 3. Method + +We first describe the formulation of the audio-visual sound separation task and introduce our pipeline iQuery briefly in Sec. 3.1. Then we introduce networks for learning representations from visual and audio modalities in Sec. 3.2 and our proposed cross-modality cross-attention transformer architecture for visual sound separation in Sec. 3.3. Finally, we introduce our adaptive query fine-tuning strategy through designs of flexible tunable queries in Sec. 3.4. + +# 3.1. Overview + +As mentioned before, our goal is to disentangle the audio mixture concerning its corresponding sound sources in the given mixture by using so-called queries. Following previous works [21, 89], we adopt a commonly used "Mix-and-Separate" self-supervised source separation procedure. Given $K$ video clips with accompanying audio signal: $\{(V_k,s_k(t))\}_{k\in [1,K]}$ , we create a sound mixture: + +$s_{mix}(t) = \sum_{k=1}^{K} s_k(t)$ as training data. Our disentanglement goal is to separate sounds $s_k(t)$ from $s_{mix}(t)$ for sound sources in $V_k$ , respectively. The pipeline, as illustrated in Fig. 1, is mainly composed of two components: an Audio-Visual Feature Extraction module and a Mask Transformer-based Sound Separation module. 
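Before detailing the two modules, the pair construction above can be made concrete with a minimal sketch (the tensor shapes and the helper name are illustrative assumptions rather than the released code; the window and hop sizes follow Sec. 4.1):

```python
# Sketch of "Mix-and-Separate" training-pair construction (illustrative only).
import torch

def make_training_pair(waveforms):
    """waveforms: list of K mono clips s_k(t), 1-D tensors of equal length."""
    sources = torch.stack(waveforms)                 # (K, T_samples)
    s_mix = sources.sum(dim=0)                       # s_mix(t) = sum_k s_k(t)

    window = torch.hann_window(1022)
    def magnitude_spec(x):
        # magnitude spectrogram |STFT(x)|
        return torch.stft(x, n_fft=1022, hop_length=256,
                          window=window, return_complex=True).abs()

    S_mix = magnitude_spec(s_mix)                    # mixture spectrogram, network input
    targets = torch.stack([magnitude_spec(s) for s in waveforms])  # per-source supervision
    return S_mix, targets
```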
First, in the feature extraction module, the object detector & image encoder, and video encoder extract object-level visual features and motion features from video clip $V_k$ . The audio network yields an audio feature and an audio embedding from the given sound mixture $s_{mix}(t)$ . After that, a cross-modal transformer decoder attends to visual and audio features and outputs audio mask embeddings, which are further combined with audio embeddings for sound separation. + +# 3.2. Audio-Visual Feature Extraction + +Object Detector & Image Encoder. To initialize learning of audio queries, we assign object-level visual appearance features to the corresponding queries, to create "visually named" queries. In the implementation, following [26], we use a Faster R-CNN object detector with ResNet-101 backbone. For frames in a given video clip $V_{k}$ , the object detector is utilized to acquire the detected objects set $O_{k}$ . After that, we adopt a pre-trained ResNet-18 similar to [66], followed by a linear layer and max pooling to yield object-level features $F_{O_k}\in \mathbb{R}^{C_O}$ , where $C_O$ denotes channel dimension of object features. + +Video Encoder. The video encoder maps the video frames from $V_{k} \in \mathbb{R}^{3 \times T_{k} \times H_{k} \times W_{k}}$ into a motion feature representation. In contrast with previous motion representations [23, 58, 88, 99], we use self-supervised video representation obtained from a 3D video encoder of I3D [10] pre-trained by FAME [19]. The model is pre-trained contrastively to concentrate on moving foregrounds. Finally, a spatial pooling is applied to obtain motion embedding $F_{M_k} \in \mathbb{R}^{C_M \times T_k'}$ , where $C_M$ denotes the dimension of the motion feature. + +Audio Network. The audio network takes the form of skip-connected U-Net style architectures [59] following [26, 66, 89]. Given the input audio mixture $s_{mix}(t)$ , we first apply a Short-Time Fourier Transform (STFT) [30] to convert the raw waveform to a 2D Time-Frequency spectrogram representation $S_{mix} \in \mathbb{R}^{F \times T}$ , which is then fed into the U-Net encoder to obtain an audio feature map $F_A \in \mathbb{R}^{C_A \times \frac{F}{S} \times \frac{T}{S}}$ ( $C_A$ denotes the number of channels and $S$ denotes stride of audio feature map) at the bottleneck. A U-Net decoder gradually upsamples the audio features to yield audio embeddings $\varepsilon_A \in \mathbb{R}^{C_\varepsilon \times F \times T}$ ( $C_\varepsilon$ denotes the dimension of audio embeddings), which is combined further with the transformer mask embeddings to generate the separated sound spectrogram mask $M_k$ . + +# 3.3. Audio-Visual Transformer + +Our cross-modality sound separation transformer contains the transformer decoder [73] with $N$ queries (i.e., learnable prototypes), and utilizes the extracted object features $F_{O_k}$ , motion embeddings $F_{M_k}$ and audio features $F_A$ to yield $N$ mask embeddings $\varepsilon_{mask} \in \mathbb{R}^{C_{\varepsilon} \times N}$ for spectrogram mask prediction of separated sound $s_k(t)$ , where $N$ denotes maximum of the pre-defined instrument types. + +Audio query prototypes. We denote audio queries as $Q \in \mathbb{R}^{C_Q \times N}$ to represent different instruments, which are initialized by "visually naming" audio queries. 
Specifically, "visually naming" means that we assign object features $F_{O_k}$ to the corresponding query in $Q$ with element-wise addition to yield "visually-named" queries $Q_v$ , which are then fed into the transformer decoder cross-attention layers. + +Cross-attention layers. In the decoder, we stack one motion-aware decoder layer and three audio-aware decoder layers. The "visually-named" queries $Q_{v}$ first interact temporally with motion features $F_{M_k}$ in the motion-aware decoder layer with motion cross-attention by Attention $(Q_{v},F_{M_{k}},F_{M_{k}})$ . This is followed by an FFN to generate the motion-decoded queries $Q^{\prime}$ , which are then fed into three audio-aware decoder layers to adaptively interact with audio features $F_{A}$ , each of which consists of a self-attention, an audio cross-attention computed by Attention $(Q^{\prime},F_{A},F_{A})$ , and an FFN. The output $N$ audio segmentation embeddings $\varepsilon_{Q}\in \mathbb{R}^{C_{Q}\times N}$ is computed by + +$$ +\varepsilon_ {Q} = \operatorname {A u d i o D e c o d e r} _ {\times 3} \left(Q ^ {\prime}, F _ {A}, F _ {A}\right), \tag {1} +$$ + +where AudioDecoder stands for our audio-aware decoder layer. Similar to [9, 16], the decoder generates all audio segmentation embeddings parallelly. + +Separated mask prediction. Through the above decoder, the $N$ audio segmentation embeddings $\varepsilon_{Q}$ are converted to $N$ mask embeddings $\varepsilon_{mask} \in \mathbb{R}^{C_{\varepsilon} \times N}$ through a MLP with two hidden layers, where dimension $C_{\varepsilon}$ is identical to dimension of audio embeddings $\varepsilon_{A} \in \mathbb{R}^{C_{\varepsilon} \times F \times T}$ . Then each predicted mask $M_{k} \in \mathbb{R}^{F \times T}$ of the separated sound spectrogram is generated by a dot-product between the corresponding mask embedding in $\varepsilon_{mask}$ and audio embedding $\varepsilon_{A}$ from the audio decoder. Finally, we multiply the sound mixture spectrogram $S_{mix}$ and the predicted mask $M_{k}$ to disentangle sound spectrogram $S_{k}$ for sound $s_{k}(t)$ by + +$$ +S _ {k} = S _ {\text {m i x}} \odot M _ {k}, \tag {2} +$$ + +where $\odot$ denotes the element-wise multiplication operator. Ultimately, separated sound signal $s_k(t)$ is produced by applying inverse STFT to the separated spectrogram $S_{k}$ . + +![](images/1b63f6aea286d27ae18630fcc1832b89f90211b28f9777a416d1430a45904f23.jpg) +Figure 3. Audio prompts design. To generalize to new types of instruments/event classes, we propose to insert additional queries (audio prompts) to learn new audio prototypes for unseen classes. With this design, we only fine-tune the query embedding layer while keeping all the other parts of transformer backbone frozen. + +Training objective. Following [26, 89], we set our training objective as optimizing spectrogram masks. The ground truth ratio mask $M_{k}^{GT}$ of $k$ -th video is calculated as follows, + +$$ +M _ {k} ^ {G T} (t, f) = \frac {S _ {k} (t , f)}{S _ {m i x} (t , f)}, \tag {3} +$$ + +where $(t,f)$ denotes time-frequency coordinates. We adopt per-pixel $L1$ loss [87] to optimize the overall sound separation network, sound separation loss $L_{sep}$ is defined as, + +$$ +L _ {s e p} = \sum_ {k = 1} ^ {K} \left| \left| M _ {k} - M _ {k} ^ {G T} \right| \right| _ {1}, \tag {4} +$$ + +where $K$ denotes number of mixed sounds in $S_{mix}$ . + +# 3.4. 
Tunable Queries as Audio Prompts + +With the flexible design of tunable queries as learnable prototypes, our pipeline is more friendly to generalizing to new types of instruments. Unlike previous methods that need to finetune the entire mask generation U-Net, we could insert additional queries (i.e., audio prompts) for the new instruments. Such a method enables us only need to finetune the query embedding layer for learning new audio query prototypes in Sec. 3.3 of our transformer architecture while keeping all cross-attention layers frozen (see Fig.3). Specifically, we add $L$ new audio prompts $P \in \mathbb{R}^{C_Q \times L}$ to original pre-trained audio queries $Q \in \mathbb{R}^{C_Q \times N}$ , then the query embedding layer for the prompted learnable prototypes $Q_{prompted} \in \mathbb{R}^{C_Q \times (N + L)}$ is the only layer learnable in our transformer decoder, while keeping the transformer backbone frozen. + +# 4. Experiments + +# 4.1. Experimental Settings + +Datasets. We perform experiments on three widely-used datasets: MUSIC [89], MUSIC-21 [88], and Audio-Visual Event (AVE) [29, 68]. MUSIC dataset spans 11 musical instrument categories: accordion, acoustic guitar, cello, clarinet, erhu, flute, saxophone, trumpet, tuba, violin, + +and xylophone. This dataset is relatively clean, and sound sources are always within the scene, collected for the audio-visual sound separation task. We utilize 503 online available solo videos and split them into training/validation/testing sets with 453/25/25 videos from 11 different categories, respectively, following same settings as [66]. MUSIC-21 dataset [88] is an enlarged version of MUSIC [89], which contains 10 more common instrument categories: bagpipe, banjo, bassoon, congas, drum, electric bass, guzheng, piano, pipa, and ukulele. We utilize 1,092 available solo videos and split them into train/test sets with 894/198 videos respectively from 21 different categories. Note that we follow the same training/testing split as [23, 99]. AVE dataset is a general audio-visual learning dataset, covering 28 event classes such as animal behaviors, vehicles, and human activities. We follow the same setting as [99], and utilize 4143 videos from AVE [68] dataset. + +Baselines. For MUSIC dataset, we compare our method with four recent methods for sound separation. NMF-MFCC [64] is a non-learnable audio-only method, we consider reporting this result from [26, 58] on MUSIC test set. We also compare with two representative audio-visual sound separation baselines: Sound-of-Pixels [89] and Co-Separation [26]. We retrained these two methods with the same training data and split them as ours for a fair comparison. Finally, we compare our approach with a most recent publicly-available baseline CCoL [66], which has the same training setting as ours. For MUSIC-21 dataset, we compare our method with six recently proposed approaches: Sound-of-Pixels [89], Co-Separation [26], Sound-of-Motions [88], Music Gesture [23], TriBERT [58] and AMnet [99]. For [58], since $12.27\%$ of the training samples are missing in their given training split, we consider their reported result as a baseline comparison. Finally, for AVE dataset, we compare our method with six state-of-the-art methods. Since we conduct our experiments with the same setting as AMnet [99], we report results from [99] for Multisensory [53], Sound-of-Pixels [89], Sound-of-Motions [88], Minus-Plus [79], Cascaded Opponent Filter [98] as baseline comparisons. + +Evaluation metrics. 
The sound separation performance is evaluated by the popular adopted mir_eval library [57] in terms of standard metrics: Signal to Distortion Ratio (SDR), Signal to Interference Ratio (SIR), and Signal to Artifact Ratio (SAR). SDR measures the combination of interference and artifacts, SIR measures interference, and SAR measures artifacts. For all three metrics, a higher value indicates better results. + +Implementation Details. For MUSIC [89] and MUSIC-21 [88] datasets, we sub-sample the audio at $11\mathrm{kHz}$ , and each + +![](images/b640fbc59bf991253b8dfd81f79418df5009c6672c85148a0c4da3a770a7e2a8.jpg) +Figure 4. Human evaluation results for sound source separation on mixtures of different instrument types. Our system is able to separate sounds with better actual perceptual quality. + +![](images/9618d924d53041221de055cf5aecac86821b51bc0fc7db5f8d851afc54ed96f0.jpg) +Figure 5. Visualization of audio query embeddings with t-SNE, different instrument categories are color-coded. Our audio queries have learned to cluster by different classes of sound. + +audio sample is approximately 6 seconds. STFT is applied using a Hann window size of 1022 and a hop length of 256, yielding a $512 \times 256$ Time-Frequency audio representation. It is then re-sampled on a log-frequency scale to obtain a magnitude spectrogram with $T$ , $F = 256$ . Detected objects in frames are resized to $256 \times 256$ and randomly cropped to the size of $224 \times 224$ . We set the video frame rate as 1 FPS, and randomly-selected three frames as input for the object detector. While for AVE [68] dataset, audio signal is sub-sampled at $22\mathrm{kHz}$ , and we use the full frame rate(29.97 FPS). Other settings are the same as MUSIC except STFT hop length is set as 184, following [99]. + +For MUSIC dataset [89], we use the Faster R-CNN object detector pre-trained by [26] on Open Images [38]. For MUSIC-21 [88] and AVE [68] datasets, since additional musical and general classes are not covered for this object detector, we adopt a pre-trained Detic detector [96] based on CLIP [56] to detect the 10 more instruments in MUSIC-21 dataset [88] and 28 event classes in AVE dataset [68]. + +We utilize 8 heads for all attention modules and select the maximum $N$ objects (number of queries) as 15, 25, and 30 for MUSIC, MUSIC-21 and AVE. The video encoder [19] and the object detector is pre-trained and kept frozen during training and inference. The multi-layer perception (MLP) for separated mask prediction has 2 hidden layers of 256 channels following [16]. Audio feature $F_{A}$ , motion feature + +![](images/42a96aa078bdcd9978bcf18c671d5f06e38df317fe0ea9d895598e8f91ccf8c5.jpg) +Figure 6. Qualitative results on AVE test dataset. Beyond restricted musical instruments, our model is also able to handle general sound separation tasks (e.g. sounds of galloping race car and frying food on the first two rows; sounds of driving motorcycles and speeches on the last two rows). + +$F_{M}$ , object feature $F_{O}$ , and audio queries $Q$ have a channel dimension of 256. And we set the channel dimension of both audio embeddings $\varepsilon_{A}$ and mask embeddings $\varepsilon_{M}$ as 32. The epoch number is 80, and batch size is set to 8. We use AdamW [43] for the mask transformer with a weight decay of $10^{-4}$ and Adam for all other networks as optimizer selection. The learning rate of the transformer is set as $10^{-4}$ and decreases by multiplying 0.1 at 60-th epoch. 
We set the learning rate for other networks as $10^{-4}$, decreased by a factor of 0.1 at the 30-th and 50-th epochs, respectively. Training is conducted on 8 NVIDIA Titan V GPUs.

# 4.2. Audio-Visual Sound Source Separation

Quantitative evaluation. Table 1 reports quantitative sound separation results against state-of-the-art methods on the MUSIC dataset [89]. Our method outperforms the baseline models in separation accuracy on all evaluation metrics, and surpasses the most recent publicly available state-of-the-art algorithm [66] by $3.43\mathrm{dB}$ in terms of SDR. For the MUSIC-21 dataset [88], the performance comparison is shown in Table 2; again, our method outperforms the baseline models in terms of the SDR metric. Performance on these two datasets demonstrates our model's ability to disentangle musical sounds. To further verify the scalability of our method to general audio source separation problems, we perform quantitative comparisons on the AVE dataset in Table 3. As demonstrated, we surpass the state-of-the-art algorithm [99] by $1.31\mathrm{dB}$ in terms of SDR. AVE is
| Methods | SDR↑ | SIR↑ | SAR↑ |
| --- | --- | --- | --- |
| NMF-MFCC [64] | 0.92 | 5.68 | 6.84 |
| Sound-of-Pixels [89] | 4.23 | 9.39 | 9.85 |
| Co-Separation [26] | 6.54 | 11.37 | 9.46 |
| CCoL [66] | 7.74 | 13.22 | 11.54 |
| iQuery (Ours) | 11.17 | 15.84 | 14.27 |
+ +Table 1. Audio-visual sound separation results on MUSIC. Best results in bold and second-best results in Blue. + +
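The SDR, SIR, and SAR values reported in these tables are the standard BSS-eval metrics computed with the mir_eval library mentioned in Sec. 4.1. A minimal, self-contained sketch of such an evaluation (the random arrays are placeholders for ground-truth and separated waveforms, not our data):

```python
# Sketch of SDR / SIR / SAR computation with mir_eval (placeholder signals).
import numpy as np
import mir_eval

rng = np.random.default_rng(0)
reference = rng.standard_normal((2, 11000 * 6))                     # (n_sources, n_samples), ~6 s at 11 kHz
estimated = reference + 0.1 * rng.standard_normal((2, 11000 * 6))   # stand-in for separated audio

sdr, sir, sar, _ = mir_eval.separation.bss_eval_sources(reference, estimated)
print(sdr.mean(), sir.mean(), sar.mean())                            # higher is better for all three metrics
```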
| Methods | SDR↑ | SIR↑ | SAR↑ |
| --- | --- | --- | --- |
| Sound-of-Pixels [89]* | 7.52 | 13.01 | 11.53 |
| Co-Separation [26]* | 7.64 | 13.80 | 11.30 |
| Sound-of-Motions [88]* | 8.31 | 14.82 | 13.11 |
| Music Gesture [23]* | 10.12 | 15.81 | - |
| TriBERT [58] | 10.09 | 17.45 | 12.80 |
| AMnet [99]* | 11.08 | 18.00 | 13.22 |
| iQuery (Ours) | 11.12 | 15.98 | 14.16 |
+ +a general dataset containing scenes like male and female speeches, animal sounds, and vehicle sounds. This clearly shows our model's adaptivity to more general problems of sound source separation. + +Qualitative evaluation. Fig. 2 illustrates qualitative sound separation results on MUSIC dataset. It can be seen that our method disentangles sound sources cleaner and more accurately, with less "muddy" sound. Fig. 6 provides additional qualitative examples on AVE dataset, and this again illustrates our model's good performance on general sound source separation cases. Both qualitative and quantitative results verify the superiority of our designed sound query-based segmentation pipeline iQuery. + +Human evaluation. Our quantitative evaluation shows the superiority of our model compared with baseline models, however, studies [8] have shown that audio separation quality could not be truthfully determined purely by the widely used mir_eval [57] metrics. Due to this reason, we further conduct a subjective human evaluation to study the actual perceptual quality of sound-separation results. Specifically, we compare the sound separation result of our model and the publicly available best baseline model [66] on MUSIC [89]. We collected 50 testing samples for all 11 classes from the test set, and each testing sample contains separated sounds with a length of 6 seconds predicted by our model and baseline [66] for the same sound mixture. Ground truth sound is also provided for each sample as a + +Table 2. Audio-visual sound separation results on MUSIC-21. The results noted by * are obtained from [23, 99]. + +
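The separated spectrograms visualized in Fig. 2 follow the mask application of Eq. (2); a minimal sketch of masking and waveform reconstruction (assumed shapes; reusing the mixture phase for the inverse STFT is a common practical choice, not necessarily the exact implementation):

```python
# Sketch of applying a predicted mask M_k and inverting to a waveform (illustrative only).
import torch

def reconstruct(s_mix, mask, n_fft=1022, hop=256):
    """s_mix: 1-D mixture waveform; mask: (F, T) predicted ratio mask M_k."""
    window = torch.hann_window(n_fft)
    spec_mix = torch.stft(s_mix, n_fft=n_fft, hop_length=hop,
                          window=window, return_complex=True)   # complex S_mix
    spec_k = spec_mix * mask                                     # S_k = S_mix * M_k (element-wise)
    return torch.istft(spec_k, n_fft=n_fft, hop_length=hop, window=window)
```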
| Methods | SDR↑ | SIR↑ | SAR↑ |
| --- | --- | --- | --- |
| Multisensory [53]* | 0.84 | 3.44 | 6.69 |
| Sound-of-Pixels [89]* | 1.21 | 7.08 | 6.84 |
| Sound-of-Motions [88]* | 1.48 | 7.41 | 7.39 |
| Minus-Plus [79]* | 1.96 | 7.95 | 8.08 |
| Cascaded Filter [98]* | 2.68 | 8.18 | 8.48 |
| AMnet [99]* | 3.71 | 9.15 | 11.00 |
| iQuery (Ours) | 5.02 | 8.21 | 12.32 |
+ +Table 3. Audio-visual sound separation results on AVE. The results noted by * are obtained from [99]. + +
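Table 4 below compares fine-tuning to a held-out class. A minimal sketch of the audio-prompt recipe from Sec. 3.4 (the module and attribute names, initialization scale, and optimizer settings here are assumptions for illustration, not the actual code base):

```python
# Sketch of audio-prompt fine-tuning: extend the query embedding, freeze the rest.
import torch
import torch.nn as nn

def add_audio_prompts(model, num_new_classes, dim=256):
    """Assumes `model.query_embed` is an nn.Embedding holding the N pretrained audio queries."""
    old = model.query_embed.weight.data                       # (N, C_Q) pretrained queries
    new = 0.02 * torch.randn(num_new_classes, dim)            # L new audio prompts
    model.query_embed = nn.Embedding.from_pretrained(
        torch.cat([old, new], dim=0), freeze=False)           # (N + L, C_Q)

    for name, p in model.named_parameters():                  # only the query embedding is tuned
        p.requires_grad = name.startswith("query_embed")
    return torch.optim.AdamW(model.query_embed.parameters(),
                             lr=1e-4, weight_decay=1e-4)
```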
| Methods | SDR↑ | SIR↑ | SAR↑ |
| --- | --- | --- | --- |
| Sound-of-Pixels [89] | 4.11 | 8.17 | 9.84 |
| Co-Separation [26] | 5.37 | 9.85 | 8.72 |
| CCoL [66] | 6.74 | 11.94 | 10.22 |
| iQuery (Ours) | 8.04 | 11.60 | 13.21 |
+ +Table 4. Fine-tuning sound separation performance comparison. All methods are pretrained on MUSIC dataset without one particular instrument and then fine-tuned on this new data. Baseline models are tuned with whole network unfrozen, and we keep our transformer backbone frozen. + +reference. The experiment is conducted by 40 participants separately. For each participant, the orders of our model and baseline [66] are randomly shuffled, and we ask the participant to answer "Which sound separation result is more close to the ground truth audio?" for each sample. Statistical results are shown in Fig. 4. Notably, our method significantly surpasses the compared baseline with a winning rate of $72.45\%$ . This additionally demonstrate the better actual perceptual performance of our model. + +Learned Query Embedding. To visualize that our proposed model has indeed learned to disentangle different sound sources through learnable queries, we show t-SNE embeddings of our learnable queries in MUSIC test set [89]. As is shown in Fig. 5, our queries tend to cluster by different instrument classes, learning representative prototypes. + +# 4.3. Extendable Audio Prompt Fine-tuning + +Table. 4 evaluates our approach's generalization ability compared with previous methods. We conduct fine-tuning experiments by leave-one-out cross-validation. Baseline models are fine-tuned on the new instrument with all the networks structure unfrozen. With the design of audio prompts discussed in Sec. 3.4, we keep most of our transformer parameters frozen, only fine-tuning the query embedding layer, which has much fewer parameters (0.048% of the total parameters in Transformer). + +Fig. 7 (a) shows our performance with a varying num + +![](images/d1d29e0ea15b66cf5b7bbdbecab26f17454c6e4b80b4481fecbcfaf7ed81ca3d.jpg) +(a) + +![](images/885f6b465a1146a5887214e466db127fab6c7dd2d817c50b512258edd4f63142.jpg) +(b) +Figure 7. Fine-tuning curves of sound separation. (a) Fine-tuning with different number of unseen instrument classes on MUSIC. (b) Fine-tuning with different number of unseen event classes on AVE. + +ber of new instrument classes for fine-tuning on MUSIC dataset. We hold out 1, 2, 4, and 6 instrument classes in the pre-training stage and fine-tune our method on these new classes with only the query embedding layer unfrozen. MUSIC dataset contains in total of 11 instruments. Notably, our method still yields good results when the network is only pre-trained on 5 instrument types, even fewer than the unseen classes. Fig. 7 (b) shows our model's fine-tuning performance on AVE dataset with a varying number of new event classes for fine-tuning. We follow the experimental setup on MUSIC, and hold out 2, 4, 6, 8, and 12 event classes for fine-tuning. This demonstrates our model's adaptivity in general sound separation cases. + +# 4.4. Contrastive Verification + +Our learnable query-prototypes network is designed to ensure cross-modality consistency and cross-instrument contrast. We assume these prototypes to draw samples of each particular sound class sample close and push away the different prototypes. The question is whether our network design with "visually-named" query trained in the "Mix-and and-Separate" can already achieve this goal? 
As an alternative, we design an auxiliary contrastive loss for verification: it maximizes the cosine similarity between the separated audio embedding $\varepsilon_{A_k} = \varepsilon_A \odot M_k$ and the corresponding query embedding $Q_k$ in $Q$, while minimizing the cosine similarity between the separated audio embedding and the other query embeddings $Q_n$ (where $n \in [1, N], n \neq k$). We optimize the cross-entropy loss over the cosine similarity scores to obtain the contrastive loss $L_{contras}$. To ensure that the audio embedding $\varepsilon_A$ and the predicted mask $M_k$ are accurate enough, we use a hierarchical task learning strategy [44] to control the weights of $L_{sep}$ and $L_{contras}$ at each epoch. The verification loss $L_{verify}$ is $L_{verify} = w_{sep}(e) \cdot L_{sep} + w_{contras}(e) \cdot L_{contras}$, where $e$ denotes the training epoch and $w(e)$ denotes the corresponding loss weight.

The ablation of the auxiliary contrastive loss, shown in Table 5, demonstrates that our existing design achieves better results without an explicit contrastive loss. This answers the
| Architecture | SDR↑ | SIR↑ | SAR↑ |
| --- | --- | --- | --- |
| w/o lrn. | 10.05 | 14.27 | 13.71 |
| w/o adpt. | 10.89 | 15.51 | 14.14 |
| w/ con. best | 11.02 | 15.91 | 14.10 |
| Ours (w/o con) | 11.17 | 15.84 | 14.27 |
+ +Table 5. Ablations on the auxiliary contrastive loss on MUSIC dataset. "w/o lrn." denotes without learnable linear layer added to queries produced by Transformer decoder; "w/o adpt." denotes that we use a fixed weight for auxiliary contrastive loss without the Hierarchical Task Learning strategy; "w/ con. best" denotes our best model design using auxiliary contrastive loss. + +
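For reference, a minimal sketch of the auxiliary contrastive term ablated above (the pooling choice and tensor shapes are assumptions; it only illustrates the objective described in Sec. 4.4):

```python
# Sketch of the auxiliary contrastive loss over cosine similarity scores (illustrative only).
import torch
import torch.nn.functional as F

def contrastive_loss(audio_emb, mask_k, query_emb, k):
    """audio_emb: (C, F, T) audio embeddings; mask_k: (F, T) predicted mask of source k;
    query_emb: (N, C) audio queries; k: index of the matching query."""
    eps_ak = (audio_emb * mask_k).flatten(1).mean(dim=-1)                 # pooled epsilon_{A_k}, (C,)
    sims = F.cosine_similarity(eps_ak.unsqueeze(0), query_emb, dim=-1)    # (N,) similarity scores
    # cross-entropy pulls the k-th query close and pushes the other queries away
    return F.cross_entropy(sims.unsqueeze(0), torch.tensor([k]))
```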
| Architecture | SDR↑ | SIR↑ | SAR↑ |
| --- | --- | --- | --- |
| Random | 6.58 | 10.79 | 12.77 |
| Self-audio | 10.54 | 14.81 | 14.23 |
| Self-motion-audio | 10.65 | 15.37 | 13.96 |
| Dual-stream | 10.46 | 15.25 | 13.79 |
| Motion-self-audio | 11.17 | 15.84 | 14.27 |
+ +Table 6. Ablations on the design of Transformer decoder. + +question we raised, that our "visually-named" queries are already contrastive enough for sound disentanglement. + +# 4.5. Ablations of Transformer decoder design + +Ablation results of Transformer decoder design on $MUSIC$ dataset is shown in Table. 6. "Random" denotes randomly assigning object features to queries, its poor separation result verifies the importance of our "visually-named" queries. "Self-audio" means removing the motion cross attention layer, which confirms the effectiveness of adding the motion feature. We tried two baseline designs against our final selection "Motion-self-audio", as stated in Sec. 3.3. "Self-motion-audio" is a design that puts self-, motion cross-, and audio cross-attention in a single decoder layer. "Dual-stream" means we conduct motion and audio cross-attention in parallel then fuse in the decoder layer. Specific details are in the Supplemental material. + +# 5. Conclusion + +We proposed an audio-visual separation method using an adaptable query-based audio mask transformer network. Our network disentangles different sound sources explicitly through learnable audio prototypes initiated by "visually naming". We demonstrate cross-modal consistency and cross-instrument contrast via a multi-modal cross-attention mechanism. When generalizing to new unseen classes, our method can be adapted by inserting additional queries as audio prompts while freezing the attention mechanism. Experiments on both musical and general sound datasets demonstrate performance gain by our iQuery. + +# References + +[1] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisserman. The conversation: Deep audio-visual speech enhancement. arXiv preprint arXiv:1804.04121, 2018. 2 +[2] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisserman. My lips are concealed: Audio-visual speech enhancement through obstructions. arXiv preprint arXiv:1907.04975, 2019. 2 +[3] Triantafyllos Afouras, Andrew Owens, Joon Son Chung, and Andrew Zisserman. Self-supervised learning of audiovisual objects from video. In European Conference on Computer Vision (ECCV), pages 208-224, 2020. 1 +[4] Relja Arandjelovic and Andrew Zisserman. Look, listen and learn. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 609-617, 2017. 2 +[5] Relja Arandjelovic and Andrew Zisserman. Objects that sound. In Proceedings of the European conference on computer vision (ECCV), pages 435-451, 2018. 2 +[6] Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lucic, and Cordelia Schmid. Vivit: A video vision transformer. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 6836-6846, 2021. 3 +[7] Hyojin Bahng, Ali Jahanian, Swami Sankaranarayanan, and Phillip Isola. Visual prompting: Modifying pixel space to adapt pre-trained models. arXiv preprint arXiv:2203.17274, 2022. 2 +[8] Estefanía Cano, Derry FitzGerald, and Karlheinz Brandenburg. Evaluation of quality of sound source separation algorithms: Human perception vs quantitative metrics. In 2016 24th European Signal Processing Conference (EUSIPCO), pages 1758-1762. IEEE, 2016. 7 +[9] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European Conference on Computer Vision (ECCV), pages 213-229. Springer, 2020. 3, 4 +[10] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. 
In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6299-6308, 2017. 4 +[11] British Chandna, Marius Miron, Jordi Janer, and Emilia Gómez. Monoaural audio source separation using deep convolutional neural networks. In International conference on latent variable analysis and signal separation, pages 258-266. Springer, 2017. 2 +[12] Moitreya Chatterjee, Narendra Ahuja, and Anoop Cherian. Learning audio-visual dynamics using scene graphs for audio source separation. In Advances in Neural Information Processing Systems (NeurIPS), 2022. 2 +[13] Moitreya Chatterjee, Jonathan Le Roux, Narendra Ahuja, and Anoop Cherian. Visual scene graphs for audio source separation. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 1204-1213, 2021. 2 +[14] Honglie Chen, Weidi Xie, Triantafyllos Afouras, Arsha Nagrani, Andrea Vedaldi, and Andrew Zisserman. Localizing visual sounds the hard way. In IEEE/CVF Conference on + +Computer Vision and Pattern Recognition (CVPR), pages 16867-16876, 2021. 2 +[15] Bowen Cheng, Ishan Misra, Alexander G Schwing, Alexander Kirillov, and Rohit Girdhar. Masked-attention mask transformer for universal image segmentation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1290–1299, 2022. 1, 3 +[16] Bowen Cheng, Alex Schwing, and Alexander Kirillov. Per-pixel classification is not all you need for semantic segmentation. Advances in Neural Information Processing Systems (NeurIPS), 34, 2021. 1, 3, 4, 6 +[17] Ying Cheng, Ruize Wang, Zhihao Pan, Rui Feng, and Yuejie Zhang. Look, listen, and attend: Co-attention network for self-supervised audio-visual representation learning. In Proceedings of the 28th ACM International Conference on Multimedia, pages 3884–3892, 2020. 1 +[18] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.3 +[19] Shuangrui Ding, Maomao Li, Tianyu Yang, Rui Qian, Haohang Xu, Qingyi Chen, Jue Wang, and Hongkai Xiong. Motion-aware contrastive video representation learning via foreground-background merging. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9716-9726, 2022. 4, 6 +[20] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020.3 +[21] Ariel Ephrat, Inbar Mosseri, Oran Lang, Tali Dekel, Kevin Wilson, Avinatan Hassidim, William T Freeman, and Michael Rubinstein. Looking to listen at the cocktail party: A speaker-independent audio-visual model for speech separation. arXiv preprint arXiv:1804.03619, 2018. 2, 3 +[22] Cédric Févotte, Nancy Bertin, and Jean-Louis Durrieu. Nonnegative matrix factorization with the itakura-saito divergence: With application to music analysis. Neural computation, 21(3):793-830, 2009. 2 +[23] Chuang Gan, Deng Huang, Hang Zhao, Joshua B Tenenbaum, and Antonio Torralba. Music gesture for visual sound separation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10478-10487, 2020. 1, 3, 4, 5, 7 +[24] Ruohan Gao, Rogerio Feris, and Kristen Grauman. Learning to separate object sounds by watching unlabeled video. In European Conference on Computer Vision (ECCV), pages 35-53, 2018. 2 +[25] Ruohan Gao and Kristen Grauman. 2.5 d visual sound. 
In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 324-333, 2019. 2 +[26] Ruohan Gao and Kristen Grauman. Co-separating sounds of visual objects. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 3879-3888, 2019. 1, 2, 4, 5, 6, 7 + +[27] Ruohan Gao and Kristen Grauman. Visualvoice: Audiovisual speech separation with cross-modal consistency. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15490-15500. IEEE, 2021. 1, 2, 3 +[28] Tianyu Gao, Adam Fisch, and Danqi Chen. Making pretrained language models better few-shot learners. arXiv preprint arXiv:2012.15723, 2020. 2 +[29] Jort F Gemmeke, Daniel PW Ellis, Dylan Freedman, Aren Jansen, Wade Lawrence, R Channing Moore, Manoj Plakal, and Marvin Ritter. Audio set: An ontology and human-labeled dataset for audio events. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 776-780. IEEE, 2017. 5 +[30] Daniel Griffin and Jae Lim. Signal estimation from modified short-time fourier transform. IEEE Transactions on acoustics, speech, and signal processing, 32(2):236-243, 1984. 4 +[31] Simon Haykin and Zhe Chen. The cocktail party problem. Neural computation, 17(9):1875-1902, 2005. 2 +[32] John R Hershey, Zhuo Chen, Jonathan Le Roux, and Shinji Watanabe. Deep clustering: Discriminative embeddings for segmentation and separation. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 31-35. IEEE, 2016. 1 +[33] Di Hu, Feiping Nie, and Xuelong Li. Deep multimodal clustering for unsupervised audiovisual learning. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9248-9257, 2019. 1 +[34] Po-Sen Huang, Minje Kim, Mark Hasegawa-Johnson, and Paris Smaragdis. Joint optimization of masks and deep recurrent neural networks for monaural source separation. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 23(12):2136-2147, 2015. 1 +[35] Menglin Jia, Luming Tang, Bor-Chun Chen, Claire Cardie, Serge Belongie, Bharath Hariharan, and Ser-Nam Lim. Visual prompt tuning. In European Conference on Computer Vision (ECCV), pages 709-727. Springer, 2022. 2 +[36] Einat Kidron, Yoav Y Schechner, and Michael Elad. Pixels that sound. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), volume 1, pages 88-95. IEEE, 2005. 2 +[37] Kevin Kilgour, Beat Gfeller, Qingqing Huang, Aren Jansen, Scott Wisdom, and Marco Tagliasacchi. Text-driven separation of arbitrary sounds. arXiv preprint arXiv:2204.05738, 2022. 2 +[38] Ivan Krasin, Tom Duerig, Neil Alldrin, Vittorio Ferrari, Sami Abu-El-Haija, Alina Kuznetsova, Hassan Rom, Jasper Uijlings, Stefan Popov, Andreas Veit, et al. Openimages: A public dataset for large-scale multi-label and multi-class image classification. Dataset available from https://github.com/openimages, 2(3):18, 2017. 6 +[39] Jiyoung Lee, Soo-Whan Chung, Sunok Kim, Hong-Goo Kang, and Kwanghoon Sohn. Looking into your speech: Learning cross-modal affinity for audio-visual speech separation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1336–1345, 2021. 2, 3 + +[40] Jie Hwan Lee, Hyeong-Seok Choi, and Kyogu Lee. Audio query-based music source separation. arXiv preprint arXiv:1908.06593, 2019. 2 +[41] Dongze Lian, Daquan Zhou, Jiashi Feng, and Xinchao Wang. Scaling & shifting your features: A new baseline for efficient model tuning. In Advances in Neural Information Processing Systems (NeurIPS), 2022. 
2 +[42] Ze Liu, Jia Ning, Yue Cao, Yixuan Wei, Zheng Zhang, Stephen Lin, and Han Hu. Video swin transformer. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3202-3211, 2022. 3 +[43] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 6 +[44] Yan Lu, Xinzhu Ma, Lei Yang, Tianzhu Zhang, Yating Liu, Qi Chu, Junjie Yan, and Wanli Ouyang. Geometry uncertainty projection network for monocular 3d object detection. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 3111-3121, 2021. 8 +[45] Sagnik Majumder, Ziad Al-Halah, and Kristen Grauman. Move2hear: Active audio-visual source separation. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 275–285, 2021. 2 +[46] Sagnik Majumder and Kristen Grauman. Active audiovisual separation of dynamic sound sources. In European Conference on Computer Vision (ECCV), pages 551-569. Springer, 2022. 2 +[47] Mingyuan Mao, Renrui Zhang, Honghui Zheng, Teli Ma, Yan Peng, Errui Ding, Baochang Zhang, Shumin Han, et al. Dual-stream network for visual recognition. Advances in Neural Information Processing Systems (NeurIPS), 34:25346-25358, 2021. 3 +[48] Josh H McDermott. The cocktail party problem. Current Biology, 19(22):R1024-R1027, 2009. 2 +[49] Otniel-Bogdan Mercea, Thomas Hummel, A Koepke, and Zeynep Akata. Temporal and cross-modal attention for audio-visual zero-shot learning. In European Conference on Computer Vision (ECCV), pages 488–505. Springer, 2022. 2 +[50] Ishan Misra, Rohit Girdhar, and Armand Joulin. An end-to-end transformer model for 3d object detection. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 2906-2917, 2021. 3 +[51] Juan F Montesinos, Venkatesh S Kadandale, and Gloria Haro. Vovit: Low latency graph-based audio-visual voice separation transformer. In European Conference on Computer Vision (ECCV), pages 310–326. Springer, 2022. 2 +[52] Pedro Morgado, Nuno Vasconcelos, Timothy Langlois, and Oliver Wang. Self-supervised generation of spatial audio for 360 video. Advances in Neural Information Processing Systems (NeurIPS), 31, 2018. 2 +[53] Andrew Owens and Alexei A Efros. Audio-visual scene analysis with self-supervised multisensory features. In European Conference on Computer Vision (ECCV), pages 631–648, 2018. 1, 2, 5, 7 +[54] Andrew Owens, Phillip Isola, Josh McDermott, Antonio Torralba, Edward H Adelson, and William T Freeman. Visually indicated sounds. In IEEE/CVF Conference on Com + +puter Vision and Pattern Recognition (CVPR), pages 2405-2413, 2016. 2 +[55] Rui Qian, Di Hu, Heinrich Dinkel, Mengyue Wu, Ning Xu, and Weiyao Lin. Multiple sound sources localization from coarse to fine. In European Conference on Computer Vision (ECCV), pages 292-308. Springer, 2020. 2 +[56] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning (ICML), pages 8748-8763. PMLR, 2021. 6 +[57] Colin Raffel, Brian McFee, Eric J Humphrey, Justin Salamon, Oriol Nieto, Dawen Liang, Daniel PW Ellis, and C Colin Raffel. mir_eval: A transparent implementation of common mir metrics. In Proceedings of the 15th International Society for Music Information Retrieval Conference (ISMIR). CiteSeer, 2014. 5, 7 +[58] Tanzila Rahman, Mengyu Yang, and Leonid Sigal.
Tribert: Full-body human-centric audio-visual representation learning for visual sound separation. arXiv preprint arXiv:2110.13412, 2021. 1, 3, 4, 5, 7 +[59] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In International Conference on Medical image computing and computer-assisted intervention, pages 234-241. Springer, 2015. 4 +[60] Sam Roweis. One microphone source separation. Advances in Neural Information Processing Systems (NeurIPS), 13, 2000. 1 +[61] Arda Senocak, Tae-Hyun Oh, Junsik Kim, Ming-Hsuan Yang, and In So Kweon. Learning to localize sound source in visual scenes. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4358-4366, 2018. 2 +[62] Bowen Shi, Wei-Ning Hsu, Kushal Lakhotia, and Abdelrahman Mohamed. Learning audio-visual speech representation by masked multimodal cluster prediction. In International Conference on Learning Representations (ICLR), 2022. 2 +[63] Zengjie Song, Yuxi Wang, Junsong Fan, Tieniu Tan, and Zhaoxiang Zhang. Self-supervised predictive learning: A negative-free method for sound source localization in visual scenes. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3222-3231, 2022. 2 +[64] Martin Spiertz and Volker Gnann. Source-filter based clustering for monaural blind source separation. In Proceedings of the 12th International Conference on Digital Audio Effects, volume 4, 2009. 5, 7 +[65] Robin Strudel, Ricardo Garcia, Ivan Laptev, and Cordelia Schmid. Segmenter: Transformer for semantic segmentation. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 7262-7272, 2021. 1, 3 +[66] Yapeng Tian, Di Hu, and Chenliang Xu. Cyclic co-learning of sounding object visual grounding and sound separation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2745-2754, 2021. 2, 3, 4, 5, 6, 7 + +[67] Yapeng Tian, Dingzeyu Li, and Chenliang Xu. Unified multisensory perception: Weakly-supervised audio-visual video parsing. In European Conference on Computer Vision (ECCV), pages 436–454. Springer, 2020. 2 +[68] Yapeng Tian, Jing Shi, Bochen Li, Zhiyao Duan, and Chenliang Xu. Audio-visual event localization in unconstrained videos. In European Conference on Computer Vision (ECCV), pages 247–263, 2018. 2, 5, 6 +[69] Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Hervé Jégou. Training data-efficient image transformers & distillation through attention. In International Conference on Machine Learning (ICML), pages 10347-10357. PMLR, 2021. 3 +[70] Thanh-Dat Truong, Chi Nhan Duong, Hoang Anh Pham, Bhiksha Raj, Ngan Le, Khoa Luu, et al. The right to talk: An audio-visual transformer approach. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 1105–1114, 2021. 2 +[71] Efthymios Tzinis, Scott Wisdom, Aren Jansen, Shawn Hershey, Tal Remez, Dan Ellis, and John R Hershey. Into the wild with audioscope: Unsupervised audio-visual separation of on-screen sounds. In International Conference on Learning Representations (ICLR), 2020. 2, 3 +[72] Efthymios Tzinis, Scott Wisdom, Tal Remez, and John R Hershey. Audioscopev2: Audio-visual attention architectures for calibrated open-domain on-screen sound separation. In European Conference on Computer Vision (ECCV), pages 368–385. Springer, 2022. 2, 3 +[73] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. 
Advances in Neural Information Processing Systems (NeurIPS), 30, 2017. 3, 4 +[74] Tuomas Virtanen. Monaural sound source separation by nonnegative matrix factorization with temporal continuity and sparseness criteria. IEEE transactions on audio, speech, and language processing, 15(3):1066-1074, 2007. 1 +[75] Ho-Hsiang Wu, Prem Seetharaman, Kundan Kumar, and Juan Pablo Bello. Wav2clip: Learning robust audio representations from clip. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 4563-4567. IEEE, 2022. 2 +[76] Yu Wu, Linchao Zhu, Yan Yan, and Yi Yang. Dual attention matching for audio-visual event localization. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 6292–6300, 2019. 2 +[77] Yan Xia and Zhou Zhao. Cross-modal background suppression for audio-visual event localization. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19989-19998, 2022. 2 +[78] Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M Alvarez, and Ping Luo. Segformer: Simple and efficient design for semantic segmentation with transformers. Advances in Neural Information Processing Systems (NeurIPS), 34, 2021. 1, 3 +[79] Xudong Xu, Bo Dai, and Dahua Lin. Recursive visual sound separation using minus-plus net. In IEEE/CVF In + +ternational Conference on Computer Vision (ICCV), pages 882-891, 2019. 2, 5, 7 +[80] Xudong Xu, Hang Zhou, Ziwei Liu, Bo Dai, Xiaogang Wang, and Dahua Lin. Visually informed binaural audio generation without binaural audios. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15485-15494, 2021. 2 +[81] Dong Yu, Morten Kolbaek, Zheng-Hua Tan, and Jesper Jensen. Permutation invariant training of deep models for speaker-independent multi-talker speech separation. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 241-245. IEEE, 2017. 1 +[82] Li Yuan, Yunpeng Chen, Tao Wang, Weihao Yu, Yujun Shi, Zi-Hang Jiang, Francis EH Tay, Jiashi Feng, and Shuicheng Yan. Tokens-to-token vit: Training vision transformers from scratch onImagenet. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 558-567, 2021. 3 +[83] Rowan Zellers, Jiasen Lu, Ximing Lu, Youngjae Yu, Yanpeng Zhao, Mohammadreza Salehi, Aditya Kusupati, Jack Hessel, Ali Farhadi, and Yejin Choi. Merlot reserve: Neural script knowledge through vision and language and sound. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 16375-16387, 2022. 2 +[84] Renrui Zhang, Ziyu Guo, Peng Gao, Rongyao Fang, Bin Zhao, Dong Wang, Yu Qiao, and Hongsheng Li. Pointm2ae: Multi-scale masked autoencoders for hierarchical point cloud pre-training. Advances in Neural Information Processing Systems (NeurIPS), 2022. 3 +[85] Renrui Zhang, Han Qiu, Tai Wang, Xuanzhuo Xu, Ziyu Guo, Yu Qiao, Peng Gao, and Hongsheng Li. Monodetr: Depth-aware transformer for monocular 3d object detection. arXiv preprint arXiv:2203.13310, 2022. 3 +[86] Yuanhan Zhang, Kaiyang Zhou, and Ziwei Liu. Neural prompt search. arXiv preprint arXiv:2206.04673, 2022. 2 +[87] Hang Zhao, Orazio Gallo, Iuri Frosio, and Jan Kautz. Loss functions for image restoration with neural networks. IEEE Transactions on computational imaging, 3(1):47-57, 2016. 5 +[88] Hang Zhao, Chuang Gan, Wei-Chiu Ma, and Antonio Torralba. The sound of motions. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 1735-1744, 2019. 
1, 3, 4, 5, 6, 7 +[89] Hang Zhao, Chuang Gan, Andrew Rouditchenko, Carl Vondrick, Josh McDermott, and Antonio Torralba. The sound of pixels. In European Conference on Computer Vision (ECCV), pages 570-586, 2018. 1, 2, 3, 4, 5, 6, 7 +[90] Hengshuang Zhao, Li Jiang, Jiaya Jia, Philip HS Torr, and Vladlen Koltun. Point transformer. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 16259-16268, 2021. 3 +[91] Minghang Zheng, Peng Gao, Renrui Zhang, Kunchang Li, Xiaogang Wang, Hongsheng Li, and Hao Dong. End-to-end object detection with adaptive clustering transformer. arXiv preprint arXiv:2011.09315, 2020. 3 +[92] Sixiao Zheng, Jiachen Lu, Hengshuang Zhao, Xiatian Zhu, Zekun Luo, Yabiao Wang, Yanwei Fu, Jianfeng Feng, Tao + +Xiang, Philip HS Torr, et al. Rethinking semantic segmentation from a sequence-to-sequence perspective with transformers. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6881-6890, 2021. 3 +[93] Bolei Zhou, Aditya Khosla, Agata Lapedriza, Aude Oliva, and Antonio Torralba. Learning deep features for discriminative localization. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2921-2929, 2016. 2 +[94] Dongzhan Zhou, Xinchi Zhou, Di Hu, Hang Zhou, Lei Bai, Ziwei Liu, and Wanli Ouyang. Sepfusion: Finding optimal fusion structures for visual sound separation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pages 3544-3552, 2022. 3 +[95] Jinxing Zhou, Liang Zheng, Yiran Zhong, Shijie Hao, and Meng Wang. Positive sample propagation along the audiovisual event line. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8436-8444, 2021. 2 +[96] Xingyi Zhou, Rohit Girdhar, Armand Joulin, Philipp Krähenbuhl, and Ishan Misra. Detecting twenty-thousand classes using image-level supervision. In European Conference on Computer Vision (ECCV), pages 350-368. Springer, 2022. 6 +[97] Yipin Zhou, Zhaowen Wang, Chen Fang, Trung Bui, and Tamara L Berg. Visual to sound: Generating natural sound for videos in the wild. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3550-3558, 2018. 2 +[98] Lingyu Zhu and Esa Rahtu. Visually guided sound source separation using cascaded opponent filter network. In Proceedings of the Asian Conference on Computer Vision, 2020. 5, 7 +[99] Lingyu Zhu and Esa Rahtu. Visually guided sound source separation and localization using self-supervised motion representations. In IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 1289-1299, 2022. 1, 3, 4, 5, 6, 7 +[100] Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, and Jifeng Dai. Deformable detr: Deformable transformers for end-to-end object detection. arXiv preprint arXiv:2010.04159, 2020. 
3 \ No newline at end of file diff --git a/2023/iQuery_ Instruments As Queries for Audio-Visual Sound Separation/images.zip b/2023/iQuery_ Instruments As Queries for Audio-Visual Sound Separation/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..09cae198e98c86f5843ffce3f5932070878beb43 --- /dev/null +++ b/2023/iQuery_ Instruments As Queries for Audio-Visual Sound Separation/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91e5432c0155469e254cf2bc08eb7ab93c59e8efa3e358d343b2155ad0c73c2e +size 495258 diff --git a/2023/iQuery_ Instruments As Queries for Audio-Visual Sound Separation/layout.json b/2023/iQuery_ Instruments As Queries for Audio-Visual Sound Separation/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..0f3ff8a35f5a46d098e706d9d648857bcd669f61 --- /dev/null +++ b/2023/iQuery_ Instruments As Queries for Audio-Visual Sound Separation/layout.json @@ -0,0 +1,10167 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 88, + 103, + 505, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 103, + 505, + 121 + ], + "spans": [ + { + "bbox": [ + 88, + 103, + 505, + 121 + ], + "type": "text", + "content": "iQuery: Instruments as Queries for Audio-Visual Sound Separation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 93, + 142, + 499, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 142, + 499, + 158 + ], + "spans": [ + { + "bbox": [ + 93, + 142, + 499, + 158 + ], + "type": "text", + "content": "Jiaben Chen" + }, + { + "bbox": [ + 93, + 142, + 499, + 158 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 93, + 142, + 499, + 158 + ], + "type": "text", + "content": ", Renrui Zhang" + }, + { + "bbox": [ + 93, + 142, + 499, + 158 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 93, + 142, + 499, + 158 + ], + "type": "text", + "content": ", Dongze Lian" + }, + { + "bbox": [ + 93, + 142, + 499, + 158 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 93, + 142, + 499, + 158 + ], + "type": "text", + "content": ", Jiaqi Yang" + }, + { + "bbox": [ + 93, + 142, + 499, + 158 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 93, + 142, + 499, + 158 + ], + "type": "text", + "content": ", Ziyao Zeng" + }, + { + "bbox": [ + 93, + 142, + 499, + 158 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 93, + 142, + 499, + 158 + ], + "type": "text", + "content": ", Jianbo Shi" + }, + { + "bbox": [ + 93, + 142, + 499, + 158 + ], + "type": "inline_equation", + "content": "^{5}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 165, + 525, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 165, + 525, + 195 + ], + "spans": [ + { + "bbox": [ + 68, + 165, + 525, + 195 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 68, + 165, + 525, + 195 + ], + "type": "text", + "content": "UC San Diego " + }, + { + "bbox": [ + 68, + 165, + 525, + 195 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 68, + 165, + 525, + 195 + ], + "type": "text", + "content": "The Chinese University of Hong Kong " + }, + { + "bbox": [ + 68, + 165, + 525, + 195 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 68, + 165, + 525, + 195 + ], + "type": "text", + "content": "National University of Singapore " + }, + { + "bbox": [ + 68, + 
165, + 525, + 195 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 68, + 165, + 525, + 195 + ], + "type": "text", + "content": "ShanghaiTech University " + }, + { + "bbox": [ + 68, + 165, + 525, + 195 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 68, + 165, + 525, + 195 + ], + "type": "text", + "content": "University of Pennsylvania" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 227, + 192, + 239 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 227, + 192, + 239 + ], + "spans": [ + { + "bbox": [ + 143, + 227, + 192, + 239 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 251, + 290, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 251, + 290, + 515 + ], + "spans": [ + { + "bbox": [ + 47, + 251, + 290, + 515 + ], + "type": "text", + "content": "Current audio-visual separation methods share a standard architecture design where an audio encoder-decoder network is fused with visual encoding features at the encoder bottleneck. This design confounds the learning of multi-modal feature encoding with robust sound decoding for audio separation. To generalize to a new instrument, one must fine-tune the entire visual and audio network for all musical instruments. We re-formulate the visual-sound separation task and propose Instruments as Queries (iQuery) with a flexible query expansion mechanism. Our approach ensures cross-modal consistency and cross-instrument disentanglement. We utilize \"visually named\" queries to initiate the learning of audio queries and use cross-modal attention to remove potential sound source interference at the estimated waveforms. To generalize to a new instrument or event class, drawing inspiration from the text-prompt design, we insert additional queries as audio prompts while freezing the attention mechanism. Experimental results on three benchmarks demonstrate that our iQuery improves audio-visual sound source separation performance. Code is available at https://github.com/JiabenChen/iQuery." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 538, + 128, + 551 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 538, + 128, + 551 + ], + "spans": [ + { + "bbox": [ + 47, + 538, + 128, + 551 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 558, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 558, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 558, + 288, + 714 + ], + "type": "text", + "content": "Humans use multi-modal perception to understand complex activities. To mimic this skill, researchers have studied audio-visual learning [3, 17, 33] by exploiting the synchronization and correlation between auditory and visual information. In this paper, we focus on the sound source separation task, where we aim to identify and separate different sound components within a given sound mixture [60, 74]. Following the \"Mix-and-Separate\" framework [32, 34, 81], we learn to separate sounds by mixing multiple audio signals to generate an artificially complex auditory representation and then use it as a self-supervised task to separate individual sounds from the mixture. 
The works [26, 53, 89] showed that visually-guided sound separation is achievable" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 228, + 522, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 228, + 522, + 240 + ], + "spans": [ + { + "bbox": [ + 305, + 228, + 522, + 240 + ], + "type": "text", + "content": "by leveraging visual information of the sound source." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 242, + 546, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 242, + 546, + 410 + ], + "spans": [ + { + "bbox": [ + 304, + 242, + 546, + 410 + ], + "type": "text", + "content": "Prevalent architectures take a paradigm of a visual-conditioned encoder-decoder architecture [23, 26, 58, 88], where encoded features from audio and visual modalities are fused at the bottleneck for decoding to yield separated spectrogram masks. However, it is noticed that this design often creates a \"muddy\" sound and \"cross-talk\" that leaks from one instrument to another. To create a clean sound separation, one would like the audio-visual encoders to be (1) self-consistent within the music instrument and (2) contrasting across. One approach [27] added critic functions explicitly to enforce these properties. Another method [99] used a two-step process with the second motion-conditioned generation process to filter out unwanted cross-talks. We call these approaches decoder-centric." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 412, + 547, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 412, + 547, + 568 + ], + "spans": [ + { + "bbox": [ + 304, + 412, + 547, + 568 + ], + "type": "text", + "content": "Most recent works focus on addressing the \"muddy\" and \"cross-talk\" issue by improving fine details of audio-visual feature extraction: for example, adding human motion encoding as in [23, 88, 99], or cross-modality representations [58] via self-supervised learning. Once the feature representations are learned, the standard encoder-decoder FCN style segmentation is used as an afterthought. We consider these methods feature-centric. The standard designs have two limitations. First, it is hard to balance decoder-centric and feature-centric approaches that enforce a common goal of cross-modality consistency and cross-instrument contrast. Second, to learn a new musical instrument, one has to retrain the entire network via self-supervision." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 570, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 547, + 714 + ], + "type": "text", + "content": "To tackle these limitations, we propose a query-based sound separation framework, iQuery. We recast this problem from a query-based transformer segmentation view, where each query learns to segment one instrument, similar to visual segmentation [15, 16, 65, 78]. We treat each audio query as a learnable prototype that parametrically models one sound class. We fuse visual modality with audio by \"visually naming\" the audio query: using object detection to assign visual features to the corresponding audio query. Within the transformer decoder, the visually initialized queries interact with the audio features through cross-attention, thus ensuring cross-modality consistency. 
Self" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "14675" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 73, + 72, + 521, + 277 + ], + "blocks": [ + { + "bbox": [ + 73, + 72, + 521, + 277 + ], + "lines": [ + { + "bbox": [ + 73, + 72, + 521, + 277 + ], + "spans": [ + { + "bbox": [ + 73, + 72, + 521, + 277 + ], + "type": "image", + "image_path": "bebce95567aff6300ad0b46f8f5ac5744a1e9a11a1842bb242f0a58a0d86d530.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 286, + 545, + 353 + ], + "lines": [ + { + "bbox": [ + 46, + 286, + 545, + 353 + ], + "spans": [ + { + "bbox": [ + 46, + 286, + 545, + 353 + ], + "type": "text", + "content": "Figure 1. Pipeline of iQuery. Our system takes as input an audio mixture and its corresponding video frames, and disentangles separated sound sources for each video. Our pipeline consists of two main modules: an Audio-Visual Feature Extraction module which extracts audio, object, and motion features through three corresponding encoders, and an Audio-Visual Transformer module for sound separation. The query-based sound separation transformer has three key components: 1) \"visually-named\" audio queries are initialized by extracted object features, 2) cross-attention between the audio queries with static image features, dynamic motion features and audio features, 3) self-attention between the learned audio queries to ensure cross-instrument contrast." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 374, + 287, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 374, + 287, + 421 + ], + "spans": [ + { + "bbox": [ + 46, + 374, + 287, + 421 + ], + "type": "text", + "content": "attention across the audio queries for different instruments implements a soft version of the cross-instrument contrast objective. With this design, we unify the feature-centric with the decoder-centric approach." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 422, + 287, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 422, + 287, + 540 + ], + "spans": [ + { + "bbox": [ + 46, + 422, + 287, + 540 + ], + "type": "text", + "content": "How do we achieve generalizability? 
Motivated by recent success in fine-tuning domain transfer with the text-prompt [28] and visual-prompt designs [7, 35, 41, 86], we adaptively insert the additional queries as audio prompts to accommodate new instruments. With the audio-prompt design, we freeze most of the transformer network parameters and only fine-tune the newly added query embedding layer. We conjecture that the learned prototype queries are instrument-dependent, while the cross/self-attention mechanism in the transformer is instrument-independent." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 59, + 542, + 173, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 542, + 173, + 553 + ], + "spans": [ + { + "bbox": [ + 59, + 542, + 173, + 553 + ], + "type": "text", + "content": "Our main contributions are:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 58, + 558, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 58, + 558, + 287, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 558, + 287, + 617 + ], + "spans": [ + { + "bbox": [ + 58, + 558, + 287, + 617 + ], + "type": "text", + "content": "- To the best of our knowledge, we are the first to study the audio-visual sound separation problem from a tunable query view to disentangle different sound sources explicitly through learnable audio prototypes in a mask transformer architecture." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 58, + 624, + 287, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 624, + 287, + 658 + ], + "spans": [ + { + "bbox": [ + 58, + 624, + 287, + 658 + ], + "type": "text", + "content": "- To generalize to a new sound class, we design an audio prompt for fine-tuning with most of the transformer architecture frozen." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 58, + 666, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 666, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 58, + 666, + 287, + 713 + ], + "type": "text", + "content": "- Extensive experiments and ablations verify the effectiveness of our core designs for disentangle-. ment, demonstrating performance gain for audiovisual sound source separation on three benchmarks." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 373, + 389, + 385 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 373, + 389, + 385 + ], + "spans": [ + { + "bbox": [ + 306, + 373, + 389, + 385 + ], + "type": "text", + "content": "2. Related work" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 402, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 402, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 402, + 545, + 713 + ], + "type": "text", + "content": "Audio-Visual Sound Source Separation. Recent years have witnessed promising results of audiovisual multi-modality joint learning [49, 62, 67, 75, 83] in domains like audio-visual sound source localization [4, 5, 14, 36, 55, 61, 63, 93], audio-visual event localization [68, 76, 77, 95] and sound synthesis from videos [25, 52, 54, 80, 97]. Sound source separation, a challenging classical problem, has been researched extensively in the audio signal processing area [11, 22, 37, 40]. A well-known example is the cocktail party problem [31, 48] in speech domain [1, 21]. 
Works have been proposed recently for tasks like speech separation [2, 27, 39, 51, 70], active sound separation [45, 46] and on-screen sound separation [25, 53, 71, 72]. Our work focuses on audio-visual sound separation. Recent audio-visual sound separation methods could be classified generally into two categories: feature-centric and decoder-centric as discussed in Sec. 1. Feature-centric methods exploit various ways for visual feature extraction selection to aid this multi-modality task. Some works consider frame-based appearance features (static frame features [24, 79, 89] or detected object regions [26, 66]) for extracting visual semantic cues (e.g., instrument categories) to guide sound separation. [12, 13] adds embeddings from an audio-visual scene graph at the U-Net bottleneck to model the visual context of sound sources. Based on the assessment that motion signals" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "14676" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 124, + 70, + 473, + 261 + ], + "blocks": [ + { + "bbox": [ + 124, + 70, + 473, + 261 + ], + "lines": [ + { + "bbox": [ + 124, + 70, + 473, + 261 + ], + "spans": [ + { + "bbox": [ + 124, + 70, + 473, + 261 + ], + "type": "image", + "image_path": "e7d991a0c55c03ea373ee6ab8b3489166941f8aed60c891607dcf99071308c67.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 277, + 547, + 323 + ], + "lines": [ + { + "bbox": [ + 46, + 277, + 547, + 323 + ], + "spans": [ + { + "bbox": [ + 46, + 277, + 547, + 323 + ], + "type": "text", + "content": "Figure 2. Qualitative results on MUSIC test set. The first column shows the mixed video frames, the second to the fourth columns compare our predicted spectrogram masks against masks yielded by state-of-the-art algorithm [66] and ground truth masks, and the fifth to the seventh columns visualize separated spectrograms. [66] produces blurry masks and contains unseparated components from another sound source, while our system successfully generates accurate mask and clean spectrograms as the ground truth." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 342, + 289, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 342, + 289, + 498 + ], + "spans": [ + { + "bbox": [ + 46, + 342, + 289, + 498 + ], + "type": "text", + "content": "could more tightly couple the moving sounding object with corresponding variations of sounds, recent approaches focus on including motion information into the pipeline (e.g., optical flow [88], and human pose [23,58]). Based on this, [94] proposes a framework to search for the optimal fusion strategy for multi-modal features. Decoder-centric methods explore prevention of \"cross-talk\" between the audio sources in the decoder stage. [99] designs a two-stage pipeline, where the second stage conducts a counterfactual synthesis through motion features to remove potentially leaked sound. The approach of [27] added critic functions explicitly to enforce cross-modal consistency and cross-instrument contrast." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 510, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 510, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 510, + 289, + 715 + ], + "type": "text", + "content": "Vision Transformers. Motivated by transformer's success in natural language processing [73], transformers were first introduced in computer vision for image classification as ViT [20]. Given the superior long-range modeling capacity, many follow-up works [47, 69, 82] have upgraded ViT to achieve higher performance and widely surpassed convolutional neural networks. Further, transformer-based models are adopted for various downstream tasks, such as 2D object detection [9, 91, 100], semantic/instance segmentation [65, 78, 92], 3D object detection [50, 85], shape recognition [84, 90] and video understanding [6, 42]. Particularly, following the pipeline from DETR [9], MaskFormer [16] and Mask2Former [15] represent each mask candidate as a learnable query and conduct parallel decoding for instance-level segmentation. However, only few approaches [39, 58, 71, 72, 99] have extended transformer for audio-visual sound separation fields. [58] adopts a BERT" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 342, + 547, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 342, + 547, + 475 + ], + "spans": [ + { + "bbox": [ + 304, + 342, + 547, + 475 + ], + "type": "text", + "content": "[18] architecture to learn visual, pose, and audio feature representations. [99] designs an audio-motion transformer to refine sound separation results through audio-motion feature fusion. These methods focus mainly on learning better contextualized multi-modality representations through an encoder transformer. In contrast, our mask transformer-based network focuses on the entire process of visual-audio separation task. We disentangle different sound sources through independent learnable query prototypes and segment each time-frequency region on the spectrogram via mask prediction in an end-to-end fashion." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 486, + 362, + 498 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 486, + 362, + 498 + ], + "spans": [ + { + "bbox": [ + 306, + 486, + 362, + 498 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 506, + 545, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 506, + 545, + 604 + ], + "spans": [ + { + "bbox": [ + 304, + 506, + 545, + 604 + ], + "type": "text", + "content": "We first describe the formulation of the audio-visual sound separation task and introduce our pipeline iQuery briefly in Sec. 3.1. Then we introduce networks for learning representations from visual and audio modalities in Sec. 3.2 and our proposed cross-modality cross-attention transformer architecture for visual sound separation in Sec. 3.3. Finally, we introduce our adaptive query fine-tuning strategy through designs of flexible tunable queries in Sec. 3.4." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 610, + 373, + 623 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 610, + 373, + 623 + ], + "spans": [ + { + "bbox": [ + 306, + 610, + 373, + 623 + ], + "type": "text", + "content": "3.1. 
Overview" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 629, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 629, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 629, + 547, + 715 + ], + "type": "text", + "content": "As mentioned before, our goal is to disentangle the audio mixture concerning its corresponding sound sources in the given mixture by using so-called queries. Following previous works [21, 89], we adopt a commonly used \"Mix-and-Separate\" self-supervised source separation procedure. Given " + }, + { + "bbox": [ + 304, + 629, + 547, + 715 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 629, + 547, + 715 + ], + "type": "text", + "content": " video clips with accompanying audio signal: " + }, + { + "bbox": [ + 304, + 629, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\{(V_k,s_k(t))\\}_{k\\in [1,K]}" + }, + { + "bbox": [ + 304, + 629, + 547, + 715 + ], + "type": "text", + "content": ", we create a sound mixture:" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "14677" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 71, + 289, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 71, + 289, + 240 + ], + "spans": [ + { + "bbox": [ + 46, + 71, + 289, + 240 + ], + "type": "inline_equation", + "content": "s_{mix}(t) = \\sum_{k=1}^{K} s_k(t)" + }, + { + "bbox": [ + 46, + 71, + 289, + 240 + ], + "type": "text", + "content": " as training data. Our disentanglement goal is to separate sounds " + }, + { + "bbox": [ + 46, + 71, + 289, + 240 + ], + "type": "inline_equation", + "content": "s_k(t)" + }, + { + "bbox": [ + 46, + 71, + 289, + 240 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 46, + 71, + 289, + 240 + ], + "type": "inline_equation", + "content": "s_{mix}(t)" + }, + { + "bbox": [ + 46, + 71, + 289, + 240 + ], + "type": "text", + "content": " for sound sources in " + }, + { + "bbox": [ + 46, + 71, + 289, + 240 + ], + "type": "inline_equation", + "content": "V_k" + }, + { + "bbox": [ + 46, + 71, + 289, + 240 + ], + "type": "text", + "content": ", respectively. The pipeline, as illustrated in Fig. 1, is mainly composed of two components: an Audio-Visual Feature Extraction module and a Mask Transformer-based Sound Separation module. First, in the feature extraction module, the object detector & image encoder, and video encoder extract object-level visual features and motion features from video clip " + }, + { + "bbox": [ + 46, + 71, + 289, + 240 + ], + "type": "inline_equation", + "content": "V_k" + }, + { + "bbox": [ + 46, + 71, + 289, + 240 + ], + "type": "text", + "content": ". The audio network yields an audio feature and an audio embedding from the given sound mixture " + }, + { + "bbox": [ + 46, + 71, + 289, + 240 + ], + "type": "inline_equation", + "content": "s_{mix}(t)" + }, + { + "bbox": [ + 46, + 71, + 289, + 240 + ], + "type": "text", + "content": ". After that, a cross-modal transformer decoder attends to visual and audio features and outputs audio mask embeddings, which are further combined with audio embeddings for sound separation." 
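The "Mix-and-Separate" procedure described in this overview is straightforward to prototype. Below is a minimal, hypothetical PyTorch sketch of how one training example could be assembled: K single-source waveforms are summed into a mixture and converted to time-frequency spectrograms with an STFT, and the per-source spectrograms are kept as separation targets. The STFT parameters, clip length, and all names are illustrative assumptions, not the settings used in the paper.

```python
import torch

def mix_and_separate_pair(waveforms, n_fft=1022, hop_length=256):
    """Build one self-supervised training example from K single-source clips.

    waveforms: list of K mono waveforms (1-D tensors of equal length).
    Returns the mixture magnitude spectrogram S_mix of shape (F, T) and the
    per-source spectrograms S_k used later to derive mask targets.
    """
    window = torch.hann_window(n_fft)
    s_mix = torch.stack(waveforms).sum(dim=0)       # s_mix(t) = sum_k s_k(t)

    def spectrogram(x):
        # Complex STFT followed by magnitude; shape (F, T) with F = n_fft//2 + 1
        return torch.stft(x, n_fft=n_fft, hop_length=hop_length,
                          window=window, return_complex=True).abs()

    S_mix = spectrogram(s_mix)
    S_sources = [spectrogram(s) for s in waveforms]
    return S_mix, S_sources

# Example with K = 2 randomly generated six-second clips at 11.025 kHz.
clips = [torch.randn(66150), torch.randn(66150)]
S_mix, S_sources = mix_and_separate_pair(clips)
```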
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 250, + 223, + 262 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 250, + 223, + 262 + ], + "spans": [ + { + "bbox": [ + 47, + 250, + 223, + 262 + ], + "type": "text", + "content": "3.2. Audio-Visual Feature Extraction" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 269, + 289, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 269, + 289, + 402 + ], + "spans": [ + { + "bbox": [ + 46, + 269, + 289, + 402 + ], + "type": "text", + "content": "Object Detector & Image Encoder. To initialize learning of audio queries, we assign object-level visual appearance features to the corresponding queries, to create \"visually named\" queries. In the implementation, following [26], we use a Faster R-CNN object detector with ResNet-101 backbone. For frames in a given video clip " + }, + { + "bbox": [ + 46, + 269, + 289, + 402 + ], + "type": "inline_equation", + "content": "V_{k}" + }, + { + "bbox": [ + 46, + 269, + 289, + 402 + ], + "type": "text", + "content": ", the object detector is utilized to acquire the detected objects set " + }, + { + "bbox": [ + 46, + 269, + 289, + 402 + ], + "type": "inline_equation", + "content": "O_{k}" + }, + { + "bbox": [ + 46, + 269, + 289, + 402 + ], + "type": "text", + "content": ". After that, we adopt a pre-trained ResNet-18 similar to [66], followed by a linear layer and max pooling to yield object-level features " + }, + { + "bbox": [ + 46, + 269, + 289, + 402 + ], + "type": "inline_equation", + "content": "F_{O_k}\\in \\mathbb{R}^{C_O}" + }, + { + "bbox": [ + 46, + 269, + 289, + 402 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 269, + 289, + 402 + ], + "type": "inline_equation", + "content": "C_O" + }, + { + "bbox": [ + 46, + 269, + 289, + 402 + ], + "type": "text", + "content": " denotes channel dimension of object features." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 414, + 289, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 414, + 289, + 534 + ], + "spans": [ + { + "bbox": [ + 46, + 414, + 289, + 534 + ], + "type": "text", + "content": "Video Encoder. The video encoder maps the video frames from " + }, + { + "bbox": [ + 46, + 414, + 289, + 534 + ], + "type": "inline_equation", + "content": "V_{k} \\in \\mathbb{R}^{3 \\times T_{k} \\times H_{k} \\times W_{k}}" + }, + { + "bbox": [ + 46, + 414, + 289, + 534 + ], + "type": "text", + "content": " into a motion feature representation. In contrast with previous motion representations [23, 58, 88, 99], we use self-supervised video representation obtained from a 3D video encoder of I3D [10] pre-trained by FAME [19]. The model is pre-trained contrastively to concentrate on moving foregrounds. Finally, a spatial pooling is applied to obtain motion embedding " + }, + { + "bbox": [ + 46, + 414, + 289, + 534 + ], + "type": "inline_equation", + "content": "F_{M_k} \\in \\mathbb{R}^{C_M \\times T_k'}" + }, + { + "bbox": [ + 46, + 414, + 289, + 534 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 414, + 289, + 534 + ], + "type": "inline_equation", + "content": "C_M" + }, + { + "bbox": [ + 46, + 414, + 289, + 534 + ], + "type": "text", + "content": " denotes the dimension of the motion feature." 
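As a rough illustration of this feature-extraction stage, the sketch below derives a single object-level "visual name" from pre-cropped detection regions with a ResNet-18 trunk plus a linear projection, and reduces a 3D video encoder output to a motion embedding by spatial pooling. The detector, the FAME-pretrained I3D weights, and the feature dimensions are replaced with stand-ins; names such as object_crops and video_features are hypothetical.

```python
import torch
import torch.nn as nn
from torchvision.models import resnet18

C_O = 512  # assumed object feature dimension

# Appearance branch: ResNet-18 with its classification head removed,
# followed by a linear projection (weights here are untrained stand-ins).
trunk = nn.Sequential(*list(resnet18().children())[:-1])   # -> (B, 512, 1, 1)
proj = nn.Linear(512, C_O)

def object_feature(object_crops):
    """object_crops: (num_boxes, 3, 224, 224) crops of one detected instrument
    across frames. Returns one C_O-dim feature used to 'visually name' a query."""
    f = trunk(object_crops).flatten(1)       # (num_boxes, 512)
    f = proj(f)                              # (num_boxes, C_O)
    return f.max(dim=0).values               # pool over detections -> (C_O,)

def motion_feature(video_features):
    """video_features: (C_M, T', H', W') from a 3D video encoder such as I3D;
    spatial pooling keeps the temporal axis, giving F_M of shape (C_M, T')."""
    return video_features.mean(dim=(-2, -1))
```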
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "type": "text", + "content": "Audio Network. The audio network takes the form of skip-connected U-Net style architectures [59] following [26, 66, 89]. Given the input audio mixture " + }, + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "type": "inline_equation", + "content": "s_{mix}(t)" + }, + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "type": "text", + "content": ", we first apply a Short-Time Fourier Transform (STFT) [30] to convert the raw waveform to a 2D Time-Frequency spectrogram representation " + }, + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "type": "inline_equation", + "content": "S_{mix} \\in \\mathbb{R}^{F \\times T}" + }, + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "type": "text", + "content": ", which is then fed into the U-Net encoder to obtain an audio feature map " + }, + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "type": "inline_equation", + "content": "F_A \\in \\mathbb{R}^{C_A \\times \\frac{F}{S} \\times \\frac{T}{S}}" + }, + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "type": "inline_equation", + "content": "C_A" + }, + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "type": "text", + "content": " denotes the number of channels and " + }, + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "type": "text", + "content": " denotes stride of audio feature map) at the bottleneck. A U-Net decoder gradually upsamples the audio features to yield audio embeddings " + }, + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\varepsilon_A \\in \\mathbb{R}^{C_\\varepsilon \\times F \\times T}" + }, + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "type": "inline_equation", + "content": "C_\\varepsilon" + }, + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "type": "text", + "content": " denotes the dimension of audio embeddings), which is combined further with the transformer mask embeddings to generate the separated sound spectrogram mask " + }, + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "type": "inline_equation", + "content": "M_k" + }, + { + "bbox": [ + 46, + 545, + 289, + 715 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 305, + 72, + 453, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 453, + 84 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 453, + 84 + ], + "type": "text", + "content": "3.3. 
Audio-Visual Transformer" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 90, + 547, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 90, + 547, + 187 + ], + "spans": [ + { + "bbox": [ + 304, + 90, + 547, + 187 + ], + "type": "text", + "content": "Our cross-modality sound separation transformer contains the transformer decoder [73] with " + }, + { + "bbox": [ + 304, + 90, + 547, + 187 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 90, + 547, + 187 + ], + "type": "text", + "content": " queries (i.e., learnable prototypes), and utilizes the extracted object features " + }, + { + "bbox": [ + 304, + 90, + 547, + 187 + ], + "type": "inline_equation", + "content": "F_{O_k}" + }, + { + "bbox": [ + 304, + 90, + 547, + 187 + ], + "type": "text", + "content": ", motion embeddings " + }, + { + "bbox": [ + 304, + 90, + 547, + 187 + ], + "type": "inline_equation", + "content": "F_{M_k}" + }, + { + "bbox": [ + 304, + 90, + 547, + 187 + ], + "type": "text", + "content": " and audio features " + }, + { + "bbox": [ + 304, + 90, + 547, + 187 + ], + "type": "inline_equation", + "content": "F_A" + }, + { + "bbox": [ + 304, + 90, + 547, + 187 + ], + "type": "text", + "content": " to yield " + }, + { + "bbox": [ + 304, + 90, + 547, + 187 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 90, + 547, + 187 + ], + "type": "text", + "content": " mask embeddings " + }, + { + "bbox": [ + 304, + 90, + 547, + 187 + ], + "type": "inline_equation", + "content": "\\varepsilon_{mask} \\in \\mathbb{R}^{C_{\\varepsilon} \\times N}" + }, + { + "bbox": [ + 304, + 90, + 547, + 187 + ], + "type": "text", + "content": " for spectrogram mask prediction of separated sound " + }, + { + "bbox": [ + 304, + 90, + 547, + 187 + ], + "type": "inline_equation", + "content": "s_k(t)" + }, + { + "bbox": [ + 304, + 90, + 547, + 187 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 90, + 547, + 187 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 90, + 547, + 187 + ], + "type": "text", + "content": " denotes maximum of the pre-defined instrument types." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 198, + 547, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 198, + 547, + 282 + ], + "spans": [ + { + "bbox": [ + 304, + 198, + 547, + 282 + ], + "type": "text", + "content": "Audio query prototypes. We denote audio queries as " + }, + { + "bbox": [ + 304, + 198, + 547, + 282 + ], + "type": "inline_equation", + "content": "Q \\in \\mathbb{R}^{C_Q \\times N}" + }, + { + "bbox": [ + 304, + 198, + 547, + 282 + ], + "type": "text", + "content": " to represent different instruments, which are initialized by \"visually naming\" audio queries. 
Specifically, \"visually naming\" means that we assign object features " + }, + { + "bbox": [ + 304, + 198, + 547, + 282 + ], + "type": "inline_equation", + "content": "F_{O_k}" + }, + { + "bbox": [ + 304, + 198, + 547, + 282 + ], + "type": "text", + "content": " to the corresponding query in " + }, + { + "bbox": [ + 304, + 198, + 547, + 282 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 304, + 198, + 547, + 282 + ], + "type": "text", + "content": " with element-wise addition to yield \"visually-named\" queries " + }, + { + "bbox": [ + 304, + 198, + 547, + 282 + ], + "type": "inline_equation", + "content": "Q_v" + }, + { + "bbox": [ + 304, + 198, + 547, + 282 + ], + "type": "text", + "content": ", which are then fed into the transformer decoder cross-attention layers." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 293, + 547, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 293, + 547, + 437 + ], + "spans": [ + { + "bbox": [ + 304, + 293, + 547, + 437 + ], + "type": "text", + "content": "Cross-attention layers. In the decoder, we stack one motion-aware decoder layer and three audio-aware decoder layers. The \"visually-named\" queries " + }, + { + "bbox": [ + 304, + 293, + 547, + 437 + ], + "type": "inline_equation", + "content": "Q_{v}" + }, + { + "bbox": [ + 304, + 293, + 547, + 437 + ], + "type": "text", + "content": " first interact temporally with motion features " + }, + { + "bbox": [ + 304, + 293, + 547, + 437 + ], + "type": "inline_equation", + "content": "F_{M_k}" + }, + { + "bbox": [ + 304, + 293, + 547, + 437 + ], + "type": "text", + "content": " in the motion-aware decoder layer with motion cross-attention by Attention " + }, + { + "bbox": [ + 304, + 293, + 547, + 437 + ], + "type": "inline_equation", + "content": "(Q_{v},F_{M_{k}},F_{M_{k}})" + }, + { + "bbox": [ + 304, + 293, + 547, + 437 + ], + "type": "text", + "content": ". This is followed by an FFN to generate the motion-decoded queries " + }, + { + "bbox": [ + 304, + 293, + 547, + 437 + ], + "type": "inline_equation", + "content": "Q^{\\prime}" + }, + { + "bbox": [ + 304, + 293, + 547, + 437 + ], + "type": "text", + "content": ", which are then fed into three audio-aware decoder layers to adaptively interact with audio features " + }, + { + "bbox": [ + 304, + 293, + 547, + 437 + ], + "type": "inline_equation", + "content": "F_{A}" + }, + { + "bbox": [ + 304, + 293, + 547, + 437 + ], + "type": "text", + "content": ", each of which consists of a self-attention, an audio cross-attention computed by Attention " + }, + { + "bbox": [ + 304, + 293, + 547, + 437 + ], + "type": "inline_equation", + "content": "(Q^{\\prime},F_{A},F_{A})" + }, + { + "bbox": [ + 304, + 293, + 547, + 437 + ], + "type": "text", + "content": ", and an FFN. 
The output " + }, + { + "bbox": [ + 304, + 293, + 547, + 437 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 293, + 547, + 437 + ], + "type": "text", + "content": " audio segmentation embeddings " + }, + { + "bbox": [ + 304, + 293, + 547, + 437 + ], + "type": "inline_equation", + "content": "\\varepsilon_{Q}\\in \\mathbb{R}^{C_{Q}\\times N}" + }, + { + "bbox": [ + 304, + 293, + 547, + 437 + ], + "type": "text", + "content": " is computed by" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 347, + 443, + 545, + 456 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 443, + 545, + 456 + ], + "spans": [ + { + "bbox": [ + 347, + 443, + 545, + 456 + ], + "type": "interline_equation", + "content": "\\varepsilon_ {Q} = \\operatorname {A u d i o D e c o d e r} _ {\\times 3} \\left(Q ^ {\\prime}, F _ {A}, F _ {A}\\right), \\tag {1}", + "image_path": "a26f32f7dbde2a517c3288a0889dc86f8ca03e9b5f2fae3678a4c8f0c2a150bc.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 461, + 547, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 461, + 547, + 498 + ], + "spans": [ + { + "bbox": [ + 304, + 461, + 547, + 498 + ], + "type": "text", + "content": "where AudioDecoder stands for our audio-aware decoder layer. Similar to [9, 16], the decoder generates all audio segmentation embeddings parallelly." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "text", + "content": "Separated mask prediction. Through the above decoder, the " + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "text", + "content": " audio segmentation embeddings " + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "inline_equation", + "content": "\\varepsilon_{Q}" + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "text", + "content": " are converted to " + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "text", + "content": " mask embeddings " + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "inline_equation", + "content": "\\varepsilon_{mask} \\in \\mathbb{R}^{C_{\\varepsilon} \\times N}" + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "text", + "content": " through a MLP with two hidden layers, where dimension " + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "inline_equation", + "content": "C_{\\varepsilon}" + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "text", + "content": " is identical to dimension of audio embeddings " + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "inline_equation", + "content": "\\varepsilon_{A} \\in \\mathbb{R}^{C_{\\varepsilon} \\times F \\times T}" + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "text", + "content": ". 
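To make the query path concrete, here is a compressed, hypothetical sketch of the decoder described above: learnable query embeddings are "visually named" by adding object features, pass through one motion cross-attention layer and three audio-aware layers (self-attention, audio cross-attention, FFN), and are projected by a two-layer MLP into mask embeddings, mirroring Eq. (1). The layer sizes, the use of nn.TransformerDecoderLayer, and all variable names are assumptions for illustration, not the released implementation.

```python
import torch
import torch.nn as nn

class AudioVisualDecoder(nn.Module):
    def __init__(self, num_queries=21, d_model=256, c_eps=32, nhead=8):
        super().__init__()
        self.queries = nn.Embedding(num_queries, d_model)          # audio prototypes Q
        self.motion_attn = nn.MultiheadAttention(d_model, nhead, batch_first=True)
        self.motion_ffn = nn.Sequential(nn.Linear(d_model, d_model), nn.ReLU(),
                                        nn.Linear(d_model, d_model))
        # Each audio-aware layer runs self-attention, cross-attention, then an FFN.
        self.audio_layers = nn.ModuleList(
            nn.TransformerDecoderLayer(d_model, nhead, batch_first=True)
            for _ in range(3))
        self.mask_mlp = nn.Sequential(nn.Linear(d_model, d_model), nn.ReLU(),
                                      nn.Linear(d_model, c_eps))

    def forward(self, obj_feats, motion_feats, audio_feats):
        # obj_feats:    (B, N, d_model) object features aligned with the queries
        # motion_feats: (B, T', d_model) motion tokens
        # audio_feats:  (B, F/S * T/S, d_model) flattened audio feature map
        q_v = self.queries.weight.unsqueeze(0) + obj_feats          # "visually named"
        q, _ = self.motion_attn(q_v, motion_feats, motion_feats)    # motion cross-attn
        q = q + self.motion_ffn(q)
        for layer in self.audio_layers:                              # eps_Q, Eq. (1)
            q = layer(q, audio_feats)
        return self.mask_mlp(q)                                      # (B, N, C_eps) mask embeddings
```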
Then each predicted mask " + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "inline_equation", + "content": "M_{k} \\in \\mathbb{R}^{F \\times T}" + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "text", + "content": " of the separated sound spectrogram is generated by a dot-product between the corresponding mask embedding in " + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "inline_equation", + "content": "\\varepsilon_{mask}" + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "text", + "content": " and audio embedding " + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "inline_equation", + "content": "\\varepsilon_{A}" + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "text", + "content": " from the audio decoder. Finally, we multiply the sound mixture spectrogram " + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "inline_equation", + "content": "S_{mix}" + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "text", + "content": " and the predicted mask " + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "inline_equation", + "content": "M_{k}" + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "text", + "content": " to disentangle sound spectrogram " + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "inline_equation", + "content": "S_{k}" + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "text", + "content": " for sound " + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "inline_equation", + "content": "s_{k}(t)" + }, + { + "bbox": [ + 304, + 510, + 547, + 642 + ], + "type": "text", + "content": " by" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 386, + 647, + 545, + 660 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 386, + 647, + 545, + 660 + ], + "spans": [ + { + "bbox": [ + 386, + 647, + 545, + 660 + ], + "type": "interline_equation", + "content": "S _ {k} = S _ {\\text {m i x}} \\odot M _ {k}, \\tag {2}", + "image_path": "0fea973c7012865b7490ddcae30ee36bad52be5554f61f8afc27f408e8fdab5b.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 666, + 547, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 666, + 547, + 703 + ], + "spans": [ + { + "bbox": [ + 304, + 666, + 547, + 703 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 666, + 547, + 703 + ], + "type": "inline_equation", + "content": "\\odot" + }, + { + "bbox": [ + 304, + 666, + 547, + 703 + ], + "type": "text", + "content": " denotes the element-wise multiplication operator. Ultimately, separated sound signal " + }, + { + "bbox": [ + 304, + 666, + 547, + 703 + ], + "type": "inline_equation", + "content": "s_k(t)" + }, + { + "bbox": [ + 304, + 666, + 547, + 703 + ], + "type": "text", + "content": " is produced by applying inverse STFT to the separated spectrogram " + }, + { + "bbox": [ + 304, + 666, + 547, + 703 + ], + "type": "inline_equation", + "content": "S_{k}" + }, + { + "bbox": [ + 304, + 666, + 547, + 703 + ], + "type": "text", + "content": "." 
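The mask-prediction and resynthesis step (Eq. (2)) then amounts to an inner product between mask embeddings and the per-pixel audio embeddings, spectrogram masking, and an inverse STFT. The sketch below reuses the mixture phase for reconstruction and clamps the predicted ratio masks to be non-negative; both are common simplifications assumed here rather than details taken from the paper.

```python
import torch

def separate_sources(mask_embeddings, audio_embeddings, mix_complex_spec,
                     n_fft=1022, hop_length=256):
    """mask_embeddings:  (N, C_eps) one embedding per audio query.
    audio_embeddings: (C_eps, F, T) per-pixel embeddings from the U-Net decoder.
    mix_complex_spec: (F, T) complex STFT of the sound mixture."""
    # M_k(f, t) = <eps_mask_k, eps_A(f, t)> for every time-frequency bin
    masks = torch.einsum("nc,cft->nft", mask_embeddings, audio_embeddings)
    masks = masks.clamp(min=0.0)

    # S_k = S_mix * M_k, applied to the magnitude while reusing the mixture phase
    magnitude, phase = mix_complex_spec.abs(), mix_complex_spec.angle()
    window = torch.hann_window(n_fft)
    waveforms = []
    for mask in masks:
        spec_k = torch.polar(magnitude * mask, phase)
        waveforms.append(torch.istft(spec_k, n_fft=n_fft,
                                     hop_length=hop_length, window=window))
    return masks, waveforms
```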
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "14678" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 48, + 71, + 286, + 133 + ], + "blocks": [ + { + "bbox": [ + 48, + 71, + 286, + 133 + ], + "lines": [ + { + "bbox": [ + 48, + 71, + 286, + 133 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 286, + 133 + ], + "type": "image", + "image_path": "1b63f6aea286d27ae18630fcc1832b89f90211b28f9777a416d1430a45904f23.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 146, + 288, + 202 + ], + "lines": [ + { + "bbox": [ + 46, + 146, + 288, + 202 + ], + "spans": [ + { + "bbox": [ + 46, + 146, + 288, + 202 + ], + "type": "text", + "content": "Figure 3. Audio prompts design. To generalize to new types of instruments/event classes, we propose to insert additional queries (audio prompts) to learn new audio prototypes for unseen classes. With this design, we only fine-tune the query embedding layer while keeping all the other parts of transformer backbone frozen." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 223, + 287, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 223, + 287, + 260 + ], + "spans": [ + { + "bbox": [ + 46, + 223, + 287, + 260 + ], + "type": "text", + "content": "Training objective. Following [26, 89], we set our training objective as optimizing spectrogram masks. The ground truth ratio mask " + }, + { + "bbox": [ + 46, + 223, + 287, + 260 + ], + "type": "inline_equation", + "content": "M_{k}^{GT}" + }, + { + "bbox": [ + 46, + 223, + 287, + 260 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 46, + 223, + 287, + 260 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 46, + 223, + 287, + 260 + ], + "type": "text", + "content": "-th video is calculated as follows," + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 112, + 267, + 287, + 293 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 267, + 287, + 293 + ], + "spans": [ + { + "bbox": [ + 112, + 267, + 287, + 293 + ], + "type": "interline_equation", + "content": "M _ {k} ^ {G T} (t, f) = \\frac {S _ {k} (t , f)}{S _ {m i x} (t , f)}, \\tag {3}", + "image_path": "3ce4b88d60c97e6700745dfd9aa53757751c25eae1a57d3940175af97bb99850.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 299, + 287, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 299, + 287, + 335 + ], + "spans": [ + { + "bbox": [ + 46, + 299, + 287, + 335 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 299, + 287, + 335 + ], + "type": "inline_equation", + "content": "(t,f)" + }, + { + "bbox": [ + 46, + 299, + 287, + 335 + ], + "type": "text", + "content": " denotes time-frequency coordinates. 
We adopt per-pixel " + }, + { + "bbox": [ + 46, + 299, + 287, + 335 + ], + "type": "inline_equation", + "content": "L1" + }, + { + "bbox": [ + 46, + 299, + 287, + 335 + ], + "type": "text", + "content": " loss [87] to optimize the overall sound separation network, sound separation loss " + }, + { + "bbox": [ + 46, + 299, + 287, + 335 + ], + "type": "inline_equation", + "content": "L_{sep}" + }, + { + "bbox": [ + 46, + 299, + 287, + 335 + ], + "type": "text", + "content": " is defined as," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 345, + 287, + 377 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 345, + 287, + 377 + ], + "spans": [ + { + "bbox": [ + 107, + 345, + 287, + 377 + ], + "type": "interline_equation", + "content": "L _ {s e p} = \\sum_ {k = 1} ^ {K} \\left| \\left| M _ {k} - M _ {k} ^ {G T} \\right| \\right| _ {1}, \\tag {4}", + "image_path": "35031d4edb9747264bb1ae6a71d355ced9e7c2aed9b5fe47cdd84ccd438bbba3.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 386, + 256, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 386, + 256, + 399 + ], + "spans": [ + { + "bbox": [ + 47, + 386, + 256, + 399 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 386, + 256, + 399 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 47, + 386, + 256, + 399 + ], + "type": "text", + "content": " denotes number of mixed sounds in " + }, + { + "bbox": [ + 47, + 386, + 256, + 399 + ], + "type": "inline_equation", + "content": "S_{mix}" + }, + { + "bbox": [ + 47, + 386, + 256, + 399 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 406, + 234, + 419 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 406, + 234, + 419 + ], + "spans": [ + { + "bbox": [ + 47, + 406, + 234, + 419 + ], + "type": "text", + "content": "3.4. Tunable Queries as Audio Prompts" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 425, + 287, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 425, + 287, + 604 + ], + "spans": [ + { + "bbox": [ + 46, + 425, + 287, + 604 + ], + "type": "text", + "content": "With the flexible design of tunable queries as learnable prototypes, our pipeline is more friendly to generalizing to new types of instruments. Unlike previous methods that need to finetune the entire mask generation U-Net, we could insert additional queries (i.e., audio prompts) for the new instruments. Such a method enables us only need to finetune the query embedding layer for learning new audio query prototypes in Sec. 3.3 of our transformer architecture while keeping all cross-attention layers frozen (see Fig.3). 
Specifically, we add " + }, + { + "bbox": [ + 46, + 425, + 287, + 604 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 46, + 425, + 287, + 604 + ], + "type": "text", + "content": " new audio prompts " + }, + { + "bbox": [ + 46, + 425, + 287, + 604 + ], + "type": "inline_equation", + "content": "P \\in \\mathbb{R}^{C_Q \\times L}" + }, + { + "bbox": [ + 46, + 425, + 287, + 604 + ], + "type": "text", + "content": " to original pre-trained audio queries " + }, + { + "bbox": [ + 46, + 425, + 287, + 604 + ], + "type": "inline_equation", + "content": "Q \\in \\mathbb{R}^{C_Q \\times N}" + }, + { + "bbox": [ + 46, + 425, + 287, + 604 + ], + "type": "text", + "content": ", then the query embedding layer for the prompted learnable prototypes " + }, + { + "bbox": [ + 46, + 425, + 287, + 604 + ], + "type": "inline_equation", + "content": "Q_{prompted} \\in \\mathbb{R}^{C_Q \\times (N + L)}" + }, + { + "bbox": [ + 46, + 425, + 287, + 604 + ], + "type": "text", + "content": " is the only layer learnable in our transformer decoder, while keeping the transformer backbone frozen." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 615, + 128, + 628 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 615, + 128, + 628 + ], + "spans": [ + { + "bbox": [ + 47, + 615, + 128, + 628 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 635, + 174, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 635, + 174, + 647 + ], + "spans": [ + { + "bbox": [ + 47, + 635, + 174, + 647 + ], + "type": "text", + "content": "4.1. Experimental Settings" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 653, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 287, + 713 + ], + "type": "text", + "content": "Datasets. We perform experiments on three widely-used datasets: MUSIC [89], MUSIC-21 [88], and Audio-Visual Event (AVE) [29, 68]. MUSIC dataset spans 11 musical instrument categories: accordion, acoustic guitar, cello, clarinet, erhu, flute, saxophone, trumpet, tuba, violin," + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 72, + 545, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 276 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 276 + ], + "type": "text", + "content": "and xylophone. This dataset is relatively clean, and sound sources are always within the scene, collected for the audio-visual sound separation task. We utilize 503 online available solo videos and split them into training/validation/testing sets with 453/25/25 videos from 11 different categories, respectively, following same settings as [66]. MUSIC-21 dataset [88] is an enlarged version of MUSIC [89], which contains 10 more common instrument categories: bagpipe, banjo, bassoon, congas, drum, electric bass, guzheng, piano, pipa, and ukulele. We utilize 1,092 available solo videos and split them into train/test sets with 894/198 videos respectively from 21 different categories. Note that we follow the same training/testing split as [23, 99]. AVE dataset is a general audio-visual learning dataset, covering 28 event classes such as animal behaviors, vehicles, and human activities. We follow the same setting as [99], and utilize 4143 videos from AVE [68] dataset." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 289, + 545, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 289, + 545, + 564 + ], + "spans": [ + { + "bbox": [ + 304, + 289, + 545, + 564 + ], + "type": "text", + "content": "Baselines. For MUSIC dataset, we compare our method with four recent methods for sound separation. NMF-MFCC [64] is a non-learnable audio-only method, we consider reporting this result from [26, 58] on MUSIC test set. We also compare with two representative audio-visual sound separation baselines: Sound-of-Pixels [89] and Co-Separation [26]. We retrained these two methods with the same training data and split them as ours for a fair comparison. Finally, we compare our approach with a most recent publicly-available baseline CCoL [66], which has the same training setting as ours. For MUSIC-21 dataset, we compare our method with six recently proposed approaches: Sound-of-Pixels [89], Co-Separation [26], Sound-of-Motions [88], Music Gesture [23], TriBERT [58] and AMnet [99]. For [58], since " + }, + { + "bbox": [ + 304, + 289, + 545, + 564 + ], + "type": "inline_equation", + "content": "12.27\\%" + }, + { + "bbox": [ + 304, + 289, + 545, + 564 + ], + "type": "text", + "content": " of the training samples are missing in their given training split, we consider their reported result as a baseline comparison. Finally, for AVE dataset, we compare our method with six state-of-the-art methods. Since we conduct our experiments with the same setting as AMnet [99], we report results from [99] for Multisensory [53], Sound-of-Pixels [89], Sound-of-Motions [88], Minus-Plus [79], Cascaded Opponent Filter [98] as baseline comparisons." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 579, + 545, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 579, + 545, + 674 + ], + "spans": [ + { + "bbox": [ + 304, + 579, + 545, + 674 + ], + "type": "text", + "content": "Evaluation metrics. The sound separation performance is evaluated by the popular adopted mir_eval library [57] in terms of standard metrics: Signal to Distortion Ratio (SDR), Signal to Interference Ratio (SIR), and Signal to Artifact Ratio (SAR). SDR measures the combination of interference and artifacts, SIR measures interference, and SAR measures artifacts. For all three metrics, a higher value indicates better results." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": "Implementation Details. 
For MUSIC [89] and MUSIC-21 [88] datasets, we sub-sample the audio at " + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "11\\mathrm{kHz}" + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": ", and each" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "14679" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 126, + 70, + 223, + 140 + ], + "blocks": [ + { + "bbox": [ + 126, + 70, + 223, + 140 + ], + "lines": [ + { + "bbox": [ + 126, + 70, + 223, + 140 + ], + "spans": [ + { + "bbox": [ + 126, + 70, + 223, + 140 + ], + "type": "image", + "image_path": "b640fbc59bf991253b8dfd81f79418df5009c6672c85148a0c4da3a770a7e2a8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 158, + 288, + 192 + ], + "lines": [ + { + "bbox": [ + 47, + 158, + 288, + 192 + ], + "spans": [ + { + "bbox": [ + 47, + 158, + 288, + 192 + ], + "type": "text", + "content": "Figure 4. Human evaluation results for sound source separation on mixtures of different instrument types. Our system is able to separate sounds with better actual perceptual quality." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 72, + 206, + 259, + 327 + ], + "blocks": [ + { + "bbox": [ + 72, + 206, + 259, + 327 + ], + "lines": [ + { + "bbox": [ + 72, + 206, + 259, + 327 + ], + "spans": [ + { + "bbox": [ + 72, + 206, + 259, + 327 + ], + "type": "image", + "image_path": "9618d924d53041221de055cf5aecac86821b51bc0fc7db5f8d851afc54ed96f0.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 342, + 287, + 376 + ], + "lines": [ + { + "bbox": [ + 46, + 342, + 287, + 376 + ], + "spans": [ + { + "bbox": [ + 46, + 342, + 287, + 376 + ], + "type": "text", + "content": "Figure 5. Visualization of audio query embeddings with t-SNE, different instrument categories are color-coded. Our audio queries have learned to cluster by different classes of sound." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 399, + 287, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 399, + 287, + 543 + ], + "spans": [ + { + "bbox": [ + 46, + 399, + 287, + 543 + ], + "type": "text", + "content": "audio sample is approximately 6 seconds. STFT is applied using a Hann window size of 1022 and a hop length of 256, yielding a " + }, + { + "bbox": [ + 46, + 399, + 287, + 543 + ], + "type": "inline_equation", + "content": "512 \\times 256" + }, + { + "bbox": [ + 46, + 399, + 287, + 543 + ], + "type": "text", + "content": " Time-Frequency audio representation. 
It is then re-sampled on a log-frequency scale to obtain a magnitude spectrogram with " + }, + { + "bbox": [ + 46, + 399, + 287, + 543 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 46, + 399, + 287, + 543 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 399, + 287, + 543 + ], + "type": "inline_equation", + "content": "F = 256" + }, + { + "bbox": [ + 46, + 399, + 287, + 543 + ], + "type": "text", + "content": ". Detected objects in frames are resized to " + }, + { + "bbox": [ + 46, + 399, + 287, + 543 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 46, + 399, + 287, + 543 + ], + "type": "text", + "content": " and randomly cropped to the size of " + }, + { + "bbox": [ + 46, + 399, + 287, + 543 + ], + "type": "inline_equation", + "content": "224 \\times 224" + }, + { + "bbox": [ + 46, + 399, + 287, + 543 + ], + "type": "text", + "content": ". We set the video frame rate as 1 FPS, and randomly-selected three frames as input for the object detector. While for AVE [68] dataset, audio signal is sub-sampled at " + }, + { + "bbox": [ + 46, + 399, + 287, + 543 + ], + "type": "inline_equation", + "content": "22\\mathrm{kHz}" + }, + { + "bbox": [ + 46, + 399, + 287, + 543 + ], + "type": "text", + "content": ", and we use the full frame rate(29.97 FPS). Other settings are the same as MUSIC except STFT hop length is set as 184, following [99]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 544, + 287, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 544, + 287, + 628 + ], + "spans": [ + { + "bbox": [ + 46, + 544, + 287, + 628 + ], + "type": "text", + "content": "For MUSIC dataset [89], we use the Faster R-CNN object detector pre-trained by [26] on Open Images [38]. For MUSIC-21 [88] and AVE [68] datasets, since additional musical and general classes are not covered for this object detector, we adopt a pre-trained Detic detector [96] based on CLIP [56] to detect the 10 more instruments in MUSIC-21 dataset [88] and 28 event classes in AVE dataset [68]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "content": "We utilize 8 heads for all attention modules and select the maximum " + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "content": " objects (number of queries) as 15, 25, and 30 for MUSIC, MUSIC-21 and AVE. The video encoder [19] and the object detector is pre-trained and kept frozen during training and inference. The multi-layer perception (MLP) for separated mask prediction has 2 hidden layers of 256 channels following [16]. 
Audio feature " + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "inline_equation", + "content": "F_{A}" + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "content": ", motion feature" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 345, + 99, + 509, + 261 + ], + "blocks": [ + { + "bbox": [ + 345, + 99, + 509, + 261 + ], + "lines": [ + { + "bbox": [ + 345, + 99, + 509, + 261 + ], + "spans": [ + { + "bbox": [ + 345, + 99, + 509, + 261 + ], + "type": "image", + "image_path": "42a96aa078bdcd9978bcf18c671d5f06e38df317fe0ea9d895598e8f91ccf8c5.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 274, + 545, + 329 + ], + "lines": [ + { + "bbox": [ + 305, + 274, + 545, + 329 + ], + "spans": [ + { + "bbox": [ + 305, + 274, + 545, + 329 + ], + "type": "text", + "content": "Figure 6. Qualitative results on AVE test dataset. Beyond restricted musical instruments, our model is also able to handle general sound separation tasks (e.g. sounds of galloping race car and frying food on the first two rows; sounds of driving motorcycles and speeches on the last two rows)." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 350, + 546, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 350, + 546, + 483 + ], + "spans": [ + { + "bbox": [ + 305, + 350, + 546, + 483 + ], + "type": "inline_equation", + "content": "F_{M}" + }, + { + "bbox": [ + 305, + 350, + 546, + 483 + ], + "type": "text", + "content": ", object feature " + }, + { + "bbox": [ + 305, + 350, + 546, + 483 + ], + "type": "inline_equation", + "content": "F_{O}" + }, + { + "bbox": [ + 305, + 350, + 546, + 483 + ], + "type": "text", + "content": ", and audio queries " + }, + { + "bbox": [ + 305, + 350, + 546, + 483 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 305, + 350, + 546, + 483 + ], + "type": "text", + "content": " have a channel dimension of 256. And we set the channel dimension of both audio embeddings " + }, + { + "bbox": [ + 305, + 350, + 546, + 483 + ], + "type": "inline_equation", + "content": "\\varepsilon_{A}" + }, + { + "bbox": [ + 305, + 350, + 546, + 483 + ], + "type": "text", + "content": " and mask embeddings " + }, + { + "bbox": [ + 305, + 350, + 546, + 483 + ], + "type": "inline_equation", + "content": "\\varepsilon_{M}" + }, + { + "bbox": [ + 305, + 350, + 546, + 483 + ], + "type": "text", + "content": " as 32. The epoch number is 80, and batch size is set to 8. We use AdamW [43] for the mask transformer with a weight decay of " + }, + { + "bbox": [ + 305, + 350, + 546, + 483 + ], + "type": "inline_equation", + "content": "10^{-4}" + }, + { + "bbox": [ + 305, + 350, + 546, + 483 + ], + "type": "text", + "content": " and Adam for all other networks as optimizer selection. The learning rate of the transformer is set as " + }, + { + "bbox": [ + 305, + 350, + 546, + 483 + ], + "type": "inline_equation", + "content": "10^{-4}" + }, + { + "bbox": [ + 305, + 350, + 546, + 483 + ], + "type": "text", + "content": " and decreases by multiplying 0.1 at 60-th epoch. We set the learning rate for other networks as " + }, + { + "bbox": [ + 305, + 350, + 546, + 483 + ], + "type": "inline_equation", + "content": "10^{-4}" + }, + { + "bbox": [ + 305, + 350, + 546, + 483 + ], + "type": "text", + "content": ", decreased by multiplying 0.1 at 30-th and 50-th epoch, respectively. 
Training is conducted on 8 NVIDIA Titan V GPUs." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 491, + 511, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 491, + 511, + 504 + ], + "spans": [ + { + "bbox": [ + 306, + 491, + 511, + 504 + ], + "type": "text", + "content": "4.2. Audio-Visual Sound Source Separation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "content": "Quantitative evaluation. Table. 1 demonstrates quantitative results for sound separation results against state-of-the-art methods on MUSIC dataset [89]. Our method outperforms baseline models in separation accuracy measured by all evaluation metrics. Our method outperforms the most recent publicly available state-ofthe-art algorithm [66] by " + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "3.43\\mathrm{dB}" + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "content": " in terms of SDR score. Regarding quantitative results on MUSIC21 dataset [88], we demonstrate the performance comparison in Table. 2. Again, our method outperforms baseline models in terms of SDR metric. Performance on the previous two datasets demonstrate our model's ability to disentangle musical sounds. To further verify the scalability of our proposed method to general audio-source separation problems, we perform quantitative comparisons on AVE dataset in Table. 3. As is demonstrated, we surpass the state-of-the-art algorithm [99] by " + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "1.31\\mathrm{dB}" + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "content": " in terms of SDR score. AVE is" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "14680" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 63, + 70, + 269, + 159 + ], + "blocks": [ + { + "bbox": [ + 63, + 70, + 269, + 159 + ], + "lines": [ + { + "bbox": [ + 63, + 70, + 269, + 159 + ], + "spans": [ + { + "bbox": [ + 63, + 70, + 269, + 159 + ], + "type": "table", + "html": "
<table><tr><td>Methods</td><td>SDR↑</td><td>SIR↑</td><td>SAR↑</td></tr>
<tr><td>NMF-MFCC [64]</td><td>0.92</td><td>5.68</td><td>6.84</td></tr>
<tr><td>Sound-of-Pixels [89]</td><td>4.23</td><td>9.39</td><td>9.85</td></tr>
<tr><td>Co-Separation [26]</td><td>6.54</td><td>11.37</td><td>9.46</td></tr>
<tr><td>CCoL [66]</td><td>7.74</td><td>13.22</td><td>11.54</td></tr>
<tr><td>iQuery (Ours)</td><td>11.17</td><td>15.84</td><td>14.27</td></tr></table>
", + "image_path": "4d50ec237999731a25a363d661b0d9be33de93dd58d7b6423817469bb374d00b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 56, + 199, + 276, + 313 + ], + "blocks": [ + { + "bbox": [ + 47, + 163, + 287, + 185 + ], + "lines": [ + { + "bbox": [ + 47, + 163, + 287, + 185 + ], + "spans": [ + { + "bbox": [ + 47, + 163, + 287, + 185 + ], + "type": "text", + "content": "Table 1. Audio-visual sound separation results on MUSIC. Best results in bold and second-best results in Blue." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 56, + 199, + 276, + 313 + ], + "lines": [ + { + "bbox": [ + 56, + 199, + 276, + 313 + ], + "spans": [ + { + "bbox": [ + 56, + 199, + 276, + 313 + ], + "type": "table", + "html": "
<table><tr><td>Methods</td><td>SDR↑</td><td>SIR↑</td><td>SAR↑</td></tr>
<tr><td>Sound-of-Pixels [89]*</td><td>7.52</td><td>13.01</td><td>11.53</td></tr>
<tr><td>Co-Separation [26]*</td><td>7.64</td><td>13.80</td><td>11.30</td></tr>
<tr><td>Sound-of-Motions [88]*</td><td>8.31</td><td>14.82</td><td>13.11</td></tr>
<tr><td>Music Gesture [23]*</td><td>10.12</td><td>15.81</td><td>-</td></tr>
<tr><td>TriBERT [58]</td><td>10.09</td><td>17.45</td><td>12.80</td></tr>
<tr><td>AMnet [99]*</td><td>11.08</td><td>18.00</td><td>13.22</td></tr>
<tr><td>iQuery (Ours)</td><td>11.12</td><td>15.98</td><td>14.16</td></tr></table>
", + "image_path": "bf47437b2911661dc98823f1b0ea170b970efdfd371f7e80cea9108783103a63.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 363, + 287, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 363, + 287, + 411 + ], + "spans": [ + { + "bbox": [ + 46, + 363, + 287, + 411 + ], + "type": "text", + "content": "a general dataset containing scenes like male and female speeches, animal sounds, and vehicle sounds. This clearly shows our model's adaptivity to more general problems of sound source separation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 425, + 287, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 425, + 287, + 533 + ], + "spans": [ + { + "bbox": [ + 46, + 425, + 287, + 533 + ], + "type": "text", + "content": "Qualitative evaluation. Fig. 2 illustrates qualitative sound separation results on MUSIC dataset. It can be seen that our method disentangles sound sources cleaner and more accurately, with less \"muddy\" sound. Fig. 6 provides additional qualitative examples on AVE dataset, and this again illustrates our model's good performance on general sound source separation cases. Both qualitative and quantitative results verify the superiority of our designed sound query-based segmentation pipeline iQuery." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 545, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 545, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 545, + 287, + 713 + ], + "type": "text", + "content": "Human evaluation. Our quantitative evaluation shows the superiority of our model compared with baseline models, however, studies [8] have shown that audio separation quality could not be truthfully determined purely by the widely used mir_eval [57] metrics. Due to this reason, we further conduct a subjective human evaluation to study the actual perceptual quality of sound-separation results. Specifically, we compare the sound separation result of our model and the publicly available best baseline model [66] on MUSIC [89]. We collected 50 testing samples for all 11 classes from the test set, and each testing sample contains separated sounds with a length of 6 seconds predicted by our model and baseline [66] for the same sound mixture. Ground truth sound is also provided for each sample as a" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 317, + 70, + 535, + 184 + ], + "blocks": [ + { + "bbox": [ + 47, + 316, + 286, + 338 + ], + "lines": [ + { + "bbox": [ + 47, + 316, + 286, + 338 + ], + "spans": [ + { + "bbox": [ + 47, + 316, + 286, + 338 + ], + "type": "text", + "content": "Table 2. Audio-visual sound separation results on MUSIC-21. The results noted by * are obtained from [23, 99]." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 70, + 535, + 184 + ], + "lines": [ + { + "bbox": [ + 317, + 70, + 535, + 184 + ], + "spans": [ + { + "bbox": [ + 317, + 70, + 535, + 184 + ], + "type": "table", + "html": "
<table><tr><td>Methods</td><td>SDR↑</td><td>SIR↑</td><td>SAR↑</td></tr>
<tr><td>Multisensory [53]*</td><td>0.84</td><td>3.44</td><td>6.69</td></tr>
<tr><td>Sound-of-Pixels [89]*</td><td>1.21</td><td>7.08</td><td>6.84</td></tr>
<tr><td>Sound-of-Motions [88]*</td><td>1.48</td><td>7.41</td><td>7.39</td></tr>
<tr><td>Minus-Plus [79]*</td><td>1.96</td><td>7.95</td><td>8.08</td></tr>
<tr><td>Cascaded Filter [98]*</td><td>2.68</td><td>8.18</td><td>8.48</td></tr>
<tr><td>AMnet [99]*</td><td>3.71</td><td>9.15</td><td>11.00</td></tr>
<tr><td>iQuery (Ours)</td><td>5.02</td><td>8.21</td><td>12.32</td></tr></table>
", + "image_path": "74494a1ab496ba3a5437483c3d6a37a7644e450b06d2d92a8843c6699843afb1.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 322, + 220, + 528, + 297 + ], + "blocks": [ + { + "bbox": [ + 306, + 186, + 545, + 209 + ], + "lines": [ + { + "bbox": [ + 306, + 186, + 545, + 209 + ], + "spans": [ + { + "bbox": [ + 306, + 186, + 545, + 209 + ], + "type": "text", + "content": "Table 3. Audio-visual sound separation results on AVE. The results noted by * are obtained from [99]." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 322, + 220, + 528, + 297 + ], + "lines": [ + { + "bbox": [ + 322, + 220, + 528, + 297 + ], + "spans": [ + { + "bbox": [ + 322, + 220, + 528, + 297 + ], + "type": "table", + "html": "
<table><tr><td>Methods</td><td>SDR↑</td><td>SIR↑</td><td>SAR↑</td></tr>
<tr><td>Sound-of-Pixels [89]</td><td>4.11</td><td>8.17</td><td>9.84</td></tr>
<tr><td>Co-Separation [26]</td><td>5.37</td><td>9.85</td><td>8.72</td></tr>
<tr><td>CCoL [66]</td><td>6.74</td><td>11.94</td><td>10.22</td></tr>
<tr><td>iQuery (Ours)</td><td>8.04</td><td>11.60</td><td>13.21</td></tr></table>
", + "image_path": "9324a1a91a701b0c13ff39d526fe07f1fd9c8657deec2e4d7dde2117cb718617.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 300, + 545, + 355 + ], + "lines": [ + { + "bbox": [ + 305, + 300, + 545, + 355 + ], + "spans": [ + { + "bbox": [ + 305, + 300, + 545, + 355 + ], + "type": "text", + "content": "Table 4. Fine-tuning sound separation performance comparison. All methods are pretrained on MUSIC dataset without one particular instrument and then fine-tuned on this new data. Baseline models are tuned with whole network unfrozen, and we keep our transformer backbone frozen." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 376, + 545, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 376, + 545, + 485 + ], + "spans": [ + { + "bbox": [ + 304, + 376, + 545, + 485 + ], + "type": "text", + "content": "reference. The experiment is conducted by 40 participants separately. For each participant, the orders of our model and baseline [66] are randomly shuffled, and we ask the participant to answer \"Which sound separation result is more close to the ground truth audio?\" for each sample. Statistical results are shown in Fig. 4. Notably, our method significantly surpasses the compared baseline with a winning rate of " + }, + { + "bbox": [ + 304, + 376, + 545, + 485 + ], + "type": "inline_equation", + "content": "72.45\\%" + }, + { + "bbox": [ + 304, + 376, + 545, + 485 + ], + "type": "text", + "content": ". This additionally demonstrate the better actual perceptual performance of our model." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 496, + 545, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 496, + 545, + 568 + ], + "spans": [ + { + "bbox": [ + 304, + 496, + 545, + 568 + ], + "type": "text", + "content": "Learned Query Embedding. To visualize that our proposed model has indeed learned to disentangle different sound sources through learnable queries, we show t-SNE embeddings of our learnable queries in MUSIC test set [89]. As is shown in Fig. 5, our queries tend to cluster by different instrument classes, learning representative prototypes." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 575, + 509, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 575, + 509, + 588 + ], + "spans": [ + { + "bbox": [ + 306, + 575, + 509, + 588 + ], + "type": "text", + "content": "4.3. Extendable Audio Prompt Fine-tuning" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 594, + 545, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 545, + 700 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 545, + 700 + ], + "type": "text", + "content": "Table. 4 evaluates our approach's generalization ability compared with previous methods. We conduct fine-tuning experiments by leave-one-out cross-validation. Baseline models are fine-tuned on the new instrument with all the networks structure unfrozen. With the design of audio prompts discussed in Sec. 3.4, we keep most of our transformer parameters frozen, only fine-tuning the query embedding layer, which has much fewer parameters (0.048% of the total parameters in Transformer)." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 317, + 701, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 701, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 317, + 701, + 545, + 714 + ], + "type": "text", + "content": "Fig. 7 (a) shows our performance with a varying num" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "14681" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 78, + 159, + 159 + ], + "blocks": [ + { + "bbox": [ + 53, + 78, + 159, + 159 + ], + "lines": [ + { + "bbox": [ + 53, + 78, + 159, + 159 + ], + "spans": [ + { + "bbox": [ + 53, + 78, + 159, + 159 + ], + "type": "image", + "image_path": "d1d29e0ea15b66cf5b7bbdbecab26f17454c6e4b80b4481fecbcfaf7ed81ca3d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 102, + 161, + 112, + 170 + ], + "lines": [ + { + "bbox": [ + 102, + 161, + 112, + 170 + ], + "spans": [ + { + "bbox": [ + 102, + 161, + 112, + 170 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 171, + 78, + 276, + 159 + ], + "blocks": [ + { + "bbox": [ + 171, + 78, + 276, + 159 + ], + "lines": [ + { + "bbox": [ + 171, + 78, + 276, + 159 + ], + "spans": [ + { + "bbox": [ + 171, + 78, + 276, + 159 + ], + "type": "image", + "image_path": "885f6b465a1146a5887214e466db127fab6c7dd2d817c50b512258edd4f63142.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 220, + 160, + 231, + 170 + ], + "lines": [ + { + "bbox": [ + 220, + 160, + 231, + 170 + ], + "spans": [ + { + "bbox": [ + 220, + 160, + 231, + 170 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 174, + 287, + 207 + ], + "lines": [ + { + "bbox": [ + 46, + 174, + 287, + 207 + ], + "spans": [ + { + "bbox": [ + 46, + 174, + 287, + 207 + ], + "type": "text", + "content": "Figure 7. Fine-tuning curves of sound separation. (a) Fine-tuning with different number of unseen instrument classes on MUSIC. (b) Fine-tuning with different number of unseen event classes on AVE." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 230, + 287, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 230, + 287, + 385 + ], + "spans": [ + { + "bbox": [ + 46, + 230, + 287, + 385 + ], + "type": "text", + "content": "ber of new instrument classes for fine-tuning on MUSIC dataset. We hold out 1, 2, 4, and 6 instrument classes in the pre-training stage and fine-tune our method on these new classes with only the query embedding layer unfrozen. MUSIC dataset contains in total of 11 instruments. Notably, our method still yields good results when the network is only pre-trained on 5 instrument types, even fewer than the unseen classes. Fig. 7 (b) shows our model's fine-tuning performance on AVE dataset with a varying number of new event classes for fine-tuning. 
We follow the experimental setup on MUSIC, and hold out 2, 4, 6, 8, and 12 event classes for fine-tuning. This demonstrates our model's adaptivity in general sound separation cases." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 395, + 181, + 406 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 395, + 181, + 406 + ], + "spans": [ + { + "bbox": [ + 47, + 395, + 181, + 406 + ], + "type": "text", + "content": "4.4. Contrastive Verification" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "spans": [ + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "text", + "content": "Our learnable query-prototypes network is designed to ensure cross-modality consistency and cross-instrument contrast. We assume these prototypes to draw samples of each particular sound class sample close and push away the different prototypes. The question is whether our network design with \"visually-named\" query trained in the \"Mix-and and-Separate\" can already achieve this goal? As an alternative, we design an auxiliary contrastive loss for verification: to maximize the cosine similarity of separated audio embedding " + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "inline_equation", + "content": "\\varepsilon_{A_k} = \\varepsilon_A \\odot M_k" + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "text", + "content": " and the corresponding query embedding " + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "inline_equation", + "content": "Q_k" + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "text", + "content": ", while minimizing the cosine similarity of separated audio embedding and other query embeddings " + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "inline_equation", + "content": "Q_n" + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "text", + "content": " (where " + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "inline_equation", + "content": "n \\in [1, N], n \\neq k" + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "text", + "content": "). We optimize the cross-entropy losses of the cosine similarity scores to obtain contrastive loss " + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "inline_equation", + "content": "L_{contras}" + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "text", + "content": ". 
To ensure the qualities of audio embedding " + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "inline_equation", + "content": "\\varepsilon_A" + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "text", + "content": " and predicted mask " + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "inline_equation", + "content": "M_i" + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "text", + "content": " are accurate enough, we use a hierarchical task learning strategy [44] to control weights for " + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "inline_equation", + "content": "L_{sep}" + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "inline_equation", + "content": "L_{contras}" + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "text", + "content": " at each epoch. The verification loss " + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "inline_equation", + "content": "L_{verify}" + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "text", + "content": " is: " + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "inline_equation", + "content": "L_{verify} = w_{sep}(e) \\cdot L_{sep} + w_{contras}(e) \\cdot L_{contras}" + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "text", + "content": " denotes training epoch and " + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "inline_equation", + "content": "w(e)" + }, + { + "bbox": [ + 46, + 414, + 288, + 666 + ], + "type": "text", + "content": " denotes loss weight." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": "Ablations of auxiliary contrastive loss, shown in Table. 5, demonstrates that our existing design achieves better results without using explicit contrastive loss. This answers the" + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 341, + 71, + 511, + 142 + ], + "blocks": [ + { + "bbox": [ + 341, + 71, + 511, + 142 + ], + "lines": [ + { + "bbox": [ + 341, + 71, + 511, + 142 + ], + "spans": [ + { + "bbox": [ + 341, + 71, + 511, + 142 + ], + "type": "table", + "html": "
<table><tr><td>Architecture</td><td>SDR↑</td><td>SIR↑</td><td>SAR↑</td></tr>
<tr><td>w/o lrn.</td><td>10.05</td><td>14.27</td><td>13.71</td></tr>
<tr><td>w/o adpt.</td><td>10.89</td><td>15.51</td><td>14.14</td></tr>
<tr><td>w/ con. best</td><td>11.02</td><td>15.91</td><td>14.10</td></tr>
<tr><td>Ours (w/o con)</td><td>11.17</td><td>15.84</td><td>14.27</td></tr></table>
", + "image_path": "089b8d163d490edbcdc85c1970abf5b6493e6724bfb3a8b2bffad262bccd97c0.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 335, + 224, + 515, + 308 + ], + "blocks": [ + { + "bbox": [ + 305, + 145, + 547, + 212 + ], + "lines": [ + { + "bbox": [ + 305, + 145, + 547, + 212 + ], + "spans": [ + { + "bbox": [ + 305, + 145, + 547, + 212 + ], + "type": "text", + "content": "Table 5. Ablations on the auxiliary contrastive loss on MUSIC dataset. \"w/o lrn.\" denotes without learnable linear layer added to queries produced by Transformer decoder; \"w/o adpt.\" denotes that we use a fixed weight for auxiliary contrastive loss without the Hierarchical Task Learning strategy; \"w/ con. best\" denotes our best model design using auxiliary contrastive loss." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 335, + 224, + 515, + 308 + ], + "lines": [ + { + "bbox": [ + 335, + 224, + 515, + 308 + ], + "spans": [ + { + "bbox": [ + 335, + 224, + 515, + 308 + ], + "type": "table", + "html": "
<table><tr><td>Architecture</td><td>SDR↑</td><td>SIR↑</td><td>SAR↑</td></tr>
<tr><td>Random</td><td>6.58</td><td>10.79</td><td>12.77</td></tr>
<tr><td>Self-audio</td><td>10.54</td><td>14.81</td><td>14.23</td></tr>
<tr><td>Self-motion-audio</td><td>10.65</td><td>15.37</td><td>13.96</td></tr>
<tr><td>Dual-stream</td><td>10.46</td><td>15.25</td><td>13.79</td></tr>
<tr><td>Motion-self-audio</td><td>11.17</td><td>15.84</td><td>14.27</td></tr></table>
", + "image_path": "009100f0961cfd4ba307198bfa4541bfaa8935c878d285d378a536e10f389171.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 316, + 311, + 535, + 321 + ], + "lines": [ + { + "bbox": [ + 316, + 311, + 535, + 321 + ], + "spans": [ + { + "bbox": [ + 316, + 311, + 535, + 321 + ], + "type": "text", + "content": "Table 6. Ablations on the design of Transformer decoder." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 305, + 343, + 545, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 343, + 545, + 367 + ], + "spans": [ + { + "bbox": [ + 305, + 343, + 545, + 367 + ], + "type": "text", + "content": "question we raised, that our \"visually-named\" queries are already contrastive enough for sound disentanglement." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 376, + 520, + 388 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 376, + 520, + 388 + ], + "spans": [ + { + "bbox": [ + 306, + 376, + 520, + 388 + ], + "type": "text", + "content": "4.5. Ablations of Transformer decoder design" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 394, + 545, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 394, + 545, + 550 + ], + "spans": [ + { + "bbox": [ + 304, + 394, + 545, + 550 + ], + "type": "text", + "content": "Ablation results of Transformer decoder design on " + }, + { + "bbox": [ + 304, + 394, + 545, + 550 + ], + "type": "inline_equation", + "content": "MUSIC" + }, + { + "bbox": [ + 304, + 394, + 545, + 550 + ], + "type": "text", + "content": " dataset is shown in Table. 6. \"Random\" denotes randomly assigning object features to queries, its poor separation result verifies the importance of our \"visually-named\" queries. \"Self-audio\" means removing the motion cross attention layer, which confirms the effectiveness of adding the motion feature. We tried two baseline designs against our final selection \"Motion-self-audio\", as stated in Sec. 3.3. \"Self-motion-audio\" is a design that puts self-, motion cross-, and audio cross-attention in a single decoder layer. \"Dual-stream\" means we conduct motion and audio cross-attention in parallel then fuse in the decoder layer. Specific details are in the Supplemental material." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 562, + 378, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 562, + 378, + 574 + ], + "spans": [ + { + "bbox": [ + 306, + 562, + 378, + 574 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "content": "We proposed an audio-visual separation method using an adaptable query-based audio mask transformer network. Our network disentangles different sound sources explicitly through learnable audio prototypes initiated by \"visually naming\". We demonstrate cross-modal consistency and cross-instrument contrast via a multi-modal cross-attention mechanism. When generalizing to new unseen classes, our method can be adapted by inserting additional queries as audio prompts while freezing the attention mechanism. Experiments on both musical and general sound datasets demonstrate performance gain by our iQuery." 
+ } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "14682" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 92, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 58, + 92, + 287, + 124 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 92, + 287, + 124 + ], + "spans": [ + { + "bbox": [ + 58, + 92, + 287, + 124 + ], + "type": "text", + "content": "[1] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisserman. The conversation: Deep audio-visual speech enhancement. arXiv preprint arXiv:1804.04121, 2018. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 126, + 287, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 126, + 287, + 168 + ], + "spans": [ + { + "bbox": [ + 58, + 126, + 287, + 168 + ], + "type": "text", + "content": "[2] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisserman. My lips are concealed: Audio-visual speech enhancement through obstructions. arXiv preprint arXiv:1907.04975, 2019. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 58, + 171, + 287, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 171, + 287, + 215 + ], + "spans": [ + { + "bbox": [ + 58, + 171, + 287, + 215 + ], + "type": "text", + "content": "[3] Triantafyllos Afouras, Andrew Owens, Joon Son Chung, and Andrew Zisserman. Self-supervised learning of audiovisual objects from video. In European Conference on Computer Vision (ECCV), pages 208-224, 2020. 1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 58, + 217, + 287, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 217, + 287, + 250 + ], + "spans": [ + { + "bbox": [ + 58, + 217, + 287, + 250 + ], + "type": "text", + "content": "[4] Relja Arandjelovic and Andrew Zisserman. Look, listen and learn. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 609-617, 2017. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 58, + 251, + 287, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 251, + 287, + 283 + ], + "spans": [ + { + "bbox": [ + 58, + 251, + 287, + 283 + ], + "type": "text", + "content": "[5] Relja Arandjelovic and Andrew Zisserman. Objects that sound. In Proceedings of the European conference on computer vision (ECCV), pages 435-451, 2018. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 58, + 285, + 287, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 285, + 287, + 329 + ], + "spans": [ + { + "bbox": [ + 58, + 285, + 287, + 329 + ], + "type": "text", + "content": "[6] Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lucic, and Cordelia Schmid. Vivit: A video vision transformer. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 6836-6846, 2021. 
3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 58, + 331, + 287, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 331, + 287, + 373 + ], + "spans": [ + { + "bbox": [ + 58, + 331, + 287, + 373 + ], + "type": "text", + "content": "[7] Hyojin Bahng, Ali Jahanian, Swami Sankaranarayanan, and Phillip Isola. Visual prompting: Modifying pixel space to adapt pre-trained models. arXiv preprint arXiv:2203.17274, 2022. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 58, + 376, + 287, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 376, + 287, + 430 + ], + "spans": [ + { + "bbox": [ + 58, + 376, + 287, + 430 + ], + "type": "text", + "content": "[8] Estefanía Cano, Derry FitzGerald, and Karlheinz Brandenburg. Evaluation of quality of sound source separation algorithms: Human perception vs quantitative metrics. In 2016 24th European Signal Processing Conference (EUSIPCO), pages 1758-1762. IEEE, 2016. 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 58, + 433, + 287, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 433, + 287, + 486 + ], + "spans": [ + { + "bbox": [ + 58, + 433, + 287, + 486 + ], + "type": "text", + "content": "[9] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European Conference on Computer Vision (ECCV), pages 213-229. Springer, 2020. 3, 4" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 488, + 287, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 488, + 287, + 532 + ], + "spans": [ + { + "bbox": [ + 53, + 488, + 287, + 532 + ], + "type": "text", + "content": "[10] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6299-6308, 2017. 4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 534, + 287, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 534, + 287, + 588 + ], + "spans": [ + { + "bbox": [ + 53, + 534, + 287, + 588 + ], + "type": "text", + "content": "[11] British Chandna, Marius Miron, Jordi Janer, and Emilia Gómez. Monoaural audio source separation using deep convolutional neural networks. In International conference on latent variable analysis and signal separation, pages 258-266. Springer, 2017. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 590, + 287, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 590, + 287, + 633 + ], + "spans": [ + { + "bbox": [ + 53, + 590, + 287, + 633 + ], + "type": "text", + "content": "[12] Moitreya Chatterjee, Narendra Ahuja, and Anoop Cherian. Learning audio-visual dynamics using scene graphs for audio source separation. In Advances in Neural Information Processing Systems (NeurIPS), 2022. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 635, + 287, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 635, + 287, + 678 + ], + "spans": [ + { + "bbox": [ + 53, + 635, + 287, + 678 + ], + "type": "text", + "content": "[13] Moitreya Chatterjee, Jonathan Le Roux, Narendra Ahuja, and Anoop Cherian. Visual scene graphs for audio source separation. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 1204-1213, 2021. 
2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 53, + 681, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 681, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 53, + 681, + 287, + 713 + ], + "type": "text", + "content": "[14] Honglie Chen, Weidi Xie, Triantafyllos Afouras, Arsha Nagrani, Andrea Vedaldi, and Andrew Zisserman. Localizing visual sounds the hard way. In IEEE/CVF Conference on" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 312, + 74, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 331, + 74, + 545, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 74, + 545, + 94 + ], + "spans": [ + { + "bbox": [ + 331, + 74, + 545, + 94 + ], + "type": "text", + "content": "Computer Vision and Pattern Recognition (CVPR), pages 16867-16876, 2021. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 312, + 97, + 545, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 97, + 545, + 151 + ], + "spans": [ + { + "bbox": [ + 312, + 97, + 545, + 151 + ], + "type": "text", + "content": "[15] Bowen Cheng, Ishan Misra, Alexander G Schwing, Alexander Kirillov, and Rohit Girdhar. Masked-attention mask transformer for universal image segmentation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1290–1299, 2022. 1, 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 312, + 153, + 545, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 153, + 545, + 195 + ], + "spans": [ + { + "bbox": [ + 312, + 153, + 545, + 195 + ], + "type": "text", + "content": "[16] Bowen Cheng, Alex Schwing, and Alexander Kirillov. Per-pixel classification is not all you need for semantic segmentation. Advances in Neural Information Processing Systems (NeurIPS), 34, 2021. 1, 3, 4, 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 312, + 198, + 545, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 198, + 545, + 251 + ], + "spans": [ + { + "bbox": [ + 312, + 198, + 545, + 251 + ], + "type": "text", + "content": "[17] Ying Cheng, Ruize Wang, Zhihao Pan, Rui Feng, and Yuejie Zhang. Look, listen, and attend: Co-attention network for self-supervised audio-visual representation learning. In Proceedings of the 28th ACM International Conference on Multimedia, pages 3884–3892, 2020. 1" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 312, + 255, + 545, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 255, + 545, + 296 + ], + "spans": [ + { + "bbox": [ + 312, + 255, + 545, + 296 + ], + "type": "text", + "content": "[18] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 312, + 300, + 545, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 300, + 545, + 364 + ], + "spans": [ + { + "bbox": [ + 312, + 300, + 545, + 364 + ], + "type": "text", + "content": "[19] Shuangrui Ding, Maomao Li, Tianyu Yang, Rui Qian, Haohang Xu, Qingyi Chen, Jue Wang, and Hongkai Xiong. Motion-aware contrastive video representation learning via foreground-background merging. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9716-9726, 2022. 
4, 6" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 312, + 366, + 545, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 366, + 545, + 430 + ], + "spans": [ + { + "bbox": [ + 312, + 366, + 545, + 430 + ], + "type": "text", + "content": "[20] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020.3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 312, + 434, + 545, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 434, + 545, + 487 + ], + "spans": [ + { + "bbox": [ + 312, + 434, + 545, + 487 + ], + "type": "text", + "content": "[21] Ariel Ephrat, Inbar Mosseri, Oran Lang, Tali Dekel, Kevin Wilson, Avinatan Hassidim, William T Freeman, and Michael Rubinstein. Looking to listen at the cocktail party: A speaker-independent audio-visual model for speech separation. arXiv preprint arXiv:1804.03619, 2018. 2, 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 312, + 489, + 545, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 489, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 312, + 489, + 545, + 533 + ], + "type": "text", + "content": "[22] Cédric Févotte, Nancy Bertin, and Jean-Louis Durrieu. Nonnegative matrix factorization with the itakura-saito divergence: With application to music analysis. Neural computation, 21(3):793-830, 2009. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 312, + 535, + 545, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 535, + 545, + 588 + ], + "spans": [ + { + "bbox": [ + 312, + 535, + 545, + 588 + ], + "type": "text", + "content": "[23] Chuang Gan, Deng Huang, Hang Zhao, Joshua B Tenenbaum, and Antonio Torralba. Music gesture for visual sound separation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10478-10487, 2020. 1, 3, 4, 5, 7" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 312, + 590, + 545, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 590, + 545, + 633 + ], + "spans": [ + { + "bbox": [ + 312, + 590, + 545, + 633 + ], + "type": "text", + "content": "[24] Ruohan Gao, Rogerio Feris, and Kristen Grauman. Learning to separate object sounds by watching unlabeled video. In European Conference on Computer Vision (ECCV), pages 35-53, 2018. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 312, + 635, + 545, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 635, + 545, + 668 + ], + "spans": [ + { + "bbox": [ + 312, + 635, + 545, + 668 + ], + "type": "text", + "content": "[25] Ruohan Gao and Kristen Grauman. 2.5 d visual sound. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 324-333, 2019. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 312, + 670, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 670, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 312, + 670, + 545, + 712 + ], + "type": "text", + "content": "[26] Ruohan Gao and Kristen Grauman. Co-separating sounds of visual objects. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 3879-3888, 2019. 
1, 2, 4, 5, 6, 7" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "text", + "content": "14683" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 73, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 73, + 287, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 73, + 287, + 127 + ], + "spans": [ + { + "bbox": [ + 53, + 73, + 287, + 127 + ], + "type": "text", + "content": "[27] Ruohan Gao and Kristen Grauman. Visualvoice: Audiovisual speech separation with cross-modal consistency. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15490-15500. IEEE, 2021. 1, 2, 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 129, + 287, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 129, + 287, + 163 + ], + "spans": [ + { + "bbox": [ + 53, + 129, + 287, + 163 + ], + "type": "text", + "content": "[28] Tianyu Gao, Adam Fisch, and Danqi Chen. Making pretrained language models better few-shot learners. arXiv preprint arXiv:2012.15723, 2020. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 164, + 288, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 164, + 288, + 229 + ], + "spans": [ + { + "bbox": [ + 53, + 164, + 288, + 229 + ], + "type": "text", + "content": "[29] Jort F Gemmeke, Daniel PW Ellis, Dylan Freedman, Aren Jansen, Wade Lawrence, R Channing Moore, Manoj Plakal, and Marvin Ritter. Audio set: An ontology and human-labeled dataset for audio events. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 776-780. IEEE, 2017. 5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 231, + 288, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 231, + 288, + 274 + ], + "spans": [ + { + "bbox": [ + 53, + 231, + 288, + 274 + ], + "type": "text", + "content": "[30] Daniel Griffin and Jae Lim. Signal estimation from modified short-time fourier transform. IEEE Transactions on acoustics, speech, and signal processing, 32(2):236-243, 1984. 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 276, + 286, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 276, + 286, + 297 + ], + "spans": [ + { + "bbox": [ + 53, + 276, + 286, + 297 + ], + "type": "text", + "content": "[31] Simon Haykin and Zhe Chen. The cocktail party problem. Neural computation, 17(9):1875-1902, 2005. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 299, + 287, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 299, + 287, + 354 + ], + "spans": [ + { + "bbox": [ + 53, + 299, + 287, + 354 + ], + "type": "text", + "content": "[32] John R Hershey, Zhuo Chen, Jonathan Le Roux, and Shinji Watanabe. Deep clustering: Discriminative embeddings for segmentation and separation. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 31-35. IEEE, 2016. 
1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 355, + 287, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 355, + 287, + 399 + ], + "spans": [ + { + "bbox": [ + 53, + 355, + 287, + 399 + ], + "type": "text", + "content": "[33] Di Hu, Feiping Nie, and Xuelong Li. Deep multimodal clustering for unsupervised audiovisual learning. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9248-9257, 2019. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 400, + 288, + 455 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 400, + 288, + 455 + ], + "spans": [ + { + "bbox": [ + 53, + 400, + 288, + 455 + ], + "type": "text", + "content": "[34] Po-Sen Huang, Minje Kim, Mark Hasegawa-Johnson, and Paris Smaragdis. Joint optimization of masks and deep recurrent neural networks for monaural source separation. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 23(12):2136-2147, 2015. 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 456, + 287, + 500 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 456, + 287, + 500 + ], + "spans": [ + { + "bbox": [ + 53, + 456, + 287, + 500 + ], + "type": "text", + "content": "[35] Menglin Jia, Luming Tang, Bor-Chun Chen, Claire Cardie, Serge Belongie, Bharath Hariharan, and Ser-Nam Lim. Visual prompt tuning. In European Conference on Computer Vision (ECCV), pages 709-727. Springer, 2022. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 502, + 287, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 502, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 53, + 502, + 287, + 544 + ], + "type": "text", + "content": "[36] Einat Kidron, Yoav Y Schechner, and Michael Elad. Pixels that sound. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), volume 1, pages 88-95. IEEE, 2005. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 547, + 287, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 547, + 287, + 590 + ], + "spans": [ + { + "bbox": [ + 53, + 547, + 287, + 590 + ], + "type": "text", + "content": "[37] Kevin Kilgour, Beat Gfeller, Qingqing Huang, Aren Jansen, Scott Wisdom, and Marco Tagliasacchi. Text-driven separation of arbitrary sounds. arXiv preprint arXiv:2204.05738, 2022. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 591, + 287, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 591, + 287, + 657 + ], + "spans": [ + { + "bbox": [ + 53, + 591, + 287, + 657 + ], + "type": "text", + "content": "[38] Ivan Krasin, Tom Duerig, Neil Alldrin, Vittorio Ferrari, Sami Abu-El-Haija, Alina Kuznetsova, Hassan Rom, Jasper Uijlings, Stefan Popov, Andreas Veit, et al. Openimages: A public dataset for large-scale multi-label and multi-class image classification. Dataset available from https://github.com/openimages, 2(3):18, 2017. 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 658, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 658, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 53, + 658, + 287, + 713 + ], + "type": "text", + "content": "[39] Jiyoung Lee, Soo-Whan Chung, Sunok Kim, Hong-Goo Kang, and Kwanghoon Sohn. Looking into your speech: Learning cross-modal affinity for audio-visual speech separation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1336–1345, 2021. 
2, 3" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 312, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 312, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 312, + 73, + 545, + 106 + ], + "type": "text", + "content": "[40] Jie Hwan Lee, Hyeong-Seok Choi, and Kyogu Lee. Audio query-based music source separation. arXiv preprint arXiv:1908.06593, 2019. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 312, + 108, + 545, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 108, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 312, + 108, + 545, + 150 + ], + "type": "text", + "content": "[41] Dongze Lian, Daquan Zhou, Jiashi Feng, and Xinchao Wang. Scaling & shifting your features: A new baseline for efficient model tuning. In Advances in Neural Information Processing Systems (NeurIPS), 2022. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 312, + 152, + 545, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 152, + 545, + 196 + ], + "spans": [ + { + "bbox": [ + 312, + 152, + 545, + 196 + ], + "type": "text", + "content": "[42] Ze Liu, Jia Ning, Yue Cao, Yixuan Wei, Zheng Zhang, Stephen Lin, and Han Hu. Video swim transformer. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3202-3211, 2022. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 312, + 198, + 545, + 219 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 198, + 545, + 219 + ], + "spans": [ + { + "bbox": [ + 312, + 198, + 545, + 219 + ], + "type": "text", + "content": "[43] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 312, + 220, + 545, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 220, + 545, + 275 + ], + "spans": [ + { + "bbox": [ + 312, + 220, + 545, + 275 + ], + "type": "text", + "content": "[44] Yan Lu, Xinzhu Ma, Lei Yang, Tianzhu Zhang, Yating Liu, Qi Chu, Junjie Yan, and Wanli Ouyang. Geometry uncertainty projection network for monocular 3d object detection. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 3111-3121, 2021. 8" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 312, + 277, + 545, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 277, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 312, + 277, + 545, + 319 + ], + "type": "text", + "content": "[45] Sagnik Majumder, Ziad Al-Halah, and Kristen Grauman. Move2hear: Active audio-visual source separation. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 275–285, 2021. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 312, + 322, + 545, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 322, + 545, + 365 + ], + "spans": [ + { + "bbox": [ + 312, + 322, + 545, + 365 + ], + "type": "text", + "content": "[46] Sagnik Majumder and Kristen Grauman. Active audiovisual separation of dynamic sound sources. In European Conference on Computer Vision (ECCV), pages 551-569. Springer, 2022. 
2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 312, + 366, + 545, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 366, + 545, + 421 + ], + "spans": [ + { + "bbox": [ + 312, + 366, + 545, + 421 + ], + "type": "text", + "content": "[47] Mingyuan Mao, Renrui Zhang, Honghui Zheng, Teli Ma, Yan Peng, Errui Ding, Baochang Zhang, Shumin Han, et al. Dual-stream network for visual recognition. Advances in Neural Information Processing Systems (NeurIPS), 34:25346-25358, 2021. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 312, + 422, + 545, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 422, + 545, + 443 + ], + "spans": [ + { + "bbox": [ + 312, + 422, + 545, + 443 + ], + "type": "text", + "content": "[48] Josh H McDermott. The cocktail party problem. Current Biology, 19(22):R1024-R1027, 2009. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 312, + 445, + 545, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 445, + 545, + 498 + ], + "spans": [ + { + "bbox": [ + 312, + 445, + 545, + 498 + ], + "type": "text", + "content": "[49] Otniel-Bogdan Mercea, Thomas Hummel, A Koepke, and Zeynep Akata. Temporal and cross-modal attention for audio-visual zero-shot learning. In European Conference on Computer Vision (ECCV), pages 488–505. Springer, 2022. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 312, + 501, + 545, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 501, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 312, + 501, + 545, + 544 + ], + "type": "text", + "content": "[50] Ishan Misra, Rohit Girdhar, and Armand Joulin. An end-to-end transformer model for 3d object detection. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 2906-2917, 2021. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 312, + 546, + 545, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 546, + 545, + 590 + ], + "spans": [ + { + "bbox": [ + 312, + 546, + 545, + 590 + ], + "type": "text", + "content": "[51] Juan F Montesinos, Venkatesh S Kadandale, and Gloria Haro. Vovit: Low latency graph-based audio-visual voice separation transformer. In European Conference on Computer Vision (ECCV), pages 310–326. Springer, 2022. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 312, + 591, + 545, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 591, + 545, + 634 + ], + "spans": [ + { + "bbox": [ + 312, + 591, + 545, + 634 + ], + "type": "text", + "content": "[52] Pedro Morgado, Nuno Nvasconcelos, Timothy Langlois, and Oliver Wang. Self-supervised generation of spatial audio for 360 video. Advances in Neural Information Processing Systems (NeurIPS), 31, 2018. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 312, + 635, + 545, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 635, + 545, + 679 + ], + "spans": [ + { + "bbox": [ + 312, + 635, + 545, + 679 + ], + "type": "text", + "content": "[53] Andrew Owens and Alexei A Efros. Audio-visual scene analysis with self-supervised multisensory features. In European Conference on Computer Vision (ECCV), pages 631–648, 2018. 
1, 2, 5, 7" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 312, + 681, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 681, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 312, + 681, + 545, + 713 + ], + "type": "text", + "content": "[54] Andrew Owens, Phillip Isola, Josh McDermott, Antonio Torralba, Edward H Adelson, and William T Freeman. Visually indicated sounds. In IEEE/CVF Conference on Com" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "14684" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 73, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 72, + 73, + 287, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 73, + 287, + 94 + ], + "spans": [ + { + "bbox": [ + 72, + 73, + 287, + 94 + ], + "type": "text", + "content": "puter Vision and Pattern Recognition (CVPR), pages 2405-2413, 2016. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 96, + 287, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 96, + 287, + 139 + ], + "spans": [ + { + "bbox": [ + 53, + 96, + 287, + 139 + ], + "type": "text", + "content": "[55] Rui Qian, Di Hu, Heinrich Dinkel, Mengyue Wu, Ning Xu, and Weiyao Lin. Multiple sound sources localization from coarse to fine. In European Conference on Computer Vision (ECCV), pages 292-308. Springer, 2020. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 54, + 140, + 286, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 140, + 286, + 205 + ], + "spans": [ + { + "bbox": [ + 54, + 140, + 286, + 205 + ], + "type": "text", + "content": "[56] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning (ICML), pages 8748-8763. PMLR, 2021. 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 206, + 286, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 206, + 286, + 270 + ], + "spans": [ + { + "bbox": [ + 54, + 206, + 286, + 270 + ], + "type": "text", + "content": "[57] Colin Raffel, Brian McFee, Eric J Humphrey, Justin Salamon, Oriol Nieto, Dawen Liang, Daniel PW Ellis, and C Colin Raffel. mir.eval: A transparent implementation of common mir metrics. In *In Proceedings of the 15th International Society for Music Information Retrieval Conference*, ISMIR. CiteSeer, 2014. 5, 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 272, + 286, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 272, + 286, + 315 + ], + "spans": [ + { + "bbox": [ + 53, + 272, + 286, + 315 + ], + "type": "text", + "content": "[58] Tanzila Rahman, Mengyu Yang, and Leonid Sigal. Tribert: Full-body human-centric audio-visual representation learning for visual sound separation. arXiv preprint arXiv:2110.13412, 2021. 
1, 3, 4, 5, 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 316, + 286, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 316, + 286, + 370 + ], + "spans": [ + { + "bbox": [ + 54, + 316, + 286, + 370 + ], + "type": "text", + "content": "[59] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In International Conference on Medical image computing and computer-assisted intervention, pages 234-241. Springer, 2015. 4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 54, + 372, + 286, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 372, + 286, + 403 + ], + "spans": [ + { + "bbox": [ + 54, + 372, + 286, + 403 + ], + "type": "text", + "content": "[60] Sam Roweis. One microphone source separation. Advances in Neural Information Processing Systems (NeurIPS), 13, 2000. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 54, + 404, + 286, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 404, + 286, + 458 + ], + "spans": [ + { + "bbox": [ + 54, + 404, + 286, + 458 + ], + "type": "text", + "content": "[61] Arda Senocak, Tae-Hyun Oh, Junsik Kim, Ming-Hsuan Yang, and In So Kweon. Learning to localize sound source in visual scenes. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4358-4366, 2018. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 54, + 460, + 286, + 513 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 460, + 286, + 513 + ], + "spans": [ + { + "bbox": [ + 54, + 460, + 286, + 513 + ], + "type": "text", + "content": "[62] Bowen Shi, Wei-Ning Hsu, Kushal Lakhotia, and Abdelrahman Mohamed. Learning audio-visual speech representation by masked multimodal cluster prediction. In International Conference on Learning Representations (ICLR), 2022. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 515, + 286, + 569 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 515, + 286, + 569 + ], + "spans": [ + { + "bbox": [ + 53, + 515, + 286, + 569 + ], + "type": "text", + "content": "[63] Zengjie Song, Yuxi Wang, Junsong Fan, Tieniu Tan, and Zhaoxiang Zhang. Self-supervised predictive learning: A negative-free method for sound source localization in visual scenes. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3222-3231, 2022. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 571, + 286, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 571, + 286, + 613 + ], + "spans": [ + { + "bbox": [ + 53, + 571, + 286, + 613 + ], + "type": "text", + "content": "[64] Martin Spiertz and Volker Gnann. Source-filter based clustering for monaural blind source separation. In Proceedings of the 12th International Conference on Digital Audio Effects, volume 4, 2009. 5, 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 614, + 286, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 614, + 286, + 658 + ], + "spans": [ + { + "bbox": [ + 53, + 614, + 286, + 658 + ], + "type": "text", + "content": "[65] Robin Strudel, Ricardo Garcia, Ivan Laptev, and Cordelia Schmid. Segmenter: Transformer for semantic segmentation. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 7262-7272, 2021. 
1, 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 659, + 286, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 659, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 53, + 659, + 286, + 712 + ], + "type": "text", + "content": "[66] Yapeng Tian, Di Hu, and Chenliang Xu. Cyclic co-learning of sounding object visual grounding and sound separation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2745-2754, 2021. 2, 3, 4, 5, 6, 7" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 312, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 312, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 312, + 73, + 545, + 116 + ], + "type": "text", + "content": "[67] Yapeng Tian, Dingzeyu Li, and Chenliang Xu. Unified multisensory perception: Weakly-supervised audio-visual video parsing. In European Conference on Computer Vision (ECCV), pages 436–454. Springer, 2020. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 312, + 118, + 545, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 118, + 545, + 161 + ], + "spans": [ + { + "bbox": [ + 312, + 118, + 545, + 161 + ], + "type": "text", + "content": "[68] Yapeng Tian, Jing Shi, Bochen Li, Zhiyao Duan, and Chenliang Xu. Audio-visual event localization in unconstrained videos. In European Conference on Computer Vision (ECCV), pages 247–263, 2018. 2, 5, 6" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 312, + 162, + 545, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 162, + 545, + 216 + ], + "spans": [ + { + "bbox": [ + 312, + 162, + 545, + 216 + ], + "type": "text", + "content": "[69] Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Hervé Jégou. Training data-efficient image transformers & distillation through attention. In International Conference on Machine Learning (ICML), pages 10347-10357. PMLR, 2021. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 312, + 217, + 545, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 217, + 545, + 270 + ], + "spans": [ + { + "bbox": [ + 312, + 217, + 545, + 270 + ], + "type": "text", + "content": "[70] Thanh-Dat Truong, Chi Nhan Duong, Hoang Anh Pham, Bhiksha Raj, Ngan Le, Khoa Luu, et al. The right to talk: An audio-visual transformer approach. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 1105–1114, 2021. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 312, + 272, + 545, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 272, + 545, + 326 + ], + "spans": [ + { + "bbox": [ + 312, + 272, + 545, + 326 + ], + "type": "text", + "content": "[71] Efthymios Tzinis, Scott Wisdom, Aren Jansen, Shawn Hershey, Tal Remez, Dan Ellis, and John R Hershey. Into the wild with audioscope: Unsupervised audio-visual separation of on-screen sounds. In International Conference on Learning Representations (ICLR), 2020. 
2, 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 312, + 327, + 545, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 327, + 545, + 381 + ], + "spans": [ + { + "bbox": [ + 312, + 327, + 545, + 381 + ], + "type": "text", + "content": "[72] Efthymios Tzinis, Scott Wisdom, Tal Remez, and John R Hershey. Audioscopev2: Audio-visual attention architectures for calibrated open-domain on-screen sound separation. In European Conference on Computer Vision (ECCV), pages 368–385. Springer, 2022. 2, 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 312, + 383, + 545, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 383, + 545, + 436 + ], + "spans": [ + { + "bbox": [ + 312, + 383, + 545, + 436 + ], + "type": "text", + "content": "[73] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in Neural Information Processing Systems (NeurIPS), 30, 2017. 3, 4" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 312, + 437, + 545, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 437, + 545, + 491 + ], + "spans": [ + { + "bbox": [ + 312, + 437, + 545, + 491 + ], + "type": "text", + "content": "[74] Tuomas Virtanen. Monaural sound source separation by nonnegative matrix factorization with temporal continuity and sparseness criteria. IEEE transactions on audio, speech, and language processing, 15(3):1066-1074, 2007. 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 312, + 493, + 545, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 493, + 545, + 546 + ], + "spans": [ + { + "bbox": [ + 312, + 493, + 545, + 546 + ], + "type": "text", + "content": "[75] Ho-Hsiang Wu, Prem Seetharaman, Kundan Kumar, and Juan Pablo Bello. Wav2clip: Learning robust audio representations from clip. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 4563-4567. IEEE, 2022. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 312, + 548, + 545, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 548, + 545, + 592 + ], + "spans": [ + { + "bbox": [ + 312, + 548, + 545, + 592 + ], + "type": "text", + "content": "[76] Yu Wu, Linchao Zhu, Yan Yan, and Yi Yang. Dual attention matching for audio-visual event localization. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 6292–6300, 2019. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 312, + 593, + 545, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 593, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 312, + 593, + 545, + 635 + ], + "type": "text", + "content": "[77] Yan Xia and Zhou Zhao. Cross-modal background suppression for audio-visual event localization. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19989-19998, 2022. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 312, + 636, + 545, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 636, + 545, + 690 + ], + "spans": [ + { + "bbox": [ + 312, + 636, + 545, + 690 + ], + "type": "text", + "content": "[78] Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M Alvarez, and Ping Luo. Segformer: Simple and efficient design for semantic segmentation with transformers. Advances in Neural Information Processing Systems (NeurIPS), 34, 2021. 
1, 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 312, + 692, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 692, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 312, + 692, + 545, + 713 + ], + "type": "text", + "content": "[79] Xudong Xu, Bo Dai, and Dahua Lin. Recursive visual sound separation using minus-plus net. In IEEE/CVF In" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "text", + "content": "14685" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 73, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 72, + 73, + 286, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 73, + 286, + 94 + ], + "spans": [ + { + "bbox": [ + 72, + 73, + 286, + 94 + ], + "type": "text", + "content": "ternational Conference on Computer Vision (ICCV), pages 882-891, 2019. 2, 5, 7" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 95, + 287, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 95, + 287, + 149 + ], + "spans": [ + { + "bbox": [ + 53, + 95, + 287, + 149 + ], + "type": "text", + "content": "[80] Xudong Xu, Hang Zhou, Ziwei Liu, Bo Dai, Xiaogang Wang, and Dahua Lin. Visually informed binaural audio generation without binaural audios. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15485-15494, 2021. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 150, + 288, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 150, + 288, + 215 + ], + "spans": [ + { + "bbox": [ + 53, + 150, + 288, + 215 + ], + "type": "text", + "content": "[81] Dong Yu, Morten Kolbaek, Zheng-Hua Tan, and Jesper Jensen. Permutation invariant training of deep models for speaker-independent multi-talker speech separation. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 241-245. IEEE, 2017. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 217, + 288, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 217, + 288, + 281 + ], + "spans": [ + { + "bbox": [ + 53, + 217, + 288, + 281 + ], + "type": "text", + "content": "[82] Li Yuan, Yunpeng Chen, Tao Wang, Weihao Yu, Yujun Shi, Zi-Hang Jiang, Francis EH Tay, Jiashi Feng, and Shuicheng Yan. Tokens-to-token vit: Training vision transformers from scratch onImagenet. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 558-567, 2021. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 282, + 288, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 282, + 288, + 348 + ], + "spans": [ + { + "bbox": [ + 53, + 282, + 288, + 348 + ], + "type": "text", + "content": "[83] Rowan Zellers, Jiasen Lu, Ximing Lu, Youngjae Yu, Yanpeng Zhao, Mohammadreza Salehi, Aditya Kusupati, Jack Hessel, Ali Farhadi, and Yejin Choi. Merlot reserve: Neural script knowledge through vision and language and sound. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 16375-16387, 2022. 
2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 349, + 288, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 349, + 288, + 403 + ], + "spans": [ + { + "bbox": [ + 53, + 349, + 288, + 403 + ], + "type": "text", + "content": "[84] Renrui Zhang, Ziyu Guo, Peng Gao, Rongyao Fang, Bin Zhao, Dong Wang, Yu Qiao, and Hongsheng Li. Pointm2ae: Multi-scale masked autoencoders for hierarchical point cloud pre-training. Advances in Neural Information Processing Systems (NeurIPS), 2022. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 404, + 288, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 404, + 288, + 448 + ], + "spans": [ + { + "bbox": [ + 53, + 404, + 288, + 448 + ], + "type": "text", + "content": "[85] Renrui Zhang, Han Qiu, Tai Wang, Xuanzhuo Xu, Ziyu Guo, Yu Qiao, Peng Gao, and Hongsheng Li. Monodetr: Depth-aware transformer for monocular 3d object detection. arXiv preprint arXiv:2203.13310, 2022. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 449, + 288, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 449, + 288, + 470 + ], + "spans": [ + { + "bbox": [ + 53, + 449, + 288, + 470 + ], + "type": "text", + "content": "[86] Yuanhan Zhang, Kaiyang Zhou, and Ziwei Liu. Neural prompt search. arXiv preprint arXiv:2206.04673, 2022. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 471, + 288, + 513 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 471, + 288, + 513 + ], + "spans": [ + { + "bbox": [ + 53, + 471, + 288, + 513 + ], + "type": "text", + "content": "[87] Hang Zhao, Orazio Gallo, Iuri Frosio, and Jan Kautz. Loss functions for image restoration with neural networks. IEEE Transactions on computational imaging, 3(1):47-57, 2016. 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 514, + 288, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 514, + 288, + 557 + ], + "spans": [ + { + "bbox": [ + 53, + 514, + 288, + 557 + ], + "type": "text", + "content": "[88] Hang Zhao, Chuang Gan, Wei-Chiu Ma, and Antonio Torralba. The sound of motions. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 1735-1744, 2019. 1, 3, 4, 5, 6, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 558, + 288, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 558, + 288, + 602 + ], + "spans": [ + { + "bbox": [ + 53, + 558, + 288, + 602 + ], + "type": "text", + "content": "[89] Hang Zhao, Chuang Gan, Andrew Rouditchenko, Carl Vondrick, Josh McDermott, and Antonio Torralba. The sound of pixels. In European Conference on Computer Vision (ECCV), pages 570-586, 2018. 1, 2, 3, 4, 5, 6, 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 603, + 288, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 603, + 288, + 646 + ], + "spans": [ + { + "bbox": [ + 53, + 603, + 288, + 646 + ], + "type": "text", + "content": "[90] Hengshuang Zhao, Li Jiang, Jiaya Jia, Philip HS Torr, and Vladlen Koltun. Point transformer. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 16259-16268, 2021. 
3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 647, + 288, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 647, + 288, + 690 + ], + "spans": [ + { + "bbox": [ + 53, + 647, + 288, + 690 + ], + "type": "text", + "content": "[91] Minghang Zheng, Peng Gao, Renrui Zhang, Kunchang Li, Xiaogang Wang, Hongsheng Li, and Hao Dong. End-to-end object detection with adaptive clustering transformer. arXiv preprint arXiv:2011.09315, 2020. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 691, + 288, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 691, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 53, + 691, + 288, + 713 + ], + "type": "text", + "content": "[92] Sixiao Zheng, Jiachen Lu, Hengshuang Zhao, Xiatian Zhu, Zekun Luo, Yabiao Wang, Yanwei Fu, Jianfeng Feng, Tao" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 540 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 331, + 73, + 545, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 73, + 545, + 117 + ], + "spans": [ + { + "bbox": [ + 331, + 73, + 545, + 117 + ], + "type": "text", + "content": "Xiang, Philip HS Torr, et al. Rethinking semantic segmentation from a sequence-to-sequence perspective with transformers. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6881-6890, 2021. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 312, + 118, + 545, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 118, + 545, + 171 + ], + "spans": [ + { + "bbox": [ + 312, + 118, + 545, + 171 + ], + "type": "text", + "content": "[93] Bolei Zhou, Aditya Khosla, Agata Lapedriza, Aude Oliva, and Antonio Torralba. Learning deep features for discriminative localization. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2921-2929, 2016. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 312, + 173, + 545, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 173, + 545, + 228 + ], + "spans": [ + { + "bbox": [ + 312, + 173, + 545, + 228 + ], + "type": "text", + "content": "[94] Dongzhan Zhou, Xinchi Zhou, Di Hu, Hang Zhou, Lei Bai, Ziwei Liu, and Wanli Ouyang. Sepfusion: Finding optimal fusion structures for visual sound separation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pages 3544-3552, 2022. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 312, + 229, + 545, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 229, + 545, + 283 + ], + "spans": [ + { + "bbox": [ + 312, + 229, + 545, + 283 + ], + "type": "text", + "content": "[95] Jinxing Zhou, Liang Zheng, Yiran Zhong, Shijie Hao, and Meng Wang. Positive sample propagation along the audiovisual event line. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8436-8444, 2021. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 312, + 285, + 545, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 285, + 545, + 340 + ], + "spans": [ + { + "bbox": [ + 312, + 285, + 545, + 340 + ], + "type": "text", + "content": "[96] Xingyi Zhou, Rohit Girdhar, Armand Joulin, Philipp Krähenbuhl, and Ishan Misra. Detecting twenty-thousand classes using image-level supervision. In European Conference on Computer Vision (ECCV), pages 350-368. 
Springer, 2022. 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 312, + 341, + 545, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 341, + 545, + 396 + ], + "spans": [ + { + "bbox": [ + 312, + 341, + 545, + 396 + ], + "type": "text", + "content": "[97] Yipin Zhou, Zhaowen Wang, Chen Fang, Trung Bui, and Tamara L Berg. Visual to sound: Generating natural sound for videos in the wild. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3550-3558, 2018. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 312, + 396, + 545, + 440 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 396, + 545, + 440 + ], + "spans": [ + { + "bbox": [ + 312, + 396, + 545, + 440 + ], + "type": "text", + "content": "[98] Lingyu Zhu and Esa Rahtu. Visually guided sound source separation using cascaded opponent filter network. In Proceedings of the Asian Conference on Computer Vision, 2020. 5, 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 312, + 441, + 545, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 441, + 545, + 496 + ], + "spans": [ + { + "bbox": [ + 312, + 441, + 545, + 496 + ], + "type": "text", + "content": "[99] Lingyu Zhu and Esa Rahtu. Visually guided sound source separation and localization using self-supervised motion representations. In IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 1289-1299, 2022. 1, 3, 4, 5, 6, 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 497, + 545, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 497, + 545, + 540 + ], + "spans": [ + { + "bbox": [ + 308, + 497, + 545, + 540 + ], + "type": "text", + "content": "[100] Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, and Jifeng Dai. Deformable detr: Deformable transformers for end-to-end object detection. arXiv preprint arXiv:2010.04159, 2020. 
3" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "text", + "content": "14686" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/itKD_ Interchange Transfer-Based Knowledge Distillation for 3D Object Detection/0c6f9efe-676e-4d3a-914a-2dc77bae5a75_content_list.json b/2023/itKD_ Interchange Transfer-Based Knowledge Distillation for 3D Object Detection/0c6f9efe-676e-4d3a-914a-2dc77bae5a75_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..3052b3280c9e12c6fa637d4c965654c78d4475e3 --- /dev/null +++ b/2023/itKD_ Interchange Transfer-Based Knowledge Distillation for 3D Object Detection/0c6f9efe-676e-4d3a-914a-2dc77bae5a75_content_list.json @@ -0,0 +1,1458 @@ +[ + { + "type": "text", + "text": "itKD: Interchange Transfer-based Knowledge Distillation for 3D Object Detection", + "text_level": 1, + "bbox": [ + 122, + 128, + 848, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Hyeon Cho $^{1}$ , Junyong Choi $^{1,2}$ , Geonwoo Baek $^{1}$ , and Wonjun Hwang $^{1,3}$", + "bbox": [ + 202, + 202, + 764, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Ajou University, $^{2}$ Hyundai Motor Company, $^{3}$ Naver AI Lab", + "bbox": [ + 243, + 220, + 723, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ch0104@ajou.ac.kr, chldusxkr@hyundai.com, bkw0622@ajou.ac.kr, wjhwang@ajou.ac.kr", + "bbox": [ + 127, + 241, + 836, + 256 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 291, + 313, + 306 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Point-cloud based 3D object detectors recently have achieved remarkable progress. However, most studies are limited to the development of network architectures for improving only their accuracy without consideration of the computational efficiency. In this paper, we first propose an autoencoder-style framework comprising channel-wise compression and decompression via interchange transfer-based knowledge distillation. To learn the map-view feature of a teacher network, the features from teacher and student networks are independently passed through the shared autoencoder; here, we use a compressed representation loss that binds the channel-wised compression knowledge from both student and teacher networks as a kind of regularization. The decompressed features are transferred in opposite directions to reduce the gap in the interchange reconstructions. Lastly, we present an head attention loss to match the 3D object detection information drawn by the multi-head self-attention mechanism. Through extensive experiments, we verify that our method can train the lightweight model that is well-aligned with the 3D point cloud detection task and we demonstrate its superiority using the well-known public datasets; e.g., Waymo and nuScenes.", + "bbox": [ + 75, + 323, + 473, + 656 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 76, + 686, + 209, + 700 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Convolutional neural network (CNN)-based 3D object detection methods using point clouds [13] [35] [36] [43] [49] have attracted wide attention based on their outstanding performance for self-driving cars. Recent CNN-based works have required more computational complexity to achieve higher precision under the various wild situation. Some studies [23] [36] [43] have proposed methods to improve the speed of 3D object detection through which the non-maximum suppression (NMS) or anchor procedures are removed but the network parameters are still large.", + "bbox": [ + 75, + 710, + 468, + 863 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/fd151daba5e023596b2a4639e0ae5b201339f012a5bbd5fbe7d0d4786d001c5f.jpg", + "image_caption": [ + "Figure 1. Performance comparison between teacher and student networks for a point-cloud based 3D object detection. The top example images are qualitatively compared between the results of teacher, student and our networks. Specifically, the first row images are an input sample with labels and the center heatmap head of the teacher network. The second row examples are responses of teacher, student, and ours for the yellow circle on the heatmap (or the blue dash circle on the input). The bottom image quantitatively shows the computational complexity and the corresponding accuracy of teacher, student and our networks, respectively. Best viewed in color." + ], + "image_footnote": [], + "bbox": [ + 517, + 292, + 869, + 532 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Knowledge distillation (KD) is one of the parameter compression techniques, which can effectively train a compact student network through the guidance of a deep teacher network, as shown in the example images of Fig. 1. Starting with Hinton's work [9], many KD studies [10] [20] [28] [44] have transferred the discriminative teacher knowledge to the student network for classification tasks. From the viewpoint of the detection task, KD should be extended to the regression problem, including the object locations, which is not easy to straight-forwardly apply the classification-based KD methods to the detection task. To alleviate this problem, KD methods for object detection have been developed for mimicking the output of the backbone network [15] (e.g., region", + "bbox": [ + 496, + 703, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1Our code is available at https://github.com/hyeon-jo/interchange-transfer-KD.", + "bbox": [ + 75, + 875, + 468, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "13540", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "proposal network) or individual detection head [2] [32]. 
Nevertheless, these methods have only been studied for detecting 2D image-based objects, and there is a limit to applying them to sparse 3D point cloud-based data that have not object-specific color information but only 3D position-based object structure information.", + "bbox": [ + 75, + 90, + 468, + 181 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Taking a closer look at differences between 2D and 3D data, there is a large gap in that 2D object detection usually predicts 2D object locations based on inherent color information with the corresponding appearances, but 3D object detection estimates 3D object boxes from inputs consisting of only 3D point clouds. Moreover, the number of the point clouds constituting objects varies depending on the distances and presence of occlusions [42]. Another challenge in 3D object detection for KD is that, compared to 2D object detection, 3D object detection methods [4] [6] [43] [21] have more detection head components such as 3D boxes, and orientations. These detection heads are highly correlated with each other and represent different 3D characteristics. In this respect, when transferring the detection heads of the teacher network to the student network using KD, it is required to guide the distilled knowledge under the consideration of the correlation among the multiple detection head components.", + "bbox": [ + 75, + 185, + 470, + 458 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we propose a novel interchange transfer-based KD (itKD) method designed for the lightweight point-cloud based 3D object detection. The proposed itKD comprises two modules: (1) a channel-wise autoencoder based on the interchange transfer of reconstructed knowledge and (2) a head relation-aware self-attention on multiple 3D detection heads. First of all, through a channel-wise compressing and decompressing processes for KD, the interchange transfer-based autoencoder effectively represents the map-view features from the viewpoint of 3D representation centric-knowledge. Specifically, the encoder provides an efficient representation by compressing the map-view feature in the channel direction to preserve the spatial positions of the objects and the learning of the student network could be regularized by the distilled position information of objects in the teacher network. For transferring the interchange knowledge to the opposite networks, the decoder of the student network reconstructs the map-view feature under the guidance of the teacher network while the reconstruction of the teacher network is guided by the map-view feature of the student network. As a result, the student network can effectively learn how to represent the 3D map-view feature of the teacher. 
Furthermore, to refine the teacher's object detection results as well as its representation, our proposed head relation-aware self-attention gives a chance to learn the pivotal information that should be taught to the student network for improving the 3D detection results by considering the inter-head relation among the multiple detection head and the intra-head relation of", + "bbox": [ + 75, + 463, + 470, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "the individual detection head.", + "bbox": [ + 498, + 90, + 696, + 104 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this way, we implement a unified KD framework to successfully learn the 3D representation and 3D detection results of the teacher network for the lightweight 3D point cloud object detection. We also conduct extensive ablation studies for thoroughly validating our approach in Waymo and nuScenes datasets. The results reveal the outstanding potential of our approach for transferring distilled knowledge that can be utilized to improve the performance of 3D point cloud object detection models.", + "bbox": [ + 496, + 106, + 890, + 241 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our contributions are summarized as follows:", + "bbox": [ + 517, + 243, + 821, + 257 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- For learning the 3D representation-centric knowledge from the teacher network, we propose the channelwise autoencoder regularized in the compressed domain and the interchange knowledge transfer method wherein the reconstructed features are guided by the opposite networks.", + "- For detection head-centric knowledge of the teacher, we suggest the head relation-aware self-attention which can efficiently distill the detection properties under the consideration of the inter-head relation and intra-head relation of the multiple 3D detection heads.", + "- Our work is the best attempt to reduce the parameters of point cloud-based 3D object detection using KD. Additionally, we validate its superiority using two large datasets that reflect real-world driving conditions, e.g., Waymo and NuScenes." + ], + "bbox": [ + 517, + 263, + 890, + 503 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Works", + "text_level": 1, + "bbox": [ + 500, + 518, + 648, + 534 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. 3D Object Detection based on Point Cloud", + "text_level": 1, + "bbox": [ + 500, + 544, + 859, + 560 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "During the last few years, encouraged by the success of CNNs, the development of object detectors using CNNs is developing rapidly. Recently, many 3D object detectors have been studied and they can be briefly categorized by how they extract representations from point clouds; e.g., grid-based [35] [36] [49] [13] [43], point-based [18] [23] [17] [25] [39] and hybrid-based [3] [40] [8] [48] [22] methods. In detail, Vote3Deep [5] thoroughly exploited feature-centric voting to build CNNs for detecting objects in point clouds. In [29], they have studied on the task of amodal 3D object detection in RGB-D images, where a 3D region proposal network (RPN) to learn objectness from geometric shapes and the joint object recognition network to extract geometric features in 3D and color features in 2D. The 3D fully convolutional network [14] was straightforwardly applied to point cloud data for vehicle detection. 
In the early days, VoxelNet [49] has designed an end-to-end trainable detector based on learning-based voxelization using fully connected layers. In [35], they encoded the point cloud by VoxelNet and used the sparse convolution to achieve the fast detection. HVNet [41] fused the multi-scale voxel feature encoder at the point-wise level and projected into multi-", + "bbox": [ + 496, + 568, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "13541", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "ple pseudo-image feature maps for solving the various sizes of the feature map. In [26], they replaced the point cloud with a grid-based bird's-eye view (BEV) RGB-map and utilized YOLOv2 to detect the 3D objects. FIXOR [36] converted the point cloud to a 3D BEV map and carried out the real-time 3D object detection with an RPN-free single-stage based model.", + "bbox": [ + 75, + 90, + 470, + 195 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recently, PointPillars (PP)-based method [13] utilized the PointNet [19] to learn the representation of point clouds organized in vertical columns for achieving the fast 3D object detection. To boost both performance and speed over PP, a pillar-based method [33] that incorporated a cylindrical projection into multi-view feature learning was proposed. More recently, CenterPoint [43] was introduced as an anchor-free detector that predicted the center of an object using a PP or VoxelNet-based feature encoder. In this paper, we employ the backbone architecture using CenterPoint because it is simple, near real-time, and achieves good performance in the wild situation.", + "bbox": [ + 75, + 196, + 470, + 378 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2. Knowledge Distillation", + "text_level": 1, + "bbox": [ + 76, + 391, + 289, + 407 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "KD is one of the methods used for compressing deep neural networks and its fundamental key is to imitate the knowledge extracted from the teacher network, which has heavy parameters as well as good accuracy. Hinton et al. [9] performed a knowledge transfer using KL divergence; FitNet [20] proposed a method for teaching student networks by imitating intermediate layers. On the other hand, TAKD [16] and DGKD [28] used multiple teacher networks for transferring more knowledge to the student network in spite of large parameter gaps. Recently, some studies have been proposed using the layers shared between the teacher and the student networks for KD. Specifically, in [37], KD was performed through softmax regression as the student and teacher networks shared the same classifier. IEKD [10] proposed a method to split the student network into inheritance and exploration parts and mimic the compact teacher knowledge through a shared latent feature space via an autoencoder.", + "bbox": [ + 75, + 414, + 468, + 686 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Beyond its use in classification, KD for detection should transfer the regression knowledge regarding the positions of the objects to the student network. For this purpose, a KD for 2D object detection [15] was first proposed using feature map mimic learning. In [2], they transferred the detection knowledge of the teacher network using hint learning for an RPN, weighted cross-entropy loss for classification, and bound regression loss for regression. Recently, Wang et al. 
[32] proposed a KD framework for detection by utilizing the cross-location discrepancy of feature responses through fine-grained feature imitation.", + "bbox": [ + 75, + 686, + 468, + 852 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As far as we know, there are few KD studies [7] [47] [34] [38] on point cloud-based 3D object detection so far. However, looking at similar studies on 3D knowledge trans", + "bbox": [ + 76, + 854, + 468, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "fer, SE-SSD [47] presented a knowledge distillation-based self-ensembling method for exploiting soft and hard targets with constraints to jointly optimize the model without extra computational cost during inference time. Object-DGCNN [34] proposed a NMS-free 3D object detection via dynamic graphs and a set-to-set distillation. They used the set-to-set distillation method for improving the performance without the consideration of the model compression. Another latest study is SparseKD [38] which suggested a label KD method that distills a few pivotal positions determined by teacher classification response to enhance the logit KD method. On the other hand, in this paper, we are more interest in how to make the student network lighter, or lower computational complexity, by using the KD for 3D object detection.", + "bbox": [ + 496, + 90, + 890, + 316 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Methodology", + "text_level": 1, + "bbox": [ + 500, + 332, + 633, + 349 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Background", + "text_level": 1, + "bbox": [ + 500, + 357, + 630, + 372 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The 3D point cloud object detection methods [13] [49] generally consists of three components; a point cloud encoder, a backbone network, and detection heads. In this paper, we employ CenterPoint [43] network as a backbone architecture. Since the parameter size of the backbone network is the largest among components of the 3D object detector, we aim to construct the student network by reducing the channel sizes of the backbone network for efficient network. We design our method to teach the student 3D representation-centric knowledge and detection head-centric knowledge of the teacher network, respectively.", + "bbox": [ + 496, + 380, + 890, + 547 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Interchange Transfer", + "text_level": 1, + "bbox": [ + 500, + 556, + 697, + 571 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We adopt an autoencoder framework to effectively transfer the meaningful distilled knowledge regarding 3D detection from the teacher to the student network. The traditional encoder-based KD methods [10] [11] have been limited to the classification task, which transfers only compressed categorical knowledge to the student network. However, from the viewpoint of the detection task, the main KD goal of this paper is transferring the distilled knowledge regarding not only categorical features but also object location-related features. 
Particularly, unlike 2D detectors, 3D object detectors should regress more location information such as object orientations, 3D box sizes, etc., and it results in increasing the importance of how to transfer the 3D location features to the student network successfully.", + "bbox": [ + 496, + 580, + 890, + 791 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For this purpose, we transfer the backbone knowledge that contains 3D object representation from the teacher network to the student through the compressed and reconstructed knowledge domains. As shown in Fig. 2, we in", + "bbox": [ + 496, + 792, + 890, + 853 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "2The total parameter size of the 3D detector is about $5.2\\mathrm{M}$ and the backbone size is approximately $4.8\\mathrm{M}$ , which is $92\\%$ . Further details are found in the supplementary material.", + "bbox": [ + 500, + 862, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "13542", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/6d270483b17383a09ed06897d8dd184d913465b5861a6c55e98422a7b11ef053.jpg", + "image_caption": [ + "Figure 2. Overview of the proposed knowledge distillation method. The teacher and student networks take the same point clouds as inputs. Then, the map-view features $M^t$ and $M^s$ are extracted from the teacher and student networks, respectively. The channel-wise autoencoder transfers the knowledge obtained from $M^t$ to $M^s$ by using the compressed representation loss and interchange transfer loss consecutively. The head relation-aware self-attention provides the relation-aware knowledge of multiple detection head to the student network using the attention head loss. The dotted lines of the modules denote that there are shared network parameters between the teacher and student networks. The light-yellow boxes are buffer layers for sampling the features to match the channel sizes of networks." + ], + "image_footnote": [], + "bbox": [ + 160, + 90, + 812, + 381 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "produce a channel-wise autoencoder which consists of an encoder in which the channel dimension of the autoencoder is gradually decreased and a decoder in the form of increasing the channel dimension. Note that spatial features play a pivotal role in the detection task and we try to preserve the spatial information by encoding features in the channel direction. We propose a compressed representation loss to coarsely guide location information of the objects to the student network in Fig. 2, and the compressed representation loss has an effect similar to the regularization of the autoencoder that binds the coordinates of the objectness between the teacher and student networks. 
The compressed representation loss function $\\mathcal{L}_{cr}$ is represented as follows:", + "bbox": [ + 75, + 481, + 472, + 678 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {c r} = m _ {o b j} \\circ \\mathcal {S} \\left[ E \\left(\\theta_ {e n c}, M ^ {t}\\right), E \\left(\\theta_ {e n c}, M ^ {s}\\right) \\right] \\tag {1} \\\\ = m _ {o b j} \\circ \\mathcal {S} \\left[ M _ {e n c} ^ {t}, M _ {e n c} ^ {s} \\right], \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 122, + 686, + 468, + 724 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $E$ is a shared encoder, which has the parameters $\\theta_{enc}$ , and $S$ denotes $l_1$ loss as a similarity measure. $M^t$ and $M^s$ are outputs of the teacher and student backbones, respectively. $m_{obj}$ represents a binary mask to indicate object locations in backbone output like [38] and $\\circ$ is an element-wise product.", + "bbox": [ + 75, + 734, + 468, + 824 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "After performing the coarse representation-based knowledge distillation in a compressed domain, the fine representation features of the teacher network are required to teach the student network from the viewpoint of 3D object detection. In this respect, the decoder reconstructs the fine map", + "bbox": [ + 75, + 825, + 470, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "view features in the channel direction from the compressed features. Through the proposed interchange transfer loss, the reconstructed features are guided from the opposite networks, not their own stem networks, as shown in Fig. 2. Specifically, since the teacher network is frozen and we use the shared autoencoder for both student and teacher networks, we can teach the reconstructed fine features from the student network to resemble the output of the teacher network $M^t$ rather than the student $M^s$ . Moreover, the reconstructed fine features from the teacher network can guide the student's output, $M^s$ at the same time. The proposed interchange transfer loss $\\mathcal{L}_{it}$ is defined as follows:", + "bbox": [ + 496, + 481, + 893, + 662 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {t \\rightarrow s} = \\mathcal {S} \\left[ M ^ {s}, D \\left(\\theta_ {\\text {d e c}}, M _ {\\text {e n c}} ^ {t}\\right)\\right], \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 586, + 674, + 890, + 691 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {s \\rightarrow t} = \\mathcal {S} \\left[ M ^ {t}, D \\left(\\theta_ {\\text {d e c}}, M _ {\\text {e n c}} ^ {s}\\right)\\right], \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 586, + 700, + 890, + 718 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {i t} = \\mathcal {L} _ {s \\rightarrow t} + \\mathcal {L} _ {t \\rightarrow s}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 624, + 722, + 890, + 738 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $D$ is the decoder that contains the network parameter $\\theta_{dec}$ , which is a shared parameter. We hereby present the representation-based KD for 3D object detection in both compressed and decompressed domains to guide the student network to learn the map-view feature of the teacher network efficiently.", + "bbox": [ + 496, + 746, + 893, + 835 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. 
Head Relation-Aware Self-Attention", + "text_level": 1, + "bbox": [ + 500, + 845, + 815, + 861 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Fundamentally, our backbone network, e.g., Center-Point [43], has various types of 3D object characteristics", + "bbox": [ + 498, + 869, + 892, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "13543", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/535309497c413003f5efdc77034d7ee76e0a19cbd4470c32fbff243586d83e47.jpg", + "image_caption": [ + "Figure 3. Head Relation-Aware Self-Attention. We make the object center-head feature from object center locations in the detection head feature and use it as different shaped inputs to self-attention for inter-head relation and intra-head relation. In the self-attention for inter-head relation, we use the object center-head feature as an input for the self-attention. In the self-attention for intra-head relation, the detection heads are separately used for the independent self-attention functions. The outputs of the self-attention are concatenated by $\\mathbb{C}$ operations and the head relation-aware self-attention is generated through the fusion layer." + ], + "image_footnote": [], + "bbox": [ + 163, + 90, + 810, + 287 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "on detection heads. Specifically, the locations, size, and direction of an object are different properties, but they are inevitably correlated to each other because they come from the same object. However, the traditional KD methods [2] [34] were only concerned with how the student network straight-forwardly mimicked the outputs of the teacher network without considering the relation among the detection heads. To overcome this problem, we make use of the relation of detection heads as a major factor for the detection head-centric KD.", + "bbox": [ + 75, + 372, + 472, + 521 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our proposed head relation-aware self-attention is directly inspired by the multi-head self-attention [31] in order to learn the relation between the multiple detection head. As shown in Fig. 3, we first extract $i$ -th instance feature $v^{i} \\in \\mathbb{R}^{c}$ , where $c$ is the channel size, from the center location of the object in the detection head feature. Note that, since the instance feature is extracted from the multiple detection head, it has several object properties such as a class-specific heatmap $v_{hm}^{i}$ , a sub-voxel location refinement $v_{o}^{i}$ , a height-above-ground $v_{h}^{i}$ , a 3D size $v_{s}^{i}$ , and a yaw rotation angle $v_{r}^{i}$ . When there are a total of $n$ objects, we combine them to make an object center-head feature $v \\in \\mathbb{R}^{n \\times c}$ . We use the same object center-head feature $v$ of dimension $n$ for query, key, and value, which are an input of the scaled dot-product attention. The self-attention function $\\mathcal{F}$ is computed by", + "bbox": [ + 75, + 523, + 472, + 767 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {F} (v) = \\operatorname {s o f t m a x} \\left(\\frac {v ^ {\\top} \\cdot v}{\\sqrt {n}}\\right) \\cdot v. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 169, + 777, + 468, + 813 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The proposed head relation-aware self-attention consists of two different self-attention for inter-head and intra-head relations as illustrated in Fig. 3. 
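For illustration, a minimal PyTorch-style sketch of one reading of Eq. (5) is given below; the (n, c) layout of the object center-head feature and the function name are assumptions made for clarity, not part of a released implementation.

```python
import torch

def center_head_attention(v: torch.Tensor) -> torch.Tensor:
    """One reading of Eq. (5): scaled dot-product self-attention in which the
    object center-head feature v (assumed shape (n, c), one row per detected
    object center) serves as query, key, and value at the same time."""
    n = v.shape[0]
    scores = v @ v.transpose(0, 1) / n ** 0.5   # (n, n) relation among the n instances
    return torch.softmax(scores, dim=-1) @ v    # (n, c) attended center-head feature
```

Under this reading, the attention weights relate every detected instance to every other one; the inter-head and intra-head variants described next differ only in which feature slices are fed to this attention.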
We propose the self-attention based on the inter-head relation of the instance features, which is made in order to consider the relation", + "bbox": [ + 75, + 825, + 472, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "between all detected objects and their different properties, rather than a single detected instance, from the global viewpoint. The self-attention for inter-head relation is computed by", + "bbox": [ + 496, + 372, + 890, + 431 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {F} _ {\\text {i n t e r}} (v) = \\mathcal {F} ([ v _ {h m}, v _ {o}, v _ {h}, v _ {s}, v _ {r} ]). \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 570, + 431, + 890, + 450 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "On the other hand, we suggest the self-attention for intrahead relation using the individual detection heads. Here we perform the attentions using only local relation in individual detection heads designed for different properties (e.g., orientation, size, etc.) and concatenate them. Its equation is", + "bbox": [ + 496, + 453, + 890, + 529 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {F} _ {\\text {i n t r a}} (v) = \\left[ \\mathcal {F} \\left(v _ {h m}\\right), \\mathcal {F} \\left(v _ {o}\\right), \\mathcal {F} \\left(v _ {h}\\right), \\mathcal {F} \\left(v _ {s}\\right), \\mathcal {F} \\left(v _ {r}\\right) \\right]. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 508, + 551, + 890, + 569 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We concatenate the outputs of the self-attention and apply the fusion layer to calculate a final attention score that considers the relation between the detection heads and objects. The head relation-aware self-attention equation $\\mathcal{F}_{RA}$ is derived by:", + "bbox": [ + 496, + 575, + 890, + 651 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {F} _ {R A} (v) = \\mathcal {G} \\left(\\left[ \\mathcal {F} _ {\\text {i n t e r}} (v), \\mathcal {F} _ {\\text {i n t r a}} (v) \\right]\\right), \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 568, + 659, + 890, + 676 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathcal{G}$ is the fusion layer, e.g., $1 \\times 1$ convolution layer. 
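A minimal sketch combining Eqs. (6)-(8) is shown below; the module name, the list of per-head channel sizes, and the fused output dimension are illustrative assumptions rather than the exact configuration used in our experiments.

```python
import torch
import torch.nn as nn

class HeadRelationAwareAttention(nn.Module):
    """Sketch of the head relation-aware self-attention (Eqs. 6-8); assumed shapes."""

    def __init__(self, head_dims, fused_dim):
        super().__init__()
        total = sum(head_dims)  # channel size c of the object center-head feature
        # fusion layer G: a 1x1 convolution over the concatenated attention outputs
        self.fuse = nn.Conv1d(2 * total, fused_dim, kernel_size=1)

    @staticmethod
    def attend(v):
        # Eq. (5): query = key = value = v, scores scaled by sqrt(n)
        n = v.shape[0]
        return torch.softmax(v @ v.transpose(0, 1) / n ** 0.5, dim=-1) @ v

    def forward(self, per_head_feats):
        # per_head_feats: list of (n, c_k) tensors, one per detection head
        # (class heatmap, center offset, height, 3D size, yaw rotation)
        v = torch.cat(per_head_feats, dim=1)                                # center-head feature
        inter = self.attend(v)                                              # Eq. (6): heads jointly
        intra = torch.cat([self.attend(h) for h in per_head_feats], dim=1)  # Eq. (7): per head
        both = torch.cat([inter, intra], dim=1).t().unsqueeze(0)            # (1, 2c, n)
        return self.fuse(both).squeeze(0).t()                               # Eq. (8): (n, fused_dim)
```

Running the same module on the teacher's and the student's object center-head features yields the two terms that the head attention loss below compares.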
The student network indirectly takes the teacher's knowledge by learning the relation between the multiple detection head of the teacher network through head attention loss as follows:", + "bbox": [ + 496, + 684, + 890, + 744 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {a t t n}} = \\mathcal {S} \\left(\\mathcal {F} _ {R A} \\left(v _ {t}\\right), \\mathcal {F} _ {R A} \\left(v _ {s}\\right)\\right), \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 586, + 753, + 890, + 770 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $v_{t}$ and $v_{s}$ are the object center-head features of the teacher and the student, respectively.", + "bbox": [ + 496, + 777, + 890, + 806 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Consequently, the overall loss is derived by", + "bbox": [ + 517, + 808, + 807, + 823 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {t o t a l}} = \\alpha \\mathcal {L} _ {\\text {s u p}} + \\beta \\left(\\mathcal {L} _ {\\text {i t}} + \\mathcal {L} _ {\\text {c r}} + \\mathcal {L} _ {\\text {a t t n}}\\right), \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 555, + 830, + 890, + 848 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathcal{L}_{sup}$ is the supervised loss that consists of focal loss and regression loss, and $\\alpha$ and $\\beta$ are the balancing parameters, which we set as 1 for simplicity.", + "bbox": [ + 496, + 854, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "13544", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experimental Results and Discussions", + "text_level": 1, + "bbox": [ + 76, + 89, + 418, + 107 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Environment Settings", + "text_level": 1, + "bbox": [ + 76, + 114, + 279, + 132 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Waymo Waymo open dataset [30] is one of the large-scale datasets for autonomous driving, which is captured by the synchronized and calibrated high-quality LiDAR and camera across a range of urban and suburban geographies. This dataset provides 798 training scenes and 202 validation scenes obtained by detecting all the objects within a $75\\mathrm{m}$ radius; it has a total of 3 object categories (e.g., vehicle, pedestrian, and cyclist) which have 6.1M, 2.8M, and 67K sets, respectively. The mean Average Precision (mAP) and mAP weighted by heading accuracy (mAPH) are the official metrics for Waymo evaluation. mAPH is a metric that gives more weight to the heading than it does to the sizes, and it accounts for the direction of the object.", + "bbox": [ + 75, + 138, + 468, + 335 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "nuScenes nuScenes dataset [1] is another large-scale dataset used for autonomous driving. This dataset contains 1,000 driving sequences. 700, 150, and 150 sequences are used for training, validation, and testing, respectively. Each sequence is captured approximately 20 seconds with 20 FPS using the 32-lane LiDAR. Its evaluation metrics are the average precision (AP) and nuScenes detection score (NDS). 
NDS is a weighted average of mAP and true positive metrics which measures the quality of the detections in terms of box location, size, orientation, attributes, and velocity.", + "bbox": [ + 75, + 335, + 468, + 487 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Implementation details Following the pillar-based CenterPoint [43] as the teacher network, we use an Adam optimizer [12] with a weight decay of 0.01 and a cosine annealing strategy [27] to adjust the learning rate. We set 0.0003 for initial learning rate, 0.003 for max learning rate, and 0.95 for momentum. The networks have been trained for 36 epochs on $8 \\times \\mathrm{V}100$ GPUs with a batch size of 32. For Waymo dataset, we set the detection range to $[-74.88\\mathrm{m}, 74.88\\mathrm{m}]$ for the X and Y axes, $[-2\\mathrm{m}, 4\\mathrm{m}]$ for the Z-axis, and a grid size of $(0.32\\mathrm{m}, 0.32\\mathrm{m})$ . In experiments on nuScenes dataset, we used a $(0.2\\mathrm{m}, 0.2\\mathrm{m})$ grid and set the detection range to $[-51.2\\mathrm{m}, 51.2\\mathrm{m}]$ for the X and Y-axes, $[-5\\mathrm{m}, 3\\mathrm{m}]$ for the Z-axis, and a grid size of $(0.2\\mathrm{m}, 0.2\\mathrm{m})$ . Compared to the teacher network, the student network has $1/4$ less channel capacity of backbone network. Our channel-wise autoencoder consists of three $1 \\times 1$ convolution layers as the encoder and three $1 \\times 1$ convolution layers as the decoder and the number of filters are 128, 64, 32 in encoder layers and 64, 128, 384 in decoder layers. The student's input buffer layer increases the channel size of 196 to 384 and the teacher's output buffer layer decreases the channel size 384 to 196.", + "bbox": [ + 75, + 488, + 470, + 819 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Overall KD Performance Comparison", + "text_level": 1, + "bbox": [ + 76, + 830, + 406, + 848 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We validate the performance of our method compared with well-known KD methods on the Waymo and nuScenes datasets. We re-implement the seven KD methods from 2D", + "bbox": [ + 76, + 854, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "classification-based KD to 3D detection-based KD in this paper. We set the baseline by applying the Kullback-Leibler (KL) divergence loss [9] to the center heatmap head and $l_{1}$ loss to the other regression heads. FitNet [20] is a method that mimics the intermediate outputs of layers and we apply it to the output of the backbone for simplicity. We also simply extend EOD-KD [2], one of the 2D object detection KDs, to 3D object detection. We apply TOFD [45], a 3D classification-based KD, to our detection task and straightforwardly use SE-SSD [47], Object DGCNN [34], and SparseKD [38] for 3D object detection KD.", + "bbox": [ + 496, + 90, + 890, + 257 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1 shows that our method almost outperforms other KD methods on mAP and mAPH values for level 1 and level 2 under all three categories of objects. Especially, our performance improvement of mAPH is better than other methods, which indicates our method guides the student network well where the detected objects are facing. To verify the generality of the proposed method, we make additional comparison results using the nuScenes dataset, another large-scale 3D dataset for autonomous driving, in Table 2. 
Compared with the other methods, our method achieves the best accuracy under the NDS and mAP metrics in the nuScenes validation set. Specifically, when the student network shows $50.24\\%$ NDS and $38.52\\%$ mAP, our method achieves $53.90\\%$ $(+3.66\\%)$ NDS and $41.33\\%$ $(+2.81\\%)$ mAP. In detail, our method outperforms the other methods for the most of object classes except the construction vehicle and the bicycle.", + "bbox": [ + 496, + 258, + 892, + 515 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Ablation Studies", + "text_level": 1, + "bbox": [ + 500, + 527, + 663, + 541 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To analyze of our proposed method in detail, we conduct ablation studies on the Waymo dataset, and the whole performances are measured by mAPH at level 2 for simplicity. For the qualitative analysis, we visualize the map-view feature at each stage to validate the what kinds of knowledge are transferred from the teacher to the student by the proposed method. For simple visualization, we apply the $L_{1}$ normalization to the map-view feature in the channel direction.", + "bbox": [ + 496, + 551, + 890, + 686 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As shown in Fig. 4, the objects and backgrounds are well activated in the example image of the teacher output. On the other hand, the encoder output is activated by further highlighting the coarse positions of the target objects. When looking at the decoder output, we can see that all the fine surrounding information is represented again. At this point, it is worth noting that compared to the teacher output, the target objects are highlighted a little more. From these visual comparisons, we can infer how our method successfully transfers the object-centered knowledge to the student.", + "bbox": [ + 496, + 688, + 890, + 839 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We explore the buffer layer that matches the channel size of the channel-wise autoencoder without the head attention loss. As shown in Table 3, we compare the three types for the buffer layer: (1) $S \\rightarrow T$ is the upsampling method that in-", + "bbox": [ + 496, + 839, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "13545", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/b4ae11852cf8a581801c2120641f0cd0208f8e4b6153246d07d165c81bbcd9a7.jpg", + "table_caption": [ + "Table 1. Waymo evaluation. Comparisons with different KD methods in the Waymo validation set. The best accuracy is indicated in bold, and the second-best accuracy is underlined." + ], + "table_footnote": [], + "table_body": "
<tr><td rowspan="3">Method</td><td colspan="4">Vehicle</td><td colspan="4">Pedestrian</td><td colspan="4">Cyclist</td></tr>
<tr><td colspan="2">Level 1</td><td colspan="2">Level 2</td><td colspan="2">Level 1</td><td colspan="2">Level 2</td><td colspan="2">Level 1</td><td colspan="2">Level 2</td></tr>
<tr><td>mAP</td><td>mAPH</td><td>mAP</td><td>mAPH</td><td>mAP</td><td>mAPH</td><td>mAP</td><td>mAPH</td><td>mAP</td><td>mAPH</td><td>mAP</td><td>mAPH</td></tr>
<tr><td>Teacher [43]</td><td>73.72</td><td>73.17</td><td>65.61</td><td>65.11</td><td>72.43</td><td>61.72</td><td>64.73</td><td>54.99</td><td>64.30</td><td>62.61</td><td>61.91</td><td>60.28</td></tr>
<tr><td>Student (1/4)</td><td>64.22</td><td>63.56</td><td>56.21</td><td>55.62</td><td>63.72</td><td>53.22</td><td>56.14</td><td>46.78</td><td>53.01</td><td>51.72</td><td>50.99</td><td>49.75</td></tr>
<tr><td>Baseline</td><td>64.78</td><td>64.05</td><td>56.92</td><td>56.26</td><td>64.85</td><td>52.98</td><td>57.37</td><td>46.75</td><td>54.71</td><td>52.46</td><td>52.65</td><td>50.48</td></tr>
<tr><td>FitNet [20]</td><td>65.11</td><td>64.38</td><td>57.24</td><td>56.58</td><td>64.89</td><td>53.29</td><td>57.37</td><td>47.00</td><td>54.91</td><td>52.61</td><td>52.84</td><td>50.63</td></tr>
<tr><td>EOD-KD [2]</td><td>66.50</td><td>65.79</td><td>58.56</td><td>57.92</td><td>65.99</td><td>54.58</td><td>58.48</td><td>48.25</td><td>55.18</td><td>52.93</td><td>53.10</td><td>50.94</td></tr>
<tr><td>SE-SSD [47]</td><td>65.95</td><td>65.22</td><td>58.05</td><td>57.40</td><td>65.39</td><td>53.98</td><td>57.92</td><td>47.69</td><td>55.01</td><td>52.98</td><td>52.94</td><td>50.99</td></tr>
<tr><td>TOFD [45]</td><td>64.09</td><td>63.43</td><td>56.13</td><td>55.55</td><td>66.24</td><td>54.98</td><td>58.50</td><td>48.45</td><td>54.95</td><td>53.06</td><td>52.86</td><td>51.04</td></tr>
<tr><td>Obj. DGCNN [34]</td><td>66.07</td><td>65.38</td><td>59.27</td><td>58.55</td><td>65.98</td><td>54.44</td><td>59.42</td><td>49.11</td><td>54.65</td><td>52.62</td><td>53.13</td><td>50.93</td></tr>
<tr><td>SparseKD [38]</td><td>65.25</td><td>64.59</td><td>56.97</td><td>56.38</td><td>67.44</td><td>54.54</td><td>59.24</td><td>47.83</td><td>55.54</td><td>53.45</td><td>53.63</td><td>51.61</td></tr>
<tr><td>Ours</td><td>67.43</td><td>66.72</td><td>59.44</td><td>58.81</td><td>67.26</td><td>56.02</td><td>59.73</td><td>49.61</td><td>56.09</td><td>54.24</td><td>53.96</td><td>52.19</td></tr>
", + "bbox": [ + 81, + 119, + 888, + 310 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/1b83ee9b0276affc2b0494216fcc8f6b04411dc54d34695819e41779b29e102c.jpg", + "table_caption": [ + "Table 2. nuScenes evaluation. Comparisons with different KD methods in the nuScenes validation set. The best accuracy is indicated in bold, and the second-best accuracy is underlined." + ], + "table_footnote": [], + "table_body": "
<tr><td>Method</td><td>NDS</td><td>mAP</td><td>car</td><td>truck</td><td>bus</td><td>trailer</td><td>con. veh.</td><td>ped.</td><td>motor.</td><td>bicycle</td><td>tr. cone</td><td>barrier</td></tr>
<tr><td>Teacher [43]</td><td>60.16</td><td>50.25</td><td>84.04</td><td>53.48</td><td>64.29</td><td>31.90</td><td>12.50</td><td>78.93</td><td>44.01</td><td>18.18</td><td>54.87</td><td>60.30</td></tr>
<tr><td>Student (1/4)</td><td>50.24</td><td>38.52</td><td>77.85</td><td>38.18</td><td>51.38</td><td>22.33</td><td>3.95</td><td>71.51</td><td>23.90</td><td>3.51</td><td>43.03</td><td>49.56</td></tr>
<tr><td>Baseline</td><td>51.48</td><td>39.19</td><td>78.72</td><td>37.90</td><td>50.47</td><td>22.42</td><td>3.51</td><td>72.29</td><td>26.25</td><td>4.65</td><td>44.91</td><td>50.77</td></tr>
<tr><td>FitNet [20]</td><td>51.42</td><td>38.90</td><td>78.30</td><td>37.40</td><td>50.40</td><td>22.20</td><td>3.80</td><td>72.10</td><td>25.70</td><td>4.25</td><td>44.20</td><td>50.60</td></tr>
<tr><td>EOD-KD [2]</td><td>52.49</td><td>39.82</td><td>78.40</td><td>38.60</td><td>50.90</td><td>22.70</td><td>3.90</td><td>73.20</td><td>28.20</td><td>5.30</td><td>45.00</td><td>51.97</td></tr>
<tr><td>SE-SSD [47]</td><td>52.21</td><td>39.53</td><td>78.69</td><td>38.56</td><td>49.81</td><td>23.70</td><td>3.72</td><td>72.86</td><td>28.27</td><td>4.25</td><td>44.24</td><td>51.18</td></tr>
<tr><td>TOFD [45]</td><td>52.88</td><td>40.57</td><td>79.06</td><td>39.73</td><td>52.03</td><td>24.51</td><td>3.56</td><td>73.51</td><td>29.58</td><td>5.62</td><td>45.34</td><td>52.79</td></tr>
<tr><td>Obj. DGCNN [34]</td><td>52.91</td><td>40.34</td><td>78.95</td><td>39.24</td><td>53.37</td><td>23.96</td><td>4.13</td><td>72.98</td><td>28.63</td><td>4.99</td><td>44.72</td><td>52.46</td></tr>
<tr><td>SparseKD [38]</td><td>53.01</td><td>40.26</td><td>78.78</td><td>39.50</td><td>51.87</td><td>23.64</td><td>3.30</td><td>73.17</td><td>29.34</td><td>5.75</td><td>44.98</td><td>52.26</td></tr>
<tr><td>Ours</td><td>53.90</td><td>41.33</td><td>79.48</td><td>40.38</td><td>54.35</td><td>26.44</td><td>3.58</td><td>73.91</td><td>30.21</td><td>5.39</td><td>45.90</td><td>53.70</td></tr>
", + "bbox": [ + 81, + 351, + 888, + 512 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/80e990b6ad24ecda37c55bb800204ee04ce5abaf1ee1fe120253260bac5e5570.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 94, + 522, + 272, + 628 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/af145eb234aaf14eb731dc65b35ac889d5912d6647354036b87edc55f3ee0d4c.jpg", + "image_caption": [ + "(b) Teacher output $(M^t)$" + ], + "image_footnote": [], + "bbox": [ + 272, + 522, + 447, + 628 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/7834374594e0224e6c18ec4ac004861e6dc676bdc03cb7fae7929afc66109bd4.jpg", + "image_caption": [ + "(a) Input", + "(c) Encoder output", + "Figure 4. Feature visualization on the proposed channel-wise autoencoder. (a) an example input image and (b) the output feature of the teacher network. (c) and (d) are the output images of encoder and decoder of the teacher, respectively." + ], + "image_footnote": [], + "bbox": [ + 96, + 645, + 269, + 750 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/85010ca490235b842b38dd2f64829c1a656f8d2a99107e24823c6c348ddb8650.jpg", + "image_caption": [ + "(d) Decoder output" + ], + "image_footnote": [], + "bbox": [ + 272, + 645, + 447, + 750 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "creases the student's map-view feature to the teacher's feature. (2) $T \\to S$ is the downsampling method that decreases the teacher's feature to the student's feature. (3) $(S + T) / 2$ is that the teacher's feature is downsampled and the stu", + "bbox": [ + 75, + 839, + 472, + 900 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/d68497035f6c60584bef9a79bf856032e3cc065cfdf00c91a70dbf467308b4d7.jpg", + "table_caption": [ + "Table 3. Buffer layer for different channel size." + ], + "table_footnote": [], + "table_body": "
<tr><td>Method</td><td>Vehicle</td><td>Pedestrian</td><td>Cyclist</td><td>Avg.</td></tr>
<tr><td>S → T</td><td>58.41</td><td>48.90</td><td>51.90</td><td>53.07</td></tr>
<tr><td>T → S</td><td>58.62</td><td>48.78</td><td>51.75</td><td>53.05</td></tr>
<tr><td>(S + T) / 2</td><td>58.47</td><td>48.84</td><td>51.54</td><td>52.95</td></tr>
", + "bbox": [ + 511, + 537, + 880, + 606 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/b245847452da89c4dbd5f04d151a630c1476243549f6a9fefad228ae39cf962d.jpg", + "table_caption": [ + "Table 4. Effect of shared and non-shared parameters for the autoencoder." + ], + "table_footnote": [], + "table_body": "
<tr><td>Method</td><td>Vehicle</td><td>Pedestrian</td><td>Cyclist</td><td>Avg.</td></tr>
<tr><td>Non-shared</td><td>56.26</td><td>45.85</td><td>48.23</td><td>50.11</td></tr>
<tr><td>Shared</td><td>58.41</td><td>48.90</td><td>51.90</td><td>53.07</td></tr>
", + "bbox": [ + 506, + 643, + 883, + 696 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "dent's feature is upsampled to the median size. The experiments show that the upsampling method performs better when considering all the classes.", + "bbox": [ + 496, + 718, + 890, + 762 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In Table 4, we observe the performance difference when the autoencoder parameters are shared or not. From the result, we can conclude that the shared parameters achieve better performance because what we want to is for the student to learn the teacher's knowledge, not the independent model.", + "bbox": [ + 496, + 763, + 890, + 853 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We investigate improvements made by our interchange transfer for KD without the head attention loss as shown in Table 5. Self-reconstruction is a method wherein the de", + "bbox": [ + 498, + 854, + 890, + 898 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "13546", + "bbox": [ + 480, + 945, + 519, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/6c2555dff997a6447462afe4b370b4df44bdd7781f280788334a88c2f9a95564.jpg", + "table_caption": [ + "Table 5. Comparison of different reconstruction methods for the autoencoder." + ], + "table_footnote": [], + "table_body": "
<tr><td>Method</td><td>Vehicle</td><td>Pedestrian</td><td>Cyclist</td><td>Avg.</td></tr>
<tr><td>Self Recon.</td><td>56.57</td><td>47.26</td><td>50.29</td><td>51.37</td></tr>
<tr><td>Ours</td><td>58.41</td><td>48.90</td><td>51.90</td><td>53.07</td></tr>
", + "bbox": [ + 78, + 117, + 468, + 172 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/b74e0aa02246d70805e2c18728cc7898189db39df6691acba1233e104e49d144.jpg", + "table_caption": [ + "Table 6. Comparison of KD methods for the multiple detection head. KL loss and $l_{1}$ loss denote that directly apply the loss function to all detection heads for KD." + ], + "table_footnote": [], + "table_body": "
<tr><td>Method</td><td>Vehicle</td><td>Pedestrian</td><td>Cyclist</td><td>Avg.</td></tr>
<tr><td>Student</td><td>55.62</td><td>46.78</td><td>49.75</td><td>50.72</td></tr>
<tr><td>Baseline</td><td>56.26</td><td>46.75</td><td>50.48</td><td>51.16</td></tr>
<tr><td>KL loss [9]</td><td>55.92</td><td>45.08</td><td>47.49</td><td>49.50</td></tr>
<tr><td>l1 loss</td><td>55.62</td><td>45.10</td><td>48.73</td><td>49.82</td></tr>
<tr><td>AT [44]</td><td>56.85</td><td>47.34</td><td>50.36</td><td>51.52</td></tr>
<tr><td>L_inter</td><td>56.41</td><td>46.90</td><td>50.90</td><td>51.40</td></tr>
<tr><td>L_intra</td><td>57.20</td><td>47.19</td><td>51.23</td><td>51.87</td></tr>
<tr><td>L_attn</td><td>57.10</td><td>47.34</td><td>51.79</td><td>52.08</td></tr>
", + "bbox": [ + 78, + 224, + 468, + 375 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "coder uses the corresponding input for the reconstruction and our interchange reconstruction is a method wherein the proposed $\\mathcal{L}_{it}$ objective transfers the reconstructed knowledge to the opponent network. Our interchange transfer-based reconstruction achieves better results and note that our main task is not the reconstruction but the 3D object-based knowledge transfer for KD.", + "bbox": [ + 75, + 382, + 468, + 487 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3D detection [4] [6] [43] [21] has the multiple detection head. To prove the superiority of the proposed head attention objective for 3D object detection, we make the KD comparison results against only multiple detection head without the autoencoder, as shown in Table 6. Since the heatmap head classifies objects and other heads regress 3D bounding box information, Applying KL loss and $l_{1}$ loss to all detection heads has a negative effect. However, it is required to consider the relation of detection heads. In this respect, our method achieves better performance than the other KD methods which directly mimic the output of detection heads or simply employ attention mechanism.", + "bbox": [ + 75, + 489, + 468, + 671 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 7 shows the overall effect of the proposed losses on the KD performances. We set up the experiments by adding each loss based on the supervised loss $\\mathcal{L}_{\\mathrm{sup}}$ . Specifically, the interchange transfer loss $\\mathcal{L}_{it}$ improves on an average of $1.41\\%$ mAPH and the compressed representation loss $\\mathcal{L}_{cr}$ leads to a $0.94\\%$ performance improvement. In the end, the head attention loss $\\mathcal{L}_{\\mathrm{attn}}$ helps to improve the performance and the final average mAPH is $53.54\\%$ . We conclude that each proposed loss contributes positively to performance improvement in the 3D object detection-based KD task.", + "bbox": [ + 75, + 672, + 468, + 837 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "From Table 8, we observed quantitative comparisons of the computational complexity between the student network and the teacher network. Specifically, the student network, which reduced the channel by $1/4$ , decreased about", + "bbox": [ + 75, + 839, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/224cfae5fa4ffe2c1c6bec0b49a043582a729dbeb738742fcacc5c565c5d5795.jpg", + "table_caption": [ + "Table 7. Ablation results from investigating effects of different components." + ], + "table_footnote": [], + "table_body": "
<tr><td>L_sup</td><td>L_it</td><td>L_cr</td><td>L_attn</td><td>Vehicle</td><td>Pedestrian</td><td>Cyclist</td><td>Avg.</td></tr>
<tr><td>✓</td><td></td><td></td><td></td><td>55.62</td><td>46.78</td><td>49.75</td><td>50.72</td></tr>
<tr><td>✓</td><td>✓</td><td></td><td></td><td>57.41</td><td>48.20</td><td>50.77</td><td>52.13</td></tr>
<tr><td>✓</td><td>✓</td><td>✓</td><td></td><td>58.41</td><td>48.90</td><td>51.90</td><td>53.07</td></tr>
<tr><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>58.81</td><td>49.61</td><td>52.19</td><td>53.54</td></tr>
", + "bbox": [ + 501, + 117, + 890, + 186 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/255f0ff426dbc80d62db3348cc112c91b66f9fe4798bd57962e9322cef7d3bfa.jpg", + "table_caption": [ + "Table 8. Quantitative evaluation for model efficiency on Waymo dataset." + ], + "table_footnote": [], + "table_body": "
<tr><td>Method</td><td>Params (M)</td><td>FLOPS (G)</td><td>mAPH / L2</td></tr>
<tr><td>PointPillars [13]</td><td>4.8</td><td>255.0</td><td>57.05</td></tr>
<tr><td>SECOND [35]</td><td>5.3</td><td>84.5</td><td>57.23</td></tr>
<tr><td>Part-A2 [24]</td><td>4.6</td><td>87.1</td><td>57.43</td></tr>
<tr><td>IA-SSD [46]</td><td>2.7</td><td>46.1</td><td>58.08</td></tr>
<tr><td>SparseKD-v0.64 [38]</td><td>5.2</td><td>85.1</td><td>58.89</td></tr>
<tr><td>Teacher [43]</td><td>5.2</td><td>333.9</td><td>60.13</td></tr>
<tr><td>Ours: Student (1/2)</td><td>1.5</td><td>130.1</td><td>59.04</td></tr>
<tr><td>Ours: Student (1/4)</td><td>0.6</td><td>45.1</td><td>53.54</td></tr>
", + "bbox": [ + 501, + 224, + 890, + 349 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "8.6 times compared to the parameters of the teacher, and FLOPS was reduced by 7.4 times. Above all, we should not overlook the fact that the performance of the student improved from $50.72\\%$ to $53.54\\%$ mAPH/L2 by our KD method. Furthermore, we apply our method to the student whose channel was reduced by half. The student's performance increases to $59.04\\%$ , and the parameters and FLOPS compared to the teacher are reduced by 3.5 times and 2.6 times, respectively. Compared to lightweight network-based methods [13] [35] [24] [46], our student networks are able to derive stable performance with fewer parameters and FLOPS in 3D object detection.", + "bbox": [ + 496, + 364, + 890, + 547 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 564, + 617, + 579 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we propose a novel KD method that transfers knowledge to produce a lightweight point cloud detector. Our main method involves interchange transfer, which learns coarse knowledge by increasing the similarity of the compressed feature and fine knowledge by decompressing the map-view feature of the other side using the channel-wise autoencoder. Moreover, we introduce a method to guide multiple detection head using head relation-aware self-attention, which refines knowledge by considering the relation of instances and properties. Ablation studies demonstrate the effectiveness of our proposed algorithm, and extensive experiments on the two large-scale open datasets verify that our proposed method achieves competitive performance against state-of-the-art methods.", + "bbox": [ + 496, + 590, + 890, + 801 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgement. This work was partly supported by NRF-2022R1A2C1091402, BK21 FOUR program of the NRF of Korea funded by the Ministry of Education (NRF5199991014091), and IITP grant funded by the Korea government(MSIT) (No.2021-0-00951, Development of Cloud based Autonomous Driving AI learning Software; No. 2021-0-02068, Artificial Intelligence Innovation Hub). W. Hwang is the corresponding author.", + "bbox": [ + 496, + 801, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "13547", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 106 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11621-11631, 2020. 6", + "[2] Guobin Chen, Wongun Choi, Xiang Yu, Tony Han, and Manmohan Chandraker. Learning efficient object detection models with knowledge distillation. Advances in neural information processing systems, 30, 2017. 2, 3, 5, 6, 7", + "[3] Yilun Chen, Shu Liu, Xiaoyong Shen, and Jiaya Jia. Fast point r-cnn. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9775-9784, 2019. 2", + "[4] Xiyang Dai, Yinpeng Chen, Bin Xiao, Dongdong Chen, Mengchen Liu, Lu Yuan, and Lei Zhang. Dynamic head: Unifying object detection heads with attentions. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7373-7382, 2021. 2, 8", + "[5] Martin Engelcke, Dushyant Rao, Dominic Zeng Wang, Chi Hay Tong, and Ingmar Posner. Vote3deep: Fast object detection in 3d point clouds using efficient convolutional neural networks. In 2017 IEEE International Conference on Robotics and Automation (ICRA), pages 1355-1361. IEEE, 2017. 2", + "[6] Runzhou Ge, Zhuangzhuang Ding, Yihan Hu, Yu Wang, Sijia Chen, Li Huang, and Yuan Li. Afdet: Anchor free one stage 3d object detection. arXiv preprint arXiv:2006.12671, 2020. 2, 8", + "[7] Xiaoyang Guo, Shaoshuai Shi, Xiaogang Wang, and Hongsheng Li. Liga-stereo: Learning lidar geometry aware representations for stereo-based 3d detector. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3153-3163, 2021. 3", + "[8] Chenhang He, Hui Zeng, Jianqiang Huang, Xian-Sheng Hua, and Lei Zhang. Structure aware single-stage 3d object detection from point cloud. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11873-11882, 2020. 2", + "[9] Geoffrey Hinton, Oriol Vinyals, Jeff Dean, et al. Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531, 2(7), 2015. 1, 3, 6, 8", + "[10] Zhen Huang, Xu Shen, Jun Xing, Tongliang Liu, Xinmei Tian, Houqiang Li, Bing Deng, Jianqiang Huang, and Xian-Sheng Hua. Revisiting knowledge distillation: An inheritance and exploration framework. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3579-3588, 2021. 1, 3", + "[11] Jangho Kim, SeongUk Park, and Nojun Kwak. Paraphrasing complex network: Network compression via factor transfer. Advances in neural information processing systems, 31, 2018. 3", + "[12] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 6", + "[13] Alex H Lang, Sourabh Vora, Holger Caesar, Lubing Zhou, Jiong Yang, and Oscar Beijbom. Pointpillars: Fast encoders" + ], + "bbox": [ + 78, + 114, + 472, + 902 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "for object detection from point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12697-12705, 2019. 1, 2, 3, 8", + "[14] Bo Li. 3d fully convolutional network for vehicle detection in point cloud. In 2017 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 1513-1518. IEEE, 2017. 2", + "[15] Quanquan Li, Shengying Jin, and Junjie Yan. Mimicking very efficient network for object detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6356-6364, 2017. 1, 3", + "[16] Seyed Iman Mirzadeh, Mehrdad Farajtabar, Ang Li, Nir Levine, Akihiro Matsukawa, and Hassan Ghasemzadeh. Improved knowledge distillation via teacher assistant. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 5191-5198, 2020. 3", + "[17] Jiquan Ngiam, Benjamin Caine, Wei Han, Brandon Yang, Yuning Chai, Pei Sun, Yin Zhou, Xi Yi, Ouais Alsharif, Patrick Nguyen, et al. Starnet: Targeted computation for object detection in point clouds. arXiv preprint arXiv:1908.11069, 2019. 2", + "[18] Charles R Qi, Wei Liu, Chenxia Wu, Hao Su, and Leonidas J Guibas. Frustum pointnets for 3d object detection from rgb data. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 918-927, 2018. 
2", + "[19] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 3", + "[20] Adriana Romero, Nicolas Ballas, Samira Ebrahimi Kahou, Antoine Chassang, Carlo Gatta, and Yoshua Bengio. Fitnets: Hints for thin deep nets. arXiv preprint arXiv:1412.6550, 2014. 1, 3, 6, 7", + "[21] Danila Rukhovich, Anna Vorontsova, and Anton Konushin. Imvoxelnet: Image to voxels projection for monocular and multi-view general-purpose 3d object detection. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 2397-2406, 2022. 2, 8", + "[22] Shaoshuai Shi, Chaoxu Guo, Li Jiang, Zhe Wang, Jianping Shi, Xiaogang Wang, and Hongsheng Li. Pv-rcnn: Pointvoxel feature set abstraction for 3d object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10529–10538, 2020. 2", + "[23] Shaoshuai Shi, Xiaogang Wang, and Hongsheng Li. Pointrcnn: 3d object proposal generation and detection from point cloud. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 770-779, 2019. 1, 2", + "[24] Shaoshuai Shi, Zhe Wang, Jianping Shi, Xiaogang Wang, and Hongsheng Li. From points to parts: 3d object detection from point cloud with part-aware and part-aggregation network. IEEE transactions on pattern analysis and machine intelligence, 43(8):2647-2664, 2020. 8", + "[25] Weijing Shi and Raj Rajkumar. Point-gnn: Graph neural network for 3d object detection in a point cloud. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1711-1719, 2020. 2" + ], + "bbox": [ + 503, + 92, + 893, + 902 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "13548", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[26] Martin Simony, Stefan Milzy, Karl Amendey, and Horst-Michael Gross. Complex-yolo: An euler-region-proposal for real-time 3d object detection on point clouds. In Proceedings of the European Conference on Computer Vision (ECCV) Workshops, pages 0–0, 2018. 3", + "[27] Leslie N Smith. Cyclical learning rates for training neural networks. In 2017 IEEE winter conference on applications of computer vision (WACV), pages 464-472. IEEE, 2017. 6", + "[28] Wonchul Son, Jaemin Na, Junyong Choi, and Wonjun Hwang. Densely guided knowledge distillation using multiple teacher assistants. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9395-9404, 2021. 1, 3", + "[29] Shuran Song and Jianxiong Xiao. Deep sliding shapes for amodal 3d object detection in rgb-d images. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 808-816, 2016. 2", + "[30] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. Scalability in perception for autonomous driving: Waymo open dataset. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2446-2454, 2020. 6", + "[31] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. 5", + "[32] Tao Wang, Li Yuan, Xiaopeng Zhang, and Jiashi Feng. 
Distilling object detectors with fine-grained feature imitation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4933-4942, 2019. 2, 3", + "[33] Yue Wang, Alireza Fathi, Abhijit Kundu, David A Ross, Caroline Pantofaru, Tom Funkhouser, and Justin Solomon. Pillar-based object detection for autonomous driving. In European Conference on Computer Vision, pages 18-34. Springer, 2020. 3", + "[34] Yue Wang and Justin M Solomon. Object dgenn: 3d object detection using dynamic graphs. Advances in Neural Information Processing Systems, 34, 2021. 3, 5, 6, 7", + "[35] Yan Yan, Yuxing Mao, and Bo Li. Second: Sparsely embedded convolutional detection. Sensors, 18(10):3337, 2018. 1, 2, 8", + "[36] Bin Yang, Wenjie Luo, and Raquel Urtasun. Pixor: Realtime 3d object detection from point clouds. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pages 7652-7660, 2018. 1, 2, 3", + "[37] Jing Yang, Brais Martinez, Adrian Bulat, and Georgios Tzimiropoulos. Knowledge distillation via softmax regression representation learning. In International Conference on Learning Representations, 2020. 3", + "[38] Jihan Yang, Shaoshuai Shi, Runyu Ding, Zhe Wang, and Xiaojuan Qi. Towards efficient 3d object detection with knowledge distillation. arXiv preprint arXiv:2205.15156, 2022. 3, 4, 6, 7, 8", + "[39] Zetong Yang, Yanan Sun, Shu Liu, and Jiaya Jia. 3dssd: Point-based 3d single stage object detector. In Proceedings" + ], + "bbox": [ + 78, + 90, + 468, + 901 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "of the IEEE/CVF conference on computer vision and pattern recognition, pages 11040-11048, 2020. 2", + "[40] Zetong Yang, Yanan Sun, Shu Liu, Xiaoyong Shen, and Jiaya Jia. Std: Sparse-to-dense 3d object detector for point cloud. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1951-1960, 2019. 2", + "[41] Maosheng Ye, Shuangjie Xu, and Tongyi Cao. Hvnet: Hybrid voxel network for lidar based 3d object detection. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1631-1640, 2020. 2", + "[42] Zeng Yihan, Chunwei Wang, Yunbo Wang, Hang Xu, Chaoqiang Ye, Zhen Yang, and Chao Ma. Learning transferable features for point cloud detection via 3d contrastive cotraining. Advances in Neural Information Processing Systems, 34, 2021. 2", + "[43] Tianwei Yin, Xingyi Zhou, and Philipp Krahenbuhl. Center-based 3d object detection and tracking. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11784-11793, 2021. 1, 2, 3, 4, 6, 7, 8", + "[44] Sergey Zagoruyko and Nikos Komodakis. Paying more attention to attention: Improving the performance of convolutional neural networks via attention transfer. 5th international conference on Learning Representations, Apr. 2017. 1, 8", + "[45] Linfeng Zhang, Yukang Shi, Zuoqiang Shi, Kaisheng Ma, and Chenglong Bao. Task-oriented feature distillation. Advances in Neural Information Processing Systems, 33:14759-14771, 2020. 6, 7", + "[46] Yifan Zhang, Qingyong Hu, Guoquan Xu, Yanxin Ma, Jianwei Wan, and Yulan Guo. Not all points are equal: Learning highly efficient point-based detectors for 3d lidar point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18953-18962, 2022. 8", + "[47] Wu Zheng, Weiliang Tang, Li Jiang, and Chi-Wing Fu. Sessd: Self-ensembling single-stage object detector from point cloud. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14494–14503, 2021. 3, 6, 7", + "[48] Yin Zhou, Pei Sun, Yu Zhang, Dragomir Anguelov, Jiyang Gao, Tom Ouyang, James Guo, Jiquan Ngiam, and Vijay Vasudevan. End-to-end multi-view fusion for 3d object detection in lidar point clouds. In Conference on Robot Learning, pages 923-932. PMLR, 2020. 2", + "[49] Yin Zhou and Oncel Tuzel. Voxelnet: End-to-end learning for point cloud based 3d object detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4490-4499, 2018. 1, 2, 3" + ], + "bbox": [ + 501, + 92, + 893, + 768 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "13549", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2023/itKD_ Interchange Transfer-Based Knowledge Distillation for 3D Object Detection/0c6f9efe-676e-4d3a-914a-2dc77bae5a75_model.json b/2023/itKD_ Interchange Transfer-Based Knowledge Distillation for 3D Object Detection/0c6f9efe-676e-4d3a-914a-2dc77bae5a75_model.json new file mode 100644 index 0000000000000000000000000000000000000000..a636317327c0ff35aab505d15bd6ed908a1d160e --- /dev/null +++ b/2023/itKD_ Interchange Transfer-Based Knowledge Distillation for 3D Object Detection/0c6f9efe-676e-4d3a-914a-2dc77bae5a75_model.json @@ -0,0 +1,2090 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.123, + 0.13, + 0.849, + 0.175 + ], + "angle": 0, + "content": "itKD: Interchange Transfer-based Knowledge Distillation for 3D Object Detection" + }, + { + "type": "text", + "bbox": [ + 0.203, + 0.203, + 0.766, + 0.222 + ], + "angle": 0, + "content": "Hyeon Cho\\(^{1}\\), Junyong Choi\\(^{1,2}\\), Geonwoo Baek\\(^{1}\\), and Wonjun Hwang\\(^{1,3}\\)" + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.222, + 0.725, + 0.24 + ], + "angle": 0, + "content": "\\(^{1}\\) Ajou University, \\(^{2}\\)Hyundai Motor Company, \\(^{3}\\)Naver AI Lab" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.242, + 0.837, + 0.257 + ], + "angle": 0, + "content": "ch0104@ajou.ac.kr, chldusxkr@hyundai.com, bkw0622@ajou.ac.kr, wjhwang@ajou.ac.kr" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.292, + 0.314, + 0.308 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.324, + 0.474, + 0.657 + ], + "angle": 0, + "content": "Point-cloud based 3D object detectors recently have achieved remarkable progress. However, most studies are limited to the development of network architectures for improving only their accuracy without consideration of the computational efficiency. In this paper, we first propose an autoencoder-style framework comprising channel-wise compression and decompression via interchange transfer-based knowledge distillation. 
To learn the map-view feature of a teacher network, the features from teacher and student networks are independently passed through the shared autoencoder; here, we use a compressed representation loss that binds the channel-wised compression knowledge from both student and teacher networks as a kind of regularization. The decompressed features are transferred in opposite directions to reduce the gap in the interchange reconstructions. Lastly, we present an head attention loss to match the 3D object detection information drawn by the multi-head self-attention mechanism. Through extensive experiments, we verify that our method can train the lightweight model that is well-aligned with the 3D point cloud detection task and we demonstrate its superiority using the well-known public datasets; e.g., Waymo and nuScenes." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.687, + 0.21, + 0.702 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.712, + 0.47, + 0.864 + ], + "angle": 0, + "content": "Convolutional neural network (CNN)-based 3D object detection methods using point clouds [13] [35] [36] [43] [49] have attracted wide attention based on their outstanding performance for self-driving cars. Recent CNN-based works have required more computational complexity to achieve higher precision under the various wild situation. Some studies [23] [36] [43] have proposed methods to improve the speed of 3D object detection through which the non-maximum suppression (NMS) or anchor procedures are removed but the network parameters are still large." + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.294, + 0.87, + 0.534 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.537, + 0.895, + 0.689 + ], + "angle": 0, + "content": "Figure 1. Performance comparison between teacher and student networks for a point-cloud based 3D object detection. The top example images are qualitatively compared between the results of teacher, student and our networks. Specifically, the first row images are an input sample with labels and the center heatmap head of the teacher network. The second row examples are responses of teacher, student, and ours for the yellow circle on the heatmap (or the blue dash circle on the input). The bottom image quantitatively shows the computational complexity and the corresponding accuracy of teacher, student and our networks, respectively. Best viewed in color." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.704, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Knowledge distillation (KD) is one of the parameter compression techniques, which can effectively train a compact student network through the guidance of a deep teacher network, as shown in the example images of Fig. 1. Starting with Hinton's work [9], many KD studies [10] [20] [28] [44] have transferred the discriminative teacher knowledge to the student network for classification tasks. From the viewpoint of the detection task, KD should be extended to the regression problem, including the object locations, which is not easy to straight-forwardly apply the classification-based KD methods to the detection task. 
To alleviate this problem, KD methods for object detection have been developed for mimicking the output of the backbone network [15] (e.g., region" + }, + { + "type": "page_footnote", + "bbox": [ + 0.076, + 0.875, + 0.47, + 0.9 + ], + "angle": 0, + "content": "1Our code is available at https://github.com/hyeon-jo/interchange-transfer-KD." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "13540" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.182 + ], + "angle": 0, + "content": "proposal network) or individual detection head [2] [32]. Nevertheless, these methods have only been studied for detecting 2D image-based objects, and there is a limit to applying them to sparse 3D point cloud-based data that have not object-specific color information but only 3D position-based object structure information." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.186, + 0.471, + 0.459 + ], + "angle": 0, + "content": "Taking a closer look at differences between 2D and 3D data, there is a large gap in that 2D object detection usually predicts 2D object locations based on inherent color information with the corresponding appearances, but 3D object detection estimates 3D object boxes from inputs consisting of only 3D point clouds. Moreover, the number of the point clouds constituting objects varies depending on the distances and presence of occlusions [42]. Another challenge in 3D object detection for KD is that, compared to 2D object detection, 3D object detection methods [4] [6] [43] [21] have more detection head components such as 3D boxes, and orientations. These detection heads are highly correlated with each other and represent different 3D characteristics. In this respect, when transferring the detection heads of the teacher network to the student network using KD, it is required to guide the distilled knowledge under the consideration of the correlation among the multiple detection head components." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.464, + 0.471, + 0.901 + ], + "angle": 0, + "content": "In this paper, we propose a novel interchange transfer-based KD (itKD) method designed for the lightweight point-cloud based 3D object detection. The proposed itKD comprises two modules: (1) a channel-wise autoencoder based on the interchange transfer of reconstructed knowledge and (2) a head relation-aware self-attention on multiple 3D detection heads. First of all, through a channel-wise compressing and decompressing processes for KD, the interchange transfer-based autoencoder effectively represents the map-view features from the viewpoint of 3D representation centric-knowledge. Specifically, the encoder provides an efficient representation by compressing the map-view feature in the channel direction to preserve the spatial positions of the objects and the learning of the student network could be regularized by the distilled position information of objects in the teacher network. For transferring the interchange knowledge to the opposite networks, the decoder of the student network reconstructs the map-view feature under the guidance of the teacher network while the reconstruction of the teacher network is guided by the map-view feature of the student network. As a result, the student network can effectively learn how to represent the 3D map-view feature of the teacher. 
Furthermore, to refine the teacher's object detection results as well as its representation, our proposed head relation-aware self-attention gives a chance to learn the pivotal information that should be taught to the student network for improving the 3D detection results by considering the inter-head relation among the multiple detection head and the intra-head relation of" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.697, + 0.106 + ], + "angle": 0, + "content": "the individual detection head." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.107, + 0.892, + 0.242 + ], + "angle": 0, + "content": "In this way, we implement a unified KD framework to successfully learn the 3D representation and 3D detection results of the teacher network for the lightweight 3D point cloud object detection. We also conduct extensive ablation studies for thoroughly validating our approach in Waymo and nuScenes datasets. The results reveal the outstanding potential of our approach for transferring distilled knowledge that can be utilized to improve the performance of 3D point cloud object detection models." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.244, + 0.822, + 0.258 + ], + "angle": 0, + "content": "Our contributions are summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.264, + 0.892, + 0.353 + ], + "angle": 0, + "content": "- For learning the 3D representation-centric knowledge from the teacher network, we propose the channelwise autoencoder regularized in the compressed domain and the interchange knowledge transfer method wherein the reconstructed features are guided by the opposite networks." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.354, + 0.892, + 0.429 + ], + "angle": 0, + "content": "- For detection head-centric knowledge of the teacher, we suggest the head relation-aware self-attention which can efficiently distill the detection properties under the consideration of the inter-head relation and intra-head relation of the multiple 3D detection heads." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.43, + 0.892, + 0.505 + ], + "angle": 0, + "content": "- Our work is the best attempt to reduce the parameters of point cloud-based 3D object detection using KD. Additionally, we validate its superiority using two large datasets that reflect real-world driving conditions, e.g., Waymo and NuScenes." + }, + { + "type": "list", + "bbox": [ + 0.519, + 0.264, + 0.892, + 0.505 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.52, + 0.65, + 0.535 + ], + "angle": 0, + "content": "2. Related Works" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.545, + 0.86, + 0.561 + ], + "angle": 0, + "content": "2.1. 3D Object Detection based on Point Cloud" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.569, + 0.892, + 0.901 + ], + "angle": 0, + "content": "During the last few years, encouraged by the success of CNNs, the development of object detectors using CNNs is developing rapidly. Recently, many 3D object detectors have been studied and they can be briefly categorized by how they extract representations from point clouds; e.g., grid-based [35] [36] [49] [13] [43], point-based [18] [23] [17] [25] [39] and hybrid-based [3] [40] [8] [48] [22] methods. In detail, Vote3Deep [5] thoroughly exploited feature-centric voting to build CNNs for detecting objects in point clouds. 
In [29], they have studied on the task of amodal 3D object detection in RGB-D images, where a 3D region proposal network (RPN) to learn objectness from geometric shapes and the joint object recognition network to extract geometric features in 3D and color features in 2D. The 3D fully convolutional network [14] was straightforwardly applied to point cloud data for vehicle detection. In the early days, VoxelNet [49] has designed an end-to-end trainable detector based on learning-based voxelization using fully connected layers. In [35], they encoded the point cloud by VoxelNet and used the sparse convolution to achieve the fast detection. HVNet [41] fused the multi-scale voxel feature encoder at the point-wise level and projected into multi-" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "13541" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.471, + 0.196 + ], + "angle": 0, + "content": "ple pseudo-image feature maps for solving the various sizes of the feature map. In [26], they replaced the point cloud with a grid-based bird's-eye view (BEV) RGB-map and utilized YOLOv2 to detect the 3D objects. FIXOR [36] converted the point cloud to a 3D BEV map and carried out the real-time 3D object detection with an RPN-free single-stage based model." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.198, + 0.471, + 0.38 + ], + "angle": 0, + "content": "Recently, PointPillars (PP)-based method [13] utilized the PointNet [19] to learn the representation of point clouds organized in vertical columns for achieving the fast 3D object detection. To boost both performance and speed over PP, a pillar-based method [33] that incorporated a cylindrical projection into multi-view feature learning was proposed. More recently, CenterPoint [43] was introduced as an anchor-free detector that predicted the center of an object using a PP or VoxelNet-based feature encoder. In this paper, we employ the backbone architecture using CenterPoint because it is simple, near real-time, and achieves good performance in the wild situation." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.392, + 0.29, + 0.408 + ], + "angle": 0, + "content": "2.2. Knowledge Distillation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.415, + 0.47, + 0.687 + ], + "angle": 0, + "content": "KD is one of the methods used for compressing deep neural networks and its fundamental key is to imitate the knowledge extracted from the teacher network, which has heavy parameters as well as good accuracy. Hinton et al. [9] performed a knowledge transfer using KL divergence; FitNet [20] proposed a method for teaching student networks by imitating intermediate layers. On the other hand, TAKD [16] and DGKD [28] used multiple teacher networks for transferring more knowledge to the student network in spite of large parameter gaps. Recently, some studies have been proposed using the layers shared between the teacher and the student networks for KD. Specifically, in [37], KD was performed through softmax regression as the student and teacher networks shared the same classifier. IEKD [10] proposed a method to split the student network into inheritance and exploration parts and mimic the compact teacher knowledge through a shared latent feature space via an autoencoder." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.688, + 0.47, + 0.853 + ], + "angle": 0, + "content": "Beyond its use in classification, KD for detection should transfer the regression knowledge regarding the positions of the objects to the student network. For this purpose, a KD for 2D object detection [15] was first proposed using feature map mimic learning. In [2], they transferred the detection knowledge of the teacher network using hint learning for an RPN, weighted cross-entropy loss for classification, and bound regression loss for regression. Recently, Wang et al. [32] proposed a KD framework for detection by utilizing the cross-location discrepancy of feature responses through fine-grained feature imitation." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.856, + 0.47, + 0.901 + ], + "angle": 0, + "content": "As far as we know, there are few KD studies [7] [47] [34] [38] on point cloud-based 3D object detection so far. However, looking at similar studies on 3D knowledge trans" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.318 + ], + "angle": 0, + "content": "fer, SE-SSD [47] presented a knowledge distillation-based self-ensembling method for exploiting soft and hard targets with constraints to jointly optimize the model without extra computational cost during inference time. Object-DGCNN [34] proposed a NMS-free 3D object detection via dynamic graphs and a set-to-set distillation. They used the set-to-set distillation method for improving the performance without the consideration of the model compression. Another latest study is SparseKD [38] which suggested a label KD method that distills a few pivotal positions determined by teacher classification response to enhance the logit KD method. On the other hand, in this paper, we are more interest in how to make the student network lighter, or lower computational complexity, by using the KD for 3D object detection." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.333, + 0.634, + 0.35 + ], + "angle": 0, + "content": "3. Methodology" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.358, + 0.631, + 0.373 + ], + "angle": 0, + "content": "3.1. Background" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.381, + 0.892, + 0.548 + ], + "angle": 0, + "content": "The 3D point cloud object detection methods [13] [49] generally consists of three components; a point cloud encoder, a backbone network, and detection heads. In this paper, we employ CenterPoint [43] network as a backbone architecture. Since the parameter size of the backbone network is the largest among components of the 3D object detector, we aim to construct the student network by reducing the channel sizes of the backbone network for efficient network. We design our method to teach the student 3D representation-centric knowledge and detection head-centric knowledge of the teacher network, respectively." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.558, + 0.699, + 0.573 + ], + "angle": 0, + "content": "3.2. Interchange Transfer" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.581, + 0.892, + 0.792 + ], + "angle": 0, + "content": "We adopt an autoencoder framework to effectively transfer the meaningful distilled knowledge regarding 3D detection from the teacher to the student network. The traditional encoder-based KD methods [10] [11] have been limited to the classification task, which transfers only compressed categorical knowledge to the student network. 
However, from the viewpoint of the detection task, the main KD goal of this paper is transferring the distilled knowledge regarding not only categorical features but also object location-related features. Particularly, unlike 2D detectors, 3D object detectors should regress more location information such as object orientations, 3D box sizes, etc., and it results in increasing the importance of how to transfer the 3D location features to the student network successfully." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.793, + 0.892, + 0.854 + ], + "angle": 0, + "content": "For this purpose, we transfer the backbone knowledge that contains 3D object representation from the teacher network to the student through the compressed and reconstructed knowledge domains. As shown in Fig. 2, we in" + }, + { + "type": "page_footnote", + "bbox": [ + 0.5, + 0.863, + 0.892, + 0.901 + ], + "angle": 0, + "content": "2The total parameter size of the 3D detector is about \\(5.2\\mathrm{M}\\) and the backbone size is approximately \\(4.8\\mathrm{M}\\), which is \\(92\\%\\). Further details are found in the supplementary material." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "13542" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.161, + 0.092, + 0.813, + 0.382 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.386, + 0.895, + 0.471 + ], + "angle": 0, + "content": "Figure 2. Overview of the proposed knowledge distillation method. The teacher and student networks take the same point clouds as inputs. Then, the map-view features \\( M^t \\) and \\( M^s \\) are extracted from the teacher and student networks, respectively. The channel-wise autoencoder transfers the knowledge obtained from \\( M^t \\) to \\( M^s \\) by using the compressed representation loss and interchange transfer loss consecutively. The head relation-aware self-attention provides the relation-aware knowledge of multiple detection head to the student network using the attention head loss. The dotted lines of the modules denote that there are shared network parameters between the teacher and student networks. The light-yellow boxes are buffer layers for sampling the features to match the channel sizes of networks." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.482, + 0.473, + 0.679 + ], + "angle": 0, + "content": "produce a channel-wise autoencoder which consists of an encoder in which the channel dimension of the autoencoder is gradually decreased and a decoder in the form of increasing the channel dimension. Note that spatial features play a pivotal role in the detection task and we try to preserve the spatial information by encoding features in the channel direction. We propose a compressed representation loss to coarsely guide location information of the objects to the student network in Fig. 2, and the compressed representation loss has an effect similar to the regularization of the autoencoder that binds the coordinates of the objectness between the teacher and student networks. 
The compressed representation loss function \\(\\mathcal{L}_{cr}\\) is represented as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.124, + 0.688, + 0.47, + 0.726 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {c r} = m _ {o b j} \\circ \\mathcal {S} \\left[ E \\left(\\theta_ {e n c}, M ^ {t}\\right), E \\left(\\theta_ {e n c}, M ^ {s}\\right) \\right] \\tag {1} \\\\ = m _ {o b j} \\circ \\mathcal {S} \\left[ M _ {e n c} ^ {t}, M _ {e n c} ^ {s} \\right], \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.47, + 0.825 + ], + "angle": 0, + "content": "where \\( E \\) is a shared encoder, which has the parameters \\( \\theta_{enc} \\), and \\( S \\) denotes \\( l_1 \\) loss as a similarity measure. \\( M^t \\) and \\( M^s \\) are outputs of the teacher and student backbones, respectively. \\( m_{obj} \\) represents a binary mask to indicate object locations in backbone output like [38] and \\( \\circ \\) is an element-wise product." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.826, + 0.471, + 0.902 + ], + "angle": 0, + "content": "After performing the coarse representation-based knowledge distillation in a compressed domain, the fine representation features of the teacher network are required to teach the student network from the viewpoint of 3D object detection. In this respect, the decoder reconstructs the fine map" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.482, + 0.895, + 0.664 + ], + "angle": 0, + "content": "view features in the channel direction from the compressed features. Through the proposed interchange transfer loss, the reconstructed features are guided from the opposite networks, not their own stem networks, as shown in Fig. 2. Specifically, since the teacher network is frozen and we use the shared autoencoder for both student and teacher networks, we can teach the reconstructed fine features from the student network to resemble the output of the teacher network \\( M^t \\) rather than the student \\( M^s \\). Moreover, the reconstructed fine features from the teacher network can guide the student's output, \\( M^s \\) at the same time. The proposed interchange transfer loss \\( \\mathcal{L}_{it} \\) is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.587, + 0.675, + 0.892, + 0.693 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {t \\rightarrow s} = \\mathcal {S} \\left[ M ^ {s}, D \\left(\\theta_ {\\text {d e c}}, M _ {\\text {e n c}} ^ {t}\\right)\\right], \\tag {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.588, + 0.701, + 0.891, + 0.719 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {s \\rightarrow t} = \\mathcal {S} \\left[ M ^ {t}, D \\left(\\theta_ {\\text {d e c}}, M _ {\\text {e n c}} ^ {s}\\right)\\right], \\tag {3}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.625, + 0.723, + 0.892, + 0.739 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {i t} = \\mathcal {L} _ {s \\rightarrow t} + \\mathcal {L} _ {t \\rightarrow s}, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.747, + 0.894, + 0.837 + ], + "angle": 0, + "content": "where \\(D\\) is the decoder that contains the network parameter \\(\\theta_{dec}\\), which is a shared parameter. We hereby present the representation-based KD for 3D object detection in both compressed and decompressed domains to guide the student network to learn the map-view feature of the teacher network efficiently." 
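To make Eqs. (1)-(4) concrete, the following is a minimal PyTorch-style sketch of the channel-wise autoencoder together with the compressed representation and interchange transfer losses; it is an illustration under stated assumptions, not the authors' released implementation. The 1x1 convolution channel sizes (128-64-32 for the encoder, 64-128-384 for the decoder) follow the implementation details reported in Sec. 4.1, the similarity S is taken as an l1 distance, and the sketch assumes the teacher and student map-view features have already been matched in channel size (the paper uses buffer layers for this).

```python
# Sketch only: channel-wise autoencoder with compressed representation (Eq. 1)
# and interchange transfer (Eqs. 2-4) losses. Shapes and channel sizes are assumptions.
import torch
import torch.nn as nn
import torch.nn.functional as F


class ChannelWiseAutoencoder(nn.Module):
    """Shared encoder/decoder that compresses only the channel dimension of the
    map-view feature, so the spatial (H, W) layout of the objects is preserved."""

    def __init__(self, in_ch=384, enc_ch=(128, 64, 32), dec_ch=(64, 128, 384)):
        super().__init__()
        enc, c = [], in_ch
        for c_out in enc_ch:
            enc += [nn.Conv2d(c, c_out, kernel_size=1), nn.ReLU(inplace=True)]
            c = c_out
        dec = []
        for i, c_out in enumerate(dec_ch):
            dec.append(nn.Conv2d(c, c_out, kernel_size=1))
            if i < len(dec_ch) - 1:
                dec.append(nn.ReLU(inplace=True))
            c = c_out
        self.encoder = nn.Sequential(*enc)  # E(theta_enc, .)
        self.decoder = nn.Sequential(*dec)  # D(theta_dec, .)


def itkd_representation_losses(ae, m_t, m_s, m_obj):
    """m_t, m_s: teacher/student map-view features (B, C, H, W), already matched in
    channel size; m_obj: binary object mask broadcastable to the encoded feature."""
    z_t, z_s = ae.encoder(m_t), ae.encoder(m_s)
    # Eq. (1): l1 similarity of the compressed features, masked to object locations.
    l_cr = (m_obj * (z_t - z_s).abs()).mean()
    # Eqs. (2)-(3): each reconstruction is matched to the *opposite* network's feature.
    l_t2s = F.l1_loss(ae.decoder(z_t), m_s)  # teacher code -> student map-view feature
    l_s2t = F.l1_loss(ae.decoder(z_s), m_t)  # student code -> teacher map-view feature
    # Eq. (4): interchange transfer loss.
    l_it = l_t2s + l_s2t
    return l_cr, l_it
```

Because the encoder compresses only along the channel axis, the spatial layout of the map-view feature, and hence the object positions, is retained in the compressed domain, which is what allows the masked loss of Eq. (1) to act as a position-aware regularizer.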
+ }, + { + "type": "title", + "bbox": [ + 0.5, + 0.847, + 0.816, + 0.862 + ], + "angle": 0, + "content": "3.3. Head Relation-Aware Self-Attention" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.893, + 0.902 + ], + "angle": 0, + "content": "Fundamentally, our backbone network, e.g., Center-Point [43], has various types of 3D object characteristics" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "13543" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.164, + 0.092, + 0.812, + 0.288 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.291, + 0.895, + 0.364 + ], + "angle": 0, + "content": "Figure 3. Head Relation-Aware Self-Attention. We make the object center-head feature from object center locations in the detection head feature and use it as different shaped inputs to self-attention for inter-head relation and intra-head relation. In the self-attention for inter-head relation, we use the object center-head feature as an input for the self-attention. In the self-attention for intra-head relation, the detection heads are separately used for the independent self-attention functions. The outputs of the self-attention are concatenated by \\( \\mathbb{C} \\) operations and the head relation-aware self-attention is generated through the fusion layer." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.373, + 0.473, + 0.522 + ], + "angle": 0, + "content": "on detection heads. Specifically, the locations, size, and direction of an object are different properties, but they are inevitably correlated to each other because they come from the same object. However, the traditional KD methods [2] [34] were only concerned with how the student network straight-forwardly mimicked the outputs of the teacher network without considering the relation among the detection heads. To overcome this problem, we make use of the relation of detection heads as a major factor for the detection head-centric KD." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.524, + 0.473, + 0.768 + ], + "angle": 0, + "content": "Our proposed head relation-aware self-attention is directly inspired by the multi-head self-attention [31] in order to learn the relation between the multiple detection head. As shown in Fig. 3, we first extract \\(i\\)-th instance feature \\(v^{i} \\in \\mathbb{R}^{c}\\), where \\(c\\) is the channel size, from the center location of the object in the detection head feature. Note that, since the instance feature is extracted from the multiple detection head, it has several object properties such as a class-specific heatmap \\(v_{hm}^{i}\\), a sub-voxel location refinement \\(v_{o}^{i}\\), a height-above-ground \\(v_{h}^{i}\\), a 3D size \\(v_{s}^{i}\\), and a yaw rotation angle \\(v_{r}^{i}\\). When there are a total of \\(n\\) objects, we combine them to make an object center-head feature \\(v \\in \\mathbb{R}^{n \\times c}\\). We use the same object center-head feature \\(v\\) of dimension \\(n\\) for query, key, and value, which are an input of the scaled dot-product attention. The self-attention function \\(\\mathcal{F}\\) is computed by" + }, + { + "type": "equation", + "bbox": [ + 0.17, + 0.778, + 0.47, + 0.814 + ], + "angle": 0, + "content": "\\[\n\\mathcal {F} (v) = \\operatorname {s o f t m a x} \\left(\\frac {v ^ {\\top} \\cdot v}{\\sqrt {n}}\\right) \\cdot v. 
\\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.826, + 0.473, + 0.901 + ], + "angle": 0, + "content": "The proposed head relation-aware self-attention consists of two different self-attention for inter-head and intra-head relations as illustrated in Fig. 3. We propose the self-attention based on the inter-head relation of the instance features, which is made in order to consider the relation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.373, + 0.892, + 0.432 + ], + "angle": 0, + "content": "between all detected objects and their different properties, rather than a single detected instance, from the global viewpoint. The self-attention for inter-head relation is computed by" + }, + { + "type": "equation", + "bbox": [ + 0.571, + 0.433, + 0.892, + 0.451 + ], + "angle": 0, + "content": "\\[\n\\mathcal {F} _ {\\text {i n t e r}} (v) = \\mathcal {F} ([ v _ {h m}, v _ {o}, v _ {h}, v _ {s}, v _ {r} ]). \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.454, + 0.892, + 0.53 + ], + "angle": 0, + "content": "On the other hand, we suggest the self-attention for intrahead relation using the individual detection heads. Here we perform the attentions using only local relation in individual detection heads designed for different properties (e.g., orientation, size, etc.) and concatenate them. Its equation is" + }, + { + "type": "equation", + "bbox": [ + 0.509, + 0.552, + 0.892, + 0.57 + ], + "angle": 0, + "content": "\\[\n\\mathcal {F} _ {\\text {i n t r a}} (v) = \\left[ \\mathcal {F} \\left(v _ {h m}\\right), \\mathcal {F} \\left(v _ {o}\\right), \\mathcal {F} \\left(v _ {h}\\right), \\mathcal {F} \\left(v _ {s}\\right), \\mathcal {F} \\left(v _ {r}\\right) \\right]. \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.577, + 0.892, + 0.652 + ], + "angle": 0, + "content": "We concatenate the outputs of the self-attention and apply the fusion layer to calculate a final attention score that considers the relation between the detection heads and objects. The head relation-aware self-attention equation \\(\\mathcal{F}_{RA}\\) is derived by:" + }, + { + "type": "equation", + "bbox": [ + 0.569, + 0.66, + 0.892, + 0.678 + ], + "angle": 0, + "content": "\\[\n\\mathcal {F} _ {R A} (v) = \\mathcal {G} \\left(\\left[ \\mathcal {F} _ {\\text {i n t e r}} (v), \\mathcal {F} _ {\\text {i n t r a}} (v) \\right]\\right), \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.685, + 0.892, + 0.745 + ], + "angle": 0, + "content": "where \\(\\mathcal{G}\\) is the fusion layer, e.g., \\(1 \\times 1\\) convolution layer. The student network indirectly takes the teacher's knowledge by learning the relation between the multiple detection head of the teacher network through head attention loss as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.587, + 0.754, + 0.892, + 0.771 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {a t t n}} = \\mathcal {S} \\left(\\mathcal {F} _ {R A} \\left(v _ {t}\\right), \\mathcal {F} _ {R A} \\left(v _ {s}\\right)\\right), \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.778, + 0.891, + 0.808 + ], + "angle": 0, + "content": "where \\( v_{t} \\) and \\( v_{s} \\) are the object center-head features of the teacher and the student, respectively." 
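The head relation-aware self-attention of Eqs. (5)-(9) can likewise be sketched in a few lines. This is an illustrative PyTorch-style outline, not the authors' code: the per-head feature widths and the single-sample (no batch axis) convention are assumptions made for compactness, while the fusion layer G is realised as a 1x1 convolution as suggested in the text.

```python
# Sketch only: head relation-aware self-attention (Eqs. 5-8) and head attention loss (Eq. 9).
import math
import torch
import torch.nn as nn
import torch.nn.functional as F


def self_attn(v):
    """Eq. (5): scaled dot-product self-attention where the object center-head feature
    v (n objects x c channels) is used as query, key and value simultaneously."""
    n = v.shape[0]
    attn = torch.softmax(v @ v.transpose(0, 1) / math.sqrt(n), dim=-1)  # (n, n)
    return attn @ v                                                     # (n, c)


class HeadRelationAwareAttention(nn.Module):
    """Eqs. (6)-(8): inter-head and intra-head self-attention fused by a 1x1 convolution.
    total_ch must equal the summed per-head channel width (sum of c_k)."""

    def __init__(self, total_ch):
        super().__init__()
        self.fusion = nn.Conv1d(2 * total_ch, total_ch, kernel_size=1)  # fusion layer G

    def forward(self, heads):
        # heads: per-head features [v_hm, v_o, v_h, v_s, v_r], each of shape (n, c_k).
        v = torch.cat(heads, dim=-1)                                 # (n, sum c_k)
        f_inter = self_attn(v)                                       # Eq. (6)
        f_intra = torch.cat([self_attn(h) for h in heads], dim=-1)   # Eq. (7)
        x = torch.cat([f_inter, f_intra], dim=-1)                    # (n, 2 * sum c_k)
        # Conv1d expects (batch, channels, length); objects are treated as the length axis.
        return self.fusion(x.t().unsqueeze(0)).squeeze(0).t()        # Eq. (8)


def head_attention_loss(relation_attn, heads_teacher, heads_student):
    """Eq. (9): l1 distance between the relation-aware outputs of teacher and student."""
    return F.l1_loss(relation_attn(heads_teacher), relation_attn(heads_student))
```

In this sketch the teacher and student object center-head features are passed through the same relation-attention module, consistent with the shared-parameter modules indicated by the dotted lines in Fig. 2 and Fig. 3.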
+ }, + { + "type": "text", + "bbox": [ + 0.518, + 0.809, + 0.808, + 0.824 + ], + "angle": 0, + "content": "Consequently, the overall loss is derived by" + }, + { + "type": "equation", + "bbox": [ + 0.556, + 0.832, + 0.892, + 0.849 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {t o t a l}} = \\alpha \\mathcal {L} _ {\\text {s u p}} + \\beta \\left(\\mathcal {L} _ {\\text {i t}} + \\mathcal {L} _ {\\text {c r}} + \\mathcal {L} _ {\\text {a t t n}}\\right), \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "where \\(\\mathcal{L}_{sup}\\) is the supervised loss that consists of focal loss and regression loss, and \\(\\alpha\\) and \\(\\beta\\) are the balancing parameters, which we set as 1 for simplicity." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "13544" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.077, + 0.09, + 0.419, + 0.108 + ], + "angle": 0, + "content": "4. Experimental Results and Discussions" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.115, + 0.281, + 0.133 + ], + "angle": 0, + "content": "4.1. Environment Settings" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.139, + 0.47, + 0.336 + ], + "angle": 0, + "content": "Waymo Waymo open dataset [30] is one of the large-scale datasets for autonomous driving, which is captured by the synchronized and calibrated high-quality LiDAR and camera across a range of urban and suburban geographies. This dataset provides 798 training scenes and 202 validation scenes obtained by detecting all the objects within a \\(75\\mathrm{m}\\) radius; it has a total of 3 object categories (e.g., vehicle, pedestrian, and cyclist) which have 6.1M, 2.8M, and 67K sets, respectively. The mean Average Precision (mAP) and mAP weighted by heading accuracy (mAPH) are the official metrics for Waymo evaluation. mAPH is a metric that gives more weight to the heading than it does to the sizes, and it accounts for the direction of the object." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.336, + 0.47, + 0.488 + ], + "angle": 0, + "content": "nuScenes nuScenes dataset [1] is another large-scale dataset used for autonomous driving. This dataset contains 1,000 driving sequences. 700, 150, and 150 sequences are used for training, validation, and testing, respectively. Each sequence is captured approximately 20 seconds with 20 FPS using the 32-lane LiDAR. Its evaluation metrics are the average precision (AP) and nuScenes detection score (NDS). NDS is a weighted average of mAP and true positive metrics which measures the quality of the detections in terms of box location, size, orientation, attributes, and velocity." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.489, + 0.471, + 0.82 + ], + "angle": 0, + "content": "Implementation details Following the pillar-based CenterPoint [43] as the teacher network, we use an Adam optimizer [12] with a weight decay of 0.01 and a cosine annealing strategy [27] to adjust the learning rate. We set 0.0003 for initial learning rate, 0.003 for max learning rate, and 0.95 for momentum. The networks have been trained for 36 epochs on \\(8 \\times \\mathrm{V}100\\) GPUs with a batch size of 32. For Waymo dataset, we set the detection range to \\([-74.88\\mathrm{m}, 74.88\\mathrm{m}]\\) for the X and Y axes, \\([-2\\mathrm{m}, 4\\mathrm{m}]\\) for the Z-axis, and a grid size of \\((0.32\\mathrm{m}, 0.32\\mathrm{m})\\). 
In experiments on nuScenes dataset, we used a \\((0.2\\mathrm{m}, 0.2\\mathrm{m})\\) grid and set the detection range to \\([-51.2\\mathrm{m}, 51.2\\mathrm{m}]\\) for the X and Y-axes, \\([-5\\mathrm{m}, 3\\mathrm{m}]\\) for the Z-axis, and a grid size of \\((0.2\\mathrm{m}, 0.2\\mathrm{m})\\). Compared to the teacher network, the student network has \\(1/4\\) less channel capacity of backbone network. Our channel-wise autoencoder consists of three \\(1 \\times 1\\) convolution layers as the encoder and three \\(1 \\times 1\\) convolution layers as the decoder and the number of filters are 128, 64, 32 in encoder layers and 64, 128, 384 in decoder layers. The student's input buffer layer increases the channel size of 196 to 384 and the teacher's output buffer layer decreases the channel size 384 to 196." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.832, + 0.407, + 0.849 + ], + "angle": 0, + "content": "4.2. Overall KD Performance Comparison" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.856, + 0.47, + 0.901 + ], + "angle": 0, + "content": "We validate the performance of our method compared with well-known KD methods on the Waymo and nuScenes datasets. We re-implement the seven KD methods from 2D" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.258 + ], + "angle": 0, + "content": "classification-based KD to 3D detection-based KD in this paper. We set the baseline by applying the Kullback-Leibler (KL) divergence loss [9] to the center heatmap head and \\( l_{1} \\) loss to the other regression heads. FitNet [20] is a method that mimics the intermediate outputs of layers and we apply it to the output of the backbone for simplicity. We also simply extend EOD-KD [2], one of the 2D object detection KDs, to 3D object detection. We apply TOFD [45], a 3D classification-based KD, to our detection task and straightforwardly use SE-SSD [47], Object DGCNN [34], and SparseKD [38] for 3D object detection KD." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.259, + 0.893, + 0.516 + ], + "angle": 0, + "content": "Table 1 shows that our method almost outperforms other KD methods on mAP and mAPH values for level 1 and level 2 under all three categories of objects. Especially, our performance improvement of mAPH is better than other methods, which indicates our method guides the student network well where the detected objects are facing. To verify the generality of the proposed method, we make additional comparison results using the nuScenes dataset, another large-scale 3D dataset for autonomous driving, in Table 2. Compared with the other methods, our method achieves the best accuracy under the NDS and mAP metrics in the nuScenes validation set. Specifically, when the student network shows \\(50.24\\%\\) NDS and \\(38.52\\%\\) mAP, our method achieves \\(53.90\\%\\) \\((+3.66\\%)\\) NDS and \\(41.33\\%\\) \\((+2.81\\%)\\) mAP. In detail, our method outperforms the other methods for the most of object classes except the construction vehicle and the bicycle." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.528, + 0.665, + 0.542 + ], + "angle": 0, + "content": "4.3. Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.552, + 0.892, + 0.687 + ], + "angle": 0, + "content": "To analyze of our proposed method in detail, we conduct ablation studies on the Waymo dataset, and the whole performances are measured by mAPH at level 2 for simplicity. 
For the qualitative analysis, we visualize the map-view feature at each stage to validate the what kinds of knowledge are transferred from the teacher to the student by the proposed method. For simple visualization, we apply the \\( L_{1} \\) normalization to the map-view feature in the channel direction." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.689, + 0.892, + 0.84 + ], + "angle": 0, + "content": "As shown in Fig. 4, the objects and backgrounds are well activated in the example image of the teacher output. On the other hand, the encoder output is activated by further highlighting the coarse positions of the target objects. When looking at the decoder output, we can see that all the fine surrounding information is represented again. At this point, it is worth noting that compared to the teacher output, the target objects are highlighted a little more. From these visual comparisons, we can infer how our method successfully transfers the object-centered knowledge to the student." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We explore the buffer layer that matches the channel size of the channel-wise autoencoder without the head attention loss. As shown in Table 3, we compare the three types for the buffer layer: (1) \\( S \\rightarrow T \\) is the upsampling method that in-" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "13545" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.089, + 0.892, + 0.117 + ], + "angle": 0, + "content": "Table 1. Waymo evaluation. Comparisons with different KD methods in the Waymo validation set. The best accuracy is indicated in bold, and the second-best accuracy is underlined." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.12, + 0.89, + 0.311 + ], + "angle": 0, + "content": "
MethodVehiclePedestrianCyclist
Level 1Level 2Level 1Level 2Level 1Level 2
mAPmAPHmAPmAPHmAPmAPHmAPmAPHmAPmAPHmAPmAPH
Teacher [43]73.7273.1765.6165.1172.4361.7264.7354.9964.3062.6161.9160.28
Student (1/4)64.2263.5656.2155.6263.7253.2256.1446.7853.0151.7250.9949.75
Baseline64.7864.0556.9256.2664.8552.9857.3746.7554.7152.4652.6550.48
FitNet [20]65.1164.3857.2456.5864.8953.2957.3747.0054.9152.6152.8450.63
EOD-KD [2]66.5065.7958.5657.9265.9954.5858.4848.2555.1852.9353.1050.94
SE-SSD [47]65.9565.2258.0557.4065.3953.9857.9247.6955.0152.9852.9450.99
TOFD [45]64.0963.4356.1355.5566.2454.9858.5048.4554.9553.0652.8651.04
Obj. DGCNN [34]66.0765.3859.2758.5565.9854.4459.4249.1154.6552.6253.1350.93
SparseKD [38]65.2564.5956.9756.3867.4454.5459.2447.8355.5453.4553.6351.61
Ours67.4366.7259.4458.8167.2656.0259.7349.6156.0954.2453.9652.19
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.322, + 0.892, + 0.35 + ], + "angle": 0, + "content": "Table 2. nuScenes evaluation. Comparisons with different KD methods in the nuScenes validation set. The best accuracy is indicated in bold, and the second-best accuracy is underlined." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.352, + 0.89, + 0.513 + ], + "angle": 0, + "content": "
MethodNDSmAPcartruckbustrailercon. veh.ped.motor.bicycletr. conebarrier
Teacher [43]60.1650.2584.0453.4864.2931.9012.5078.9344.0118.1854.8760.30
Student (1/4)50.2438.5277.8538.1851.3822.333.9571.5123.903.5143.0349.56
Baseline51.4839.1978.7237.9050.4722.423.5172.2926.254.6544.9150.77
FitNet [20]51.4238.9078.3037.4050.4022.203.8072.1025.704.2544.2050.60
EOD-KD [2]52.4939.8278.4038.6050.9022.703.9073.2028.205.3045.0051.97
SE-SSD [47]52.2139.5378.6938.5649.8123.703.7272.8628.274.2544.2451.18
TOFD [45]52.8840.5779.0639.7352.0324.513.5673.5129.585.6245.3452.79
Obj. DGCNN [34]52.9140.3478.9539.2453.3723.964.1372.9828.634.9944.7252.46
SparseKD [38]53.0140.2678.7839.5051.8723.643.3073.1729.345.7544.9852.26
Ours53.9041.3379.4840.3854.3526.443.5873.9130.215.3945.9053.70
" + }, + { + "type": "image", + "bbox": [ + 0.096, + 0.523, + 0.273, + 0.629 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.159, + 0.633, + 0.21, + 0.645 + ], + "angle": 0, + "content": "(a) Input" + }, + { + "type": "image", + "bbox": [ + 0.274, + 0.523, + 0.449, + 0.629 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.295, + 0.632, + 0.425, + 0.644 + ], + "angle": 0, + "content": "(b) Teacher output \\((M^t)\\)" + }, + { + "type": "image", + "bbox": [ + 0.097, + 0.646, + 0.271, + 0.751 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.132, + 0.755, + 0.236, + 0.767 + ], + "angle": 0, + "content": "(c) Encoder output" + }, + { + "type": "image", + "bbox": [ + 0.274, + 0.646, + 0.448, + 0.751 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.307, + 0.754, + 0.413, + 0.767 + ], + "angle": 0, + "content": "(d) Decoder output" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.777, + 0.47, + 0.833 + ], + "angle": 0, + "content": "Figure 4. Feature visualization on the proposed channel-wise autoencoder. (a) an example input image and (b) the output feature of the teacher network. (c) and (d) are the output images of encoder and decoder of the teacher, respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.473, + 0.901 + ], + "angle": 0, + "content": "creases the student's map-view feature to the teacher's feature. (2) \\( T \\to S \\) is the downsampling method that decreases the teacher's feature to the student's feature. (3) \\( (S + T) / 2 \\) is that the teacher's feature is downsampled and the stu" + }, + { + "type": "table_caption", + "bbox": [ + 0.549, + 0.524, + 0.843, + 0.537 + ], + "angle": 0, + "content": "Table 3. Buffer layer for different channel size." + }, + { + "type": "table", + "bbox": [ + 0.513, + 0.539, + 0.882, + 0.607 + ], + "angle": 0, + "content": "
MethodVehiclePedestrianCyclistAvg.
S → T58.4148.9051.9053.07
T → S58.6248.7851.7553.05
(S + T) / 258.4748.8451.5452.95
" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.616, + 0.892, + 0.643 + ], + "angle": 0, + "content": "Table 4. Effect of shared and non-shared parameters for the autoencoder." + }, + { + "type": "table", + "bbox": [ + 0.508, + 0.645, + 0.885, + 0.698 + ], + "angle": 0, + "content": "
MethodVehiclePedestrianCyclistAvg.
Non-shared56.2645.8548.2350.11
Shared58.4148.9051.9053.07
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.719, + 0.892, + 0.763 + ], + "angle": 0, + "content": "dent's feature is upsampled to the median size. The experiments show that the upsampling method performs better when considering all the classes." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.892, + 0.854 + ], + "angle": 0, + "content": "In Table 4, we observe the performance difference when the autoencoder parameters are shared or not. From the result, we can conclude that the shared parameters achieve better performance because what we want to is for the student to learn the teacher's knowledge, not the independent model." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.856, + 0.892, + 0.9 + ], + "angle": 0, + "content": "We investigate improvements made by our interchange transfer for KD without the head attention loss as shown in Table 5. Self-reconstruction is a method wherein the de" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "13546" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.089, + 0.47, + 0.117 + ], + "angle": 0, + "content": "Table 5. Comparison of different reconstruction methods for the autoencoder." + }, + { + "type": "table", + "bbox": [ + 0.079, + 0.118, + 0.47, + 0.173 + ], + "angle": 0, + "content": "
MethodVehiclePedestrianCyclistAvg.
Self Recon.56.5747.2650.2951.37
Ours58.4148.9051.9053.07
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.183, + 0.469, + 0.224 + ], + "angle": 0, + "content": "Table 6. Comparison of KD methods for the multiple detection head. KL loss and \\( l_{1} \\) loss denote that directly apply the loss function to all detection heads for KD." + }, + { + "type": "table", + "bbox": [ + 0.079, + 0.226, + 0.469, + 0.375 + ], + "angle": 0, + "content": "
MethodVehiclePedestrianCyclistAvg.
Student55.6246.7849.7550.72
Baseline56.2646.7550.4851.16
KL loss [9]55.9245.0847.4949.50
l1loss55.6245.1048.7349.82
AT [44]56.8547.3450.3651.52
Linter56.4146.9050.9051.40
Lintra57.2047.1951.2351.87
Lattn57.1047.3451.7952.08
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.383, + 0.469, + 0.488 + ], + "angle": 0, + "content": "coder uses the corresponding input for the reconstruction and our interchange reconstruction is a method wherein the proposed \\(\\mathcal{L}_{it}\\) objective transfers the reconstructed knowledge to the opponent network. Our interchange transfer-based reconstruction achieves better results and note that our main task is not the reconstruction but the 3D object-based knowledge transfer for KD." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.49, + 0.469, + 0.672 + ], + "angle": 0, + "content": "3D detection [4] [6] [43] [21] has the multiple detection head. To prove the superiority of the proposed head attention objective for 3D object detection, we make the KD comparison results against only multiple detection head without the autoencoder, as shown in Table 6. Since the heatmap head classifies objects and other heads regress 3D bounding box information, Applying KL loss and \\( l_{1} \\) loss to all detection heads has a negative effect. However, it is required to consider the relation of detection heads. In this respect, our method achieves better performance than the other KD methods which directly mimic the output of detection heads or simply employ attention mechanism." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.673, + 0.469, + 0.838 + ], + "angle": 0, + "content": "Table 7 shows the overall effect of the proposed losses on the KD performances. We set up the experiments by adding each loss based on the supervised loss \\(\\mathcal{L}_{\\mathrm{sup}}\\). Specifically, the interchange transfer loss \\(\\mathcal{L}_{it}\\) improves on an average of \\(1.41\\%\\) mAPH and the compressed representation loss \\(\\mathcal{L}_{cr}\\) leads to a \\(0.94\\%\\) performance improvement. In the end, the head attention loss \\(\\mathcal{L}_{\\mathrm{attn}}\\) helps to improve the performance and the final average mAPH is \\(53.54\\%\\). We conclude that each proposed loss contributes positively to performance improvement in the 3D object detection-based KD task." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.469, + 0.901 + ], + "angle": 0, + "content": "From Table 8, we observed quantitative comparisons of the computational complexity between the student network and the teacher network. Specifically, the student network, which reduced the channel by \\(1/4\\), decreased about" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.089, + 0.892, + 0.117 + ], + "angle": 0, + "content": "Table 7. Ablation results from investigating effects of different components." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.118, + 0.892, + 0.187 + ], + "angle": 0, + "content": "
LsupLitLcrLattnVehiclePedestrianCyclistAvg.
55.6246.7849.7550.72
57.4148.2050.7752.13
58.4148.9051.9053.07
58.8149.6152.1953.54
" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.196, + 0.892, + 0.224 + ], + "angle": 0, + "content": "Table 8. Quantitative evaluation for model efficiency on Waymo dataset." + }, + { + "type": "table", + "bbox": [ + 0.503, + 0.226, + 0.892, + 0.351 + ], + "angle": 0, + "content": "
MethodParams (M)FLOPS (G)mAPH / L2
PointPillars [13]4.8255.057.05
SECOND [35]5.384.557.23
Part-A2[24]4.687.157.43
IA-SSD [46]2.746.158.08
SparseKD-v0.64 [38]5.285.158.89
Teacher [43]5.2333.960.13
Ours: Student (1/2)1.5130.159.04
Ours: Student (1/4)0.645.153.54
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.366, + 0.892, + 0.548 + ], + "angle": 0, + "content": "8.6 times compared to the parameters of the teacher, and FLOPS was reduced by 7.4 times. Above all, we should not overlook the fact that the performance of the student improved from \\(50.72\\%\\) to \\(53.54\\%\\) mAPH/L2 by our KD method. Furthermore, we apply our method to the student whose channel was reduced by half. The student's performance increases to \\(59.04\\%\\), and the parameters and FLOPS compared to the teacher are reduced by 3.5 times and 2.6 times, respectively. Compared to lightweight network-based methods [13] [35] [24] [46], our student networks are able to derive stable performance with fewer parameters and FLOPS in 3D object detection." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.565, + 0.618, + 0.58 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.592, + 0.892, + 0.802 + ], + "angle": 0, + "content": "In this paper, we propose a novel KD method that transfers knowledge to produce a lightweight point cloud detector. Our main method involves interchange transfer, which learns coarse knowledge by increasing the similarity of the compressed feature and fine knowledge by decompressing the map-view feature of the other side using the channel-wise autoencoder. Moreover, we introduce a method to guide multiple detection head using head relation-aware self-attention, which refines knowledge by considering the relation of instances and properties. Ablation studies demonstrate the effectiveness of our proposed algorithm, and extensive experiments on the two large-scale open datasets verify that our proposed method achieves competitive performance against state-of-the-art methods." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.803, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Acknowledgement. This work was partly supported by NRF-2022R1A2C1091402, BK21 FOUR program of the NRF of Korea funded by the Ministry of Education (NRF5199991014091), and IITP grant funded by the Korea government(MSIT) (No.2021-0-00951, Development of Cloud based Autonomous Driving AI learning Software; No. 2021-0-02068, Artificial Intelligence Innovation Hub). W. Hwang is the corresponding author." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "13547" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.09, + 0.176, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.473, + 0.198 + ], + "angle": 0, + "content": "[1] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11621-11631, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.2, + 0.472, + 0.255 + ], + "angle": 0, + "content": "[2] Guobin Chen, Wongun Choi, Xiang Yu, Tony Han, and Manmohan Chandraker. Learning efficient object detection models with knowledge distillation. Advances in neural information processing systems, 30, 2017. 2, 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.256, + 0.471, + 0.298 + ], + "angle": 0, + "content": "[3] Yilun Chen, Shu Liu, Xiaoyong Shen, and Jiaya Jia. Fast point r-cnn. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9775-9784, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.299, + 0.471, + 0.368 + ], + "angle": 0, + "content": "[4] Xiyang Dai, Yinpeng Chen, Bin Xiao, Dongdong Chen, Mengchen Liu, Lu Yuan, and Lei Zhang. Dynamic head: Unifying object detection heads with attentions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7373-7382, 2021. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.368, + 0.471, + 0.45 + ], + "angle": 0, + "content": "[5] Martin Engelcke, Dushyant Rao, Dominic Zeng Wang, Chi Hay Tong, and Ingmar Posner. Vote3deep: Fast object detection in 3d point clouds using efficient convolutional neural networks. In 2017 IEEE International Conference on Robotics and Automation (ICRA), pages 1355-1361. IEEE, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.452, + 0.471, + 0.507 + ], + "angle": 0, + "content": "[6] Runzhou Ge, Zhuangzhuang Ding, Yihan Hu, Yu Wang, Sijia Chen, Li Huang, and Yuan Li. Afdet: Anchor free one stage 3d object detection. arXiv preprint arXiv:2006.12671, 2020. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.508, + 0.471, + 0.577 + ], + "angle": 0, + "content": "[7] Xiaoyang Guo, Shaoshuai Shi, Xiaogang Wang, and Hongsheng Li. Liga-stereo: Learning lidar geometry aware representations for stereo-based 3d detector. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3153-3163, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.579, + 0.471, + 0.647 + ], + "angle": 0, + "content": "[8] Chenhang He, Hui Zeng, Jianqiang Huang, Xian-Sheng Hua, and Lei Zhang. Structure aware single-stage 3d object detection from point cloud. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11873-11882, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.649, + 0.471, + 0.69 + ], + "angle": 0, + "content": "[9] Geoffrey Hinton, Oriol Vinyals, Jeff Dean, et al. Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531, 2(7), 2015. 1, 3, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.691, + 0.472, + 0.773 + ], + "angle": 0, + "content": "[10] Zhen Huang, Xu Shen, Jun Xing, Tongliang Liu, Xinmei Tian, Houqiang Li, Bing Deng, Jianqiang Huang, and Xian-Sheng Hua. Revisiting knowledge distillation: An inheritance and exploration framework. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3579-3588, 2021. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.775, + 0.471, + 0.829 + ], + "angle": 0, + "content": "[11] Jangho Kim, SeongUk Park, and Nojun Kwak. Paraphrasing complex network: Network compression via factor transfer. Advances in neural information processing systems, 31, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.831, + 0.471, + 0.871 + ], + "angle": 0, + "content": "[12] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.873, + 0.471, + 0.903 + ], + "angle": 0, + "content": "[13] Alex H Lang, Sourabh Vora, Holger Caesar, Lubing Zhou, Jiong Yang, and Oscar Beijbom. 
Pointpillars: Fast encoders" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.115, + 0.473, + 0.903 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.895, + 0.135 + ], + "angle": 0, + "content": "for object detection from point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12697-12705, 2019. 1, 2, 3, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.136, + 0.892, + 0.189 + ], + "angle": 0, + "content": "[14] Bo Li. 3d fully convolutional network for vehicle detection in point cloud. In 2017 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 1513-1518. IEEE, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.191, + 0.892, + 0.245 + ], + "angle": 0, + "content": "[15] Quanquan Li, Shengying Jin, and Junjie Yan. Mimicking very efficient network for object detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6356-6364, 2017. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.247, + 0.892, + 0.315 + ], + "angle": 0, + "content": "[16] Seyed Iman Mirzadeh, Mehrdad Farajtabar, Ang Li, Nir Levine, Akihiro Matsukawa, and Hassan Ghasemzadeh. Improved knowledge distillation via teacher assistant. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 5191-5198, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.317, + 0.892, + 0.384 + ], + "angle": 0, + "content": "[17] Jiquan Ngiam, Benjamin Caine, Wei Han, Brandon Yang, Yuning Chai, Pei Sun, Yin Zhou, Xi Yi, Ouais Alsharif, Patrick Nguyen, et al. Starnet: Targeted computation for object detection in point clouds. arXiv preprint arXiv:1908.11069, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.385, + 0.892, + 0.442 + ], + "angle": 0, + "content": "[18] Charles R Qi, Wei Liu, Chenxia Wu, Hao Su, and Leonidas J Guibas. Frustum pointnets for 3d object detection from rgb data. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 918-927, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.442, + 0.892, + 0.509 + ], + "angle": 0, + "content": "[19] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.511, + 0.892, + 0.566 + ], + "angle": 0, + "content": "[20] Adriana Romero, Nicolas Ballas, Samira Ebrahimi Kahou, Antoine Chassang, Carlo Gatta, and Yoshua Bengio. Fitnets: Hints for thin deep nets. arXiv preprint arXiv:1412.6550, 2014. 1, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.567, + 0.892, + 0.637 + ], + "angle": 0, + "content": "[21] Danila Rukhovich, Anna Vorontsova, and Anton Konushin. Imvoxelnet: Image to voxels projection for monocular and multi-view general-purpose 3d object detection. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 2397-2406, 2022. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.638, + 0.892, + 0.706 + ], + "angle": 0, + "content": "[22] Shaoshuai Shi, Chaoxu Guo, Li Jiang, Zhe Wang, Jianping Shi, Xiaogang Wang, and Hongsheng Li. Pv-rcnn: Pointvoxel feature set abstraction for 3d object detection. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10529–10538, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.707, + 0.892, + 0.773 + ], + "angle": 0, + "content": "[23] Shaoshuai Shi, Xiaogang Wang, and Hongsheng Li. Pointrcnn: 3d object proposal generation and detection from point cloud. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 770-779, 2019. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.775, + 0.892, + 0.845 + ], + "angle": 0, + "content": "[24] Shaoshuai Shi, Zhe Wang, Jianping Shi, Xiaogang Wang, and Hongsheng Li. From points to parts: 3d object detection from point cloud with part-aware and part-aggregation network. IEEE transactions on pattern analysis and machine intelligence, 43(8):2647-2664, 2020. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.846, + 0.892, + 0.903 + ], + "angle": 0, + "content": "[25] Weijing Shi and Raj Rajkumar. Point-gnn: Graph neural network for 3d object detection in a point cloud. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1711-1719, 2020. 2" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.895, + 0.903 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "13548" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.161 + ], + "angle": 0, + "content": "[26] Martin Simony, Stefan Milzy, Karl Amendey, and Horst-Michael Gross. Complex-yolo: An euler-region-proposal for real-time 3d object detection on point clouds. In Proceedings of the European Conference on Computer Vision (ECCV) Workshops, pages 0–0, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.163, + 0.47, + 0.207 + ], + "angle": 0, + "content": "[27] Leslie N Smith. Cyclical learning rates for training neural networks. In 2017 IEEE winter conference on applications of computer vision (WACV), pages 464-472. IEEE, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.207, + 0.47, + 0.275 + ], + "angle": 0, + "content": "[28] Wonchul Son, Jaemin Na, Junyong Choi, and Wonjun Hwang. Densely guided knowledge distillation using multiple teacher assistants. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9395-9404, 2021. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.277, + 0.47, + 0.332 + ], + "angle": 0, + "content": "[29] Shuran Song and Jianxiong Xiao. Deep sliding shapes for amodal 3d object detection in rgb-d images. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 808-816, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.334, + 0.47, + 0.417 + ], + "angle": 0, + "content": "[30] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. Scalability in perception for autonomous driving: Waymo open dataset. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2446-2454, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.418, + 0.47, + 0.474 + ], + "angle": 0, + "content": "[31] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.476, + 0.47, + 0.544 + ], + "angle": 0, + "content": "[32] Tao Wang, Li Yuan, Xiaopeng Zhang, and Jiashi Feng. Distilling object detectors with fine-grained feature imitation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4933-4942, 2019. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.546, + 0.47, + 0.616 + ], + "angle": 0, + "content": "[33] Yue Wang, Alireza Fathi, Abhijit Kundu, David A Ross, Caroline Pantofaru, Tom Funkhouser, and Justin Solomon. Pillar-based object detection for autonomous driving. In European Conference on Computer Vision, pages 18-34. Springer, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.617, + 0.47, + 0.657 + ], + "angle": 0, + "content": "[34] Yue Wang and Justin M Solomon. Object dgenn: 3d object detection using dynamic graphs. Advances in Neural Information Processing Systems, 34, 2021. 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.66, + 0.47, + 0.7 + ], + "angle": 0, + "content": "[35] Yan Yan, Yuxing Mao, and Bo Li. Second: Sparsely embedded convolutional detection. Sensors, 18(10):3337, 2018. 1, 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.702, + 0.47, + 0.758 + ], + "angle": 0, + "content": "[36] Bin Yang, Wenjie Luo, and Raquel Urtasun. Pixor: Realtime 3d object detection from point clouds. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pages 7652-7660, 2018. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.759, + 0.47, + 0.814 + ], + "angle": 0, + "content": "[37] Jing Yang, Brais Martinez, Adrian Bulat, and Georgios Tzimiropoulos. Knowledge distillation via softmax regression representation learning. In International Conference on Learning Representations, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.817, + 0.47, + 0.871 + ], + "angle": 0, + "content": "[38] Jihan Yang, Shaoshuai Shi, Runyu Ding, Zhe Wang, and Xiaojuan Qi. Towards efficient 3d object detection with knowledge distillation. arXiv preprint arXiv:2205.15156, 2022. 3, 4, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.47, + 0.902 + ], + "angle": 0, + "content": "[39] Zetong Yang, Yanan Sun, Shu Liu, and Jiaya Jia. 3dssd: Point-based 3d single stage object detector. In Proceedings" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "of the IEEE/CVF conference on computer vision and pattern recognition, pages 11040-11048, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.122, + 0.892, + 0.178 + ], + "angle": 0, + "content": "[40] Zetong Yang, Yanan Sun, Shu Liu, Xiaoyong Shen, and Jiaya Jia. Std: Sparse-to-dense 3d object detector for point cloud. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1951-1960, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.179, + 0.892, + 0.234 + ], + "angle": 0, + "content": "[41] Maosheng Ye, Shuangjie Xu, and Tongyi Cao. Hvnet: Hybrid voxel network for lidar based 3d object detection. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1631-1640, 2020. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.235, + 0.892, + 0.303 + ], + "angle": 0, + "content": "[42] Zeng Yihan, Chunwei Wang, Yunbo Wang, Hang Xu, Chaoqiang Ye, Zhen Yang, and Chao Ma. Learning transferable features for point cloud detection via 3d contrastive cotraining. Advances in Neural Information Processing Systems, 34, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.305, + 0.892, + 0.361 + ], + "angle": 0, + "content": "[43] Tianwei Yin, Xingyi Zhou, and Philipp Krahenbuhl. Center-based 3d object detection and tracking. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11784-11793, 2021. 1, 2, 3, 4, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.362, + 0.892, + 0.431 + ], + "angle": 0, + "content": "[44] Sergey Zagoruyko and Nikos Komodakis. Paying more attention to attention: Improving the performance of convolutional neural networks via attention transfer. 5th international conference on Learning Representations, Apr. 2017. 1, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.433, + 0.892, + 0.487 + ], + "angle": 0, + "content": "[45] Linfeng Zhang, Yukang Shi, Zuoqiang Shi, Kaisheng Ma, and Chenglong Bao. Task-oriented feature distillation. Advances in Neural Information Processing Systems, 33:14759-14771, 2020. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.489, + 0.892, + 0.571 + ], + "angle": 0, + "content": "[46] Yifan Zhang, Qingyong Hu, Guoquan Xu, Yanxin Ma, Jianwei Wan, and Yulan Guo. Not all points are equal: Learning highly efficient point-based detectors for 3d lidar point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18953-18962, 2022. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.573, + 0.892, + 0.642 + ], + "angle": 0, + "content": "[47] Wu Zheng, Weiliang Tang, Li Jiang, and Chi-Wing Fu. Sessd: Self-ensembling single-stage object detector from point cloud. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14494–14503, 2021. 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.644, + 0.892, + 0.713 + ], + "angle": 0, + "content": "[48] Yin Zhou, Pei Sun, Yu Zhang, Dragomir Anguelov, Jiyang Gao, Tom Ouyang, James Guo, Jiquan Ngiam, and Vijay Vasudevan. End-to-end multi-view fusion for 3d object detection in lidar point clouds. In Conference on Robot Learning, pages 923-932. PMLR, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.714, + 0.894, + 0.77 + ], + "angle": 0, + "content": "[49] Yin Zhou and Oncel Tuzel. Voxelnet: End-to-end learning for point cloud based 3d object detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4490-4499, 2018. 
1, 2, 3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.77 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "13549" + } + ] +] \ No newline at end of file diff --git a/2023/itKD_ Interchange Transfer-Based Knowledge Distillation for 3D Object Detection/0c6f9efe-676e-4d3a-914a-2dc77bae5a75_origin.pdf b/2023/itKD_ Interchange Transfer-Based Knowledge Distillation for 3D Object Detection/0c6f9efe-676e-4d3a-914a-2dc77bae5a75_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..2ec95f6f7a1419ea7481af2b257c77bf9ffca06d --- /dev/null +++ b/2023/itKD_ Interchange Transfer-Based Knowledge Distillation for 3D Object Detection/0c6f9efe-676e-4d3a-914a-2dc77bae5a75_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:291efe76a827a9785878f5f55d7efab97ff703cb55ecdb77436d8030f223b804 +size 3147661 diff --git a/2023/itKD_ Interchange Transfer-Based Knowledge Distillation for 3D Object Detection/full.md b/2023/itKD_ Interchange Transfer-Based Knowledge Distillation for 3D Object Detection/full.md new file mode 100644 index 0000000000000000000000000000000000000000..75e208c69b4d00e67933f2560ad2be39e3a2b702 --- /dev/null +++ b/2023/itKD_ Interchange Transfer-Based Knowledge Distillation for 3D Object Detection/full.md @@ -0,0 +1,300 @@ +# itKD: Interchange Transfer-based Knowledge Distillation for 3D Object Detection + +Hyeon Cho $^{1}$ , Junyong Choi $^{1,2}$ , Geonwoo Baek $^{1}$ , and Wonjun Hwang $^{1,3}$ + +$^{1}$ Ajou University, $^{2}$ Hyundai Motor Company, $^{3}$ Naver AI Lab + +ch0104@ajou.ac.kr, chldusxkr@hyundai.com, bkw0622@ajou.ac.kr, wjhwang@ajou.ac.kr + +# Abstract + +Point-cloud based 3D object detectors recently have achieved remarkable progress. However, most studies are limited to the development of network architectures for improving only their accuracy without consideration of the computational efficiency. In this paper, we first propose an autoencoder-style framework comprising channel-wise compression and decompression via interchange transfer-based knowledge distillation. To learn the map-view feature of a teacher network, the features from teacher and student networks are independently passed through the shared autoencoder; here, we use a compressed representation loss that binds the channel-wised compression knowledge from both student and teacher networks as a kind of regularization. The decompressed features are transferred in opposite directions to reduce the gap in the interchange reconstructions. Lastly, we present an head attention loss to match the 3D object detection information drawn by the multi-head self-attention mechanism. Through extensive experiments, we verify that our method can train the lightweight model that is well-aligned with the 3D point cloud detection task and we demonstrate its superiority using the well-known public datasets; e.g., Waymo and nuScenes. + +# 1. Introduction + +Convolutional neural network (CNN)-based 3D object detection methods using point clouds [13] [35] [36] [43] [49] have attracted wide attention based on their outstanding performance for self-driving cars. Recent CNN-based works have required more computational complexity to achieve higher precision under the various wild situation. 
Some studies [23] [36] [43] have proposed methods to improve the speed of 3D object detection through which the non-maximum suppression (NMS) or anchor procedures are removed but the network parameters are still large. + +![](images/fd151daba5e023596b2a4639e0ae5b201339f012a5bbd5fbe7d0d4786d001c5f.jpg) +Figure 1. Performance comparison between teacher and student networks for a point-cloud based 3D object detection. The top example images are qualitatively compared between the results of teacher, student and our networks. Specifically, the first row images are an input sample with labels and the center heatmap head of the teacher network. The second row examples are responses of teacher, student, and ours for the yellow circle on the heatmap (or the blue dash circle on the input). The bottom image quantitatively shows the computational complexity and the corresponding accuracy of teacher, student and our networks, respectively. Best viewed in color. + +Knowledge distillation (KD) is one of the parameter compression techniques, which can effectively train a compact student network through the guidance of a deep teacher network, as shown in the example images of Fig. 1. Starting with Hinton's work [9], many KD studies [10] [20] [28] [44] have transferred the discriminative teacher knowledge to the student network for classification tasks. From the viewpoint of the detection task, KD should be extended to the regression problem, including the object locations, which is not easy to straight-forwardly apply the classification-based KD methods to the detection task. To alleviate this problem, KD methods for object detection have been developed for mimicking the output of the backbone network [15] (e.g., region + +proposal network) or individual detection head [2] [32]. Nevertheless, these methods have only been studied for detecting 2D image-based objects, and there is a limit to applying them to sparse 3D point cloud-based data that have not object-specific color information but only 3D position-based object structure information. + +Taking a closer look at differences between 2D and 3D data, there is a large gap in that 2D object detection usually predicts 2D object locations based on inherent color information with the corresponding appearances, but 3D object detection estimates 3D object boxes from inputs consisting of only 3D point clouds. Moreover, the number of the point clouds constituting objects varies depending on the distances and presence of occlusions [42]. Another challenge in 3D object detection for KD is that, compared to 2D object detection, 3D object detection methods [4] [6] [43] [21] have more detection head components such as 3D boxes, and orientations. These detection heads are highly correlated with each other and represent different 3D characteristics. In this respect, when transferring the detection heads of the teacher network to the student network using KD, it is required to guide the distilled knowledge under the consideration of the correlation among the multiple detection head components. + +In this paper, we propose a novel interchange transfer-based KD (itKD) method designed for the lightweight point-cloud based 3D object detection. The proposed itKD comprises two modules: (1) a channel-wise autoencoder based on the interchange transfer of reconstructed knowledge and (2) a head relation-aware self-attention on multiple 3D detection heads. 
First of all, through a channel-wise compressing and decompressing processes for KD, the interchange transfer-based autoencoder effectively represents the map-view features from the viewpoint of 3D representation centric-knowledge. Specifically, the encoder provides an efficient representation by compressing the map-view feature in the channel direction to preserve the spatial positions of the objects and the learning of the student network could be regularized by the distilled position information of objects in the teacher network. For transferring the interchange knowledge to the opposite networks, the decoder of the student network reconstructs the map-view feature under the guidance of the teacher network while the reconstruction of the teacher network is guided by the map-view feature of the student network. As a result, the student network can effectively learn how to represent the 3D map-view feature of the teacher. Furthermore, to refine the teacher's object detection results as well as its representation, our proposed head relation-aware self-attention gives a chance to learn the pivotal information that should be taught to the student network for improving the 3D detection results by considering the inter-head relation among the multiple detection head and the intra-head relation of + +the individual detection head. + +In this way, we implement a unified KD framework to successfully learn the 3D representation and 3D detection results of the teacher network for the lightweight 3D point cloud object detection. We also conduct extensive ablation studies for thoroughly validating our approach in Waymo and nuScenes datasets. The results reveal the outstanding potential of our approach for transferring distilled knowledge that can be utilized to improve the performance of 3D point cloud object detection models. + +Our contributions are summarized as follows: + +- For learning the 3D representation-centric knowledge from the teacher network, we propose the channelwise autoencoder regularized in the compressed domain and the interchange knowledge transfer method wherein the reconstructed features are guided by the opposite networks. +- For detection head-centric knowledge of the teacher, we suggest the head relation-aware self-attention which can efficiently distill the detection properties under the consideration of the inter-head relation and intra-head relation of the multiple 3D detection heads. +- Our work is the best attempt to reduce the parameters of point cloud-based 3D object detection using KD. Additionally, we validate its superiority using two large datasets that reflect real-world driving conditions, e.g., Waymo and NuScenes. + +# 2. Related Works + +# 2.1. 3D Object Detection based on Point Cloud + +During the last few years, encouraged by the success of CNNs, the development of object detectors using CNNs is developing rapidly. Recently, many 3D object detectors have been studied and they can be briefly categorized by how they extract representations from point clouds; e.g., grid-based [35] [36] [49] [13] [43], point-based [18] [23] [17] [25] [39] and hybrid-based [3] [40] [8] [48] [22] methods. In detail, Vote3Deep [5] thoroughly exploited feature-centric voting to build CNNs for detecting objects in point clouds. 
In [29], they have studied on the task of amodal 3D object detection in RGB-D images, where a 3D region proposal network (RPN) to learn objectness from geometric shapes and the joint object recognition network to extract geometric features in 3D and color features in 2D. The 3D fully convolutional network [14] was straightforwardly applied to point cloud data for vehicle detection. In the early days, VoxelNet [49] has designed an end-to-end trainable detector based on learning-based voxelization using fully connected layers. In [35], they encoded the point cloud by VoxelNet and used the sparse convolution to achieve the fast detection. HVNet [41] fused the multi-scale voxel feature encoder at the point-wise level and projected into multi- + +ple pseudo-image feature maps for solving the various sizes of the feature map. In [26], they replaced the point cloud with a grid-based bird's-eye view (BEV) RGB-map and utilized YOLOv2 to detect the 3D objects. FIXOR [36] converted the point cloud to a 3D BEV map and carried out the real-time 3D object detection with an RPN-free single-stage based model. + +Recently, PointPillars (PP)-based method [13] utilized the PointNet [19] to learn the representation of point clouds organized in vertical columns for achieving the fast 3D object detection. To boost both performance and speed over PP, a pillar-based method [33] that incorporated a cylindrical projection into multi-view feature learning was proposed. More recently, CenterPoint [43] was introduced as an anchor-free detector that predicted the center of an object using a PP or VoxelNet-based feature encoder. In this paper, we employ the backbone architecture using CenterPoint because it is simple, near real-time, and achieves good performance in the wild situation. + +# 2.2. Knowledge Distillation + +KD is one of the methods used for compressing deep neural networks and its fundamental key is to imitate the knowledge extracted from the teacher network, which has heavy parameters as well as good accuracy. Hinton et al. [9] performed a knowledge transfer using KL divergence; FitNet [20] proposed a method for teaching student networks by imitating intermediate layers. On the other hand, TAKD [16] and DGKD [28] used multiple teacher networks for transferring more knowledge to the student network in spite of large parameter gaps. Recently, some studies have been proposed using the layers shared between the teacher and the student networks for KD. Specifically, in [37], KD was performed through softmax regression as the student and teacher networks shared the same classifier. IEKD [10] proposed a method to split the student network into inheritance and exploration parts and mimic the compact teacher knowledge through a shared latent feature space via an autoencoder. + +Beyond its use in classification, KD for detection should transfer the regression knowledge regarding the positions of the objects to the student network. For this purpose, a KD for 2D object detection [15] was first proposed using feature map mimic learning. In [2], they transferred the detection knowledge of the teacher network using hint learning for an RPN, weighted cross-entropy loss for classification, and bound regression loss for regression. Recently, Wang et al. [32] proposed a KD framework for detection by utilizing the cross-location discrepancy of feature responses through fine-grained feature imitation. + +As far as we know, there are few KD studies [7] [47] [34] [38] on point cloud-based 3D object detection so far. 
However, looking at similar studies on 3D knowledge trans + +fer, SE-SSD [47] presented a knowledge distillation-based self-ensembling method for exploiting soft and hard targets with constraints to jointly optimize the model without extra computational cost during inference time. Object-DGCNN [34] proposed a NMS-free 3D object detection via dynamic graphs and a set-to-set distillation. They used the set-to-set distillation method for improving the performance without the consideration of the model compression. Another latest study is SparseKD [38] which suggested a label KD method that distills a few pivotal positions determined by teacher classification response to enhance the logit KD method. On the other hand, in this paper, we are more interest in how to make the student network lighter, or lower computational complexity, by using the KD for 3D object detection. + +# 3. Methodology + +# 3.1. Background + +The 3D point cloud object detection methods [13] [49] generally consists of three components; a point cloud encoder, a backbone network, and detection heads. In this paper, we employ CenterPoint [43] network as a backbone architecture. Since the parameter size of the backbone network is the largest among components of the 3D object detector, we aim to construct the student network by reducing the channel sizes of the backbone network for efficient network. We design our method to teach the student 3D representation-centric knowledge and detection head-centric knowledge of the teacher network, respectively. + +# 3.2. Interchange Transfer + +We adopt an autoencoder framework to effectively transfer the meaningful distilled knowledge regarding 3D detection from the teacher to the student network. The traditional encoder-based KD methods [10] [11] have been limited to the classification task, which transfers only compressed categorical knowledge to the student network. However, from the viewpoint of the detection task, the main KD goal of this paper is transferring the distilled knowledge regarding not only categorical features but also object location-related features. Particularly, unlike 2D detectors, 3D object detectors should regress more location information such as object orientations, 3D box sizes, etc., and it results in increasing the importance of how to transfer the 3D location features to the student network successfully. + +For this purpose, we transfer the backbone knowledge that contains 3D object representation from the teacher network to the student through the compressed and reconstructed knowledge domains. As shown in Fig. 2, we in + +![](images/6d270483b17383a09ed06897d8dd184d913465b5861a6c55e98422a7b11ef053.jpg) +Figure 2. Overview of the proposed knowledge distillation method. The teacher and student networks take the same point clouds as inputs. Then, the map-view features $M^t$ and $M^s$ are extracted from the teacher and student networks, respectively. The channel-wise autoencoder transfers the knowledge obtained from $M^t$ to $M^s$ by using the compressed representation loss and interchange transfer loss consecutively. The head relation-aware self-attention provides the relation-aware knowledge of multiple detection head to the student network using the attention head loss. The dotted lines of the modules denote that there are shared network parameters between the teacher and student networks. The light-yellow boxes are buffer layers for sampling the features to match the channel sizes of networks. 
+ +produce a channel-wise autoencoder which consists of an encoder in which the channel dimension of the autoencoder is gradually decreased and a decoder in the form of increasing the channel dimension. Note that spatial features play a pivotal role in the detection task and we try to preserve the spatial information by encoding features in the channel direction. We propose a compressed representation loss to coarsely guide location information of the objects to the student network in Fig. 2, and the compressed representation loss has an effect similar to the regularization of the autoencoder that binds the coordinates of the objectness between the teacher and student networks. The compressed representation loss function $\mathcal{L}_{cr}$ is represented as follows: + +$$ +\begin{array}{l} \mathcal {L} _ {c r} = m _ {o b j} \circ \mathcal {S} \left[ E \left(\theta_ {e n c}, M ^ {t}\right), E \left(\theta_ {e n c}, M ^ {s}\right) \right] \tag {1} \\ = m _ {o b j} \circ \mathcal {S} \left[ M _ {e n c} ^ {t}, M _ {e n c} ^ {s} \right], \\ \end{array} +$$ + +where $E$ is a shared encoder, which has the parameters $\theta_{enc}$ , and $S$ denotes $l_1$ loss as a similarity measure. $M^t$ and $M^s$ are outputs of the teacher and student backbones, respectively. $m_{obj}$ represents a binary mask to indicate object locations in backbone output like [38] and $\circ$ is an element-wise product. + +After performing the coarse representation-based knowledge distillation in a compressed domain, the fine representation features of the teacher network are required to teach the student network from the viewpoint of 3D object detection. In this respect, the decoder reconstructs the fine map + +view features in the channel direction from the compressed features. Through the proposed interchange transfer loss, the reconstructed features are guided from the opposite networks, not their own stem networks, as shown in Fig. 2. Specifically, since the teacher network is frozen and we use the shared autoencoder for both student and teacher networks, we can teach the reconstructed fine features from the student network to resemble the output of the teacher network $M^t$ rather than the student $M^s$ . Moreover, the reconstructed fine features from the teacher network can guide the student's output, $M^s$ at the same time. The proposed interchange transfer loss $\mathcal{L}_{it}$ is defined as follows: + +$$ +\mathcal {L} _ {t \rightarrow s} = \mathcal {S} \left[ M ^ {s}, D \left(\theta_ {\text {d e c}}, M _ {\text {e n c}} ^ {t}\right)\right], \tag {2} +$$ + +$$ +\mathcal {L} _ {s \rightarrow t} = \mathcal {S} \left[ M ^ {t}, D \left(\theta_ {\text {d e c}}, M _ {\text {e n c}} ^ {s}\right)\right], \tag {3} +$$ + +$$ +\mathcal {L} _ {i t} = \mathcal {L} _ {s \rightarrow t} + \mathcal {L} _ {t \rightarrow s}, \tag {4} +$$ + +where $D$ is the decoder that contains the network parameter $\theta_{dec}$ , which is a shared parameter. We hereby present the representation-based KD for 3D object detection in both compressed and decompressed domains to guide the student network to learn the map-view feature of the teacher network efficiently. + +# 3.3. Head Relation-Aware Self-Attention + +Fundamentally, our backbone network, e.g., Center-Point [43], has various types of 3D object characteristics + +![](images/535309497c413003f5efdc77034d7ee76e0a19cbd4470c32fbff243586d83e47.jpg) +Figure 3. Head Relation-Aware Self-Attention. 
We make the object center-head feature from object center locations in the detection head feature and use it as different shaped inputs to self-attention for inter-head relation and intra-head relation. In the self-attention for inter-head relation, we use the object center-head feature as an input for the self-attention. In the self-attention for intra-head relation, the detection heads are separately used for the independent self-attention functions. The outputs of the self-attention are concatenated by $\mathbb{C}$ operations and the head relation-aware self-attention is generated through the fusion layer. + +on detection heads. Specifically, the locations, size, and direction of an object are different properties, but they are inevitably correlated to each other because they come from the same object. However, the traditional KD methods [2] [34] were only concerned with how the student network straight-forwardly mimicked the outputs of the teacher network without considering the relation among the detection heads. To overcome this problem, we make use of the relation of detection heads as a major factor for the detection head-centric KD. + +Our proposed head relation-aware self-attention is directly inspired by the multi-head self-attention [31] in order to learn the relation between the multiple detection head. As shown in Fig. 3, we first extract $i$ -th instance feature $v^{i} \in \mathbb{R}^{c}$ , where $c$ is the channel size, from the center location of the object in the detection head feature. Note that, since the instance feature is extracted from the multiple detection head, it has several object properties such as a class-specific heatmap $v_{hm}^{i}$ , a sub-voxel location refinement $v_{o}^{i}$ , a height-above-ground $v_{h}^{i}$ , a 3D size $v_{s}^{i}$ , and a yaw rotation angle $v_{r}^{i}$ . When there are a total of $n$ objects, we combine them to make an object center-head feature $v \in \mathbb{R}^{n \times c}$ . We use the same object center-head feature $v$ of dimension $n$ for query, key, and value, which are an input of the scaled dot-product attention. The self-attention function $\mathcal{F}$ is computed by + +$$ +\mathcal {F} (v) = \operatorname {s o f t m a x} \left(\frac {v ^ {\top} \cdot v}{\sqrt {n}}\right) \cdot v. \tag {5} +$$ + +The proposed head relation-aware self-attention consists of two different self-attention for inter-head and intra-head relations as illustrated in Fig. 3. We propose the self-attention based on the inter-head relation of the instance features, which is made in order to consider the relation + +between all detected objects and their different properties, rather than a single detected instance, from the global viewpoint. The self-attention for inter-head relation is computed by + +$$ +\mathcal {F} _ {\text {i n t e r}} (v) = \mathcal {F} ([ v _ {h m}, v _ {o}, v _ {h}, v _ {s}, v _ {r} ]). \tag {6} +$$ + +On the other hand, we suggest the self-attention for intrahead relation using the individual detection heads. Here we perform the attentions using only local relation in individual detection heads designed for different properties (e.g., orientation, size, etc.) and concatenate them. Its equation is + +$$ +\mathcal {F} _ {\text {i n t r a}} (v) = \left[ \mathcal {F} \left(v _ {h m}\right), \mathcal {F} \left(v _ {o}\right), \mathcal {F} \left(v _ {h}\right), \mathcal {F} \left(v _ {s}\right), \mathcal {F} \left(v _ {r}\right) \right]. 
\tag {7} +$$ + +We concatenate the outputs of the self-attention and apply the fusion layer to calculate a final attention score that considers the relation between the detection heads and objects. The head relation-aware self-attention equation $\mathcal{F}_{RA}$ is derived by: + +$$ +\mathcal {F} _ {R A} (v) = \mathcal {G} \left(\left[ \mathcal {F} _ {\text {i n t e r}} (v), \mathcal {F} _ {\text {i n t r a}} (v) \right]\right), \tag {8} +$$ + +where $\mathcal{G}$ is the fusion layer, e.g., $1 \times 1$ convolution layer. The student network indirectly takes the teacher's knowledge by learning the relation between the multiple detection head of the teacher network through head attention loss as follows: + +$$ +\mathcal {L} _ {\text {a t t n}} = \mathcal {S} \left(\mathcal {F} _ {R A} \left(v _ {t}\right), \mathcal {F} _ {R A} \left(v _ {s}\right)\right), \tag {9} +$$ + +where $v_{t}$ and $v_{s}$ are the object center-head features of the teacher and the student, respectively. + +Consequently, the overall loss is derived by + +$$ +\mathcal {L} _ {\text {t o t a l}} = \alpha \mathcal {L} _ {\text {s u p}} + \beta \left(\mathcal {L} _ {\text {i t}} + \mathcal {L} _ {\text {c r}} + \mathcal {L} _ {\text {a t t n}}\right), \tag {10} +$$ + +where $\mathcal{L}_{sup}$ is the supervised loss that consists of focal loss and regression loss, and $\alpha$ and $\beta$ are the balancing parameters, which we set as 1 for simplicity. + +# 4. Experimental Results and Discussions + +# 4.1. Environment Settings + +Waymo Waymo open dataset [30] is one of the large-scale datasets for autonomous driving, which is captured by the synchronized and calibrated high-quality LiDAR and camera across a range of urban and suburban geographies. This dataset provides 798 training scenes and 202 validation scenes obtained by detecting all the objects within a $75\mathrm{m}$ radius; it has a total of 3 object categories (e.g., vehicle, pedestrian, and cyclist) which have 6.1M, 2.8M, and 67K sets, respectively. The mean Average Precision (mAP) and mAP weighted by heading accuracy (mAPH) are the official metrics for Waymo evaluation. mAPH is a metric that gives more weight to the heading than it does to the sizes, and it accounts for the direction of the object. + +nuScenes nuScenes dataset [1] is another large-scale dataset used for autonomous driving. This dataset contains 1,000 driving sequences. 700, 150, and 150 sequences are used for training, validation, and testing, respectively. Each sequence is captured approximately 20 seconds with 20 FPS using the 32-lane LiDAR. Its evaluation metrics are the average precision (AP) and nuScenes detection score (NDS). NDS is a weighted average of mAP and true positive metrics which measures the quality of the detections in terms of box location, size, orientation, attributes, and velocity. + +Implementation details Following the pillar-based CenterPoint [43] as the teacher network, we use an Adam optimizer [12] with a weight decay of 0.01 and a cosine annealing strategy [27] to adjust the learning rate. We set 0.0003 for initial learning rate, 0.003 for max learning rate, and 0.95 for momentum. The networks have been trained for 36 epochs on $8 \times \mathrm{V}100$ GPUs with a batch size of 32. For Waymo dataset, we set the detection range to $[-74.88\mathrm{m}, 74.88\mathrm{m}]$ for the X and Y axes, $[-2\mathrm{m}, 4\mathrm{m}]$ for the Z-axis, and a grid size of $(0.32\mathrm{m}, 0.32\mathrm{m})$ . 
In experiments on nuScenes dataset, we used a $(0.2\mathrm{m}, 0.2\mathrm{m})$ grid and set the detection range to $[-51.2\mathrm{m}, 51.2\mathrm{m}]$ for the X and Y-axes, $[-5\mathrm{m}, 3\mathrm{m}]$ for the Z-axis, and a grid size of $(0.2\mathrm{m}, 0.2\mathrm{m})$ . Compared to the teacher network, the student network has $1/4$ less channel capacity of backbone network. Our channel-wise autoencoder consists of three $1 \times 1$ convolution layers as the encoder and three $1 \times 1$ convolution layers as the decoder and the number of filters are 128, 64, 32 in encoder layers and 64, 128, 384 in decoder layers. The student's input buffer layer increases the channel size of 196 to 384 and the teacher's output buffer layer decreases the channel size 384 to 196. + +# 4.2. Overall KD Performance Comparison + +We validate the performance of our method compared with well-known KD methods on the Waymo and nuScenes datasets. We re-implement the seven KD methods from 2D + +classification-based KD to 3D detection-based KD in this paper. We set the baseline by applying the Kullback-Leibler (KL) divergence loss [9] to the center heatmap head and $l_{1}$ loss to the other regression heads. FitNet [20] is a method that mimics the intermediate outputs of layers and we apply it to the output of the backbone for simplicity. We also simply extend EOD-KD [2], one of the 2D object detection KDs, to 3D object detection. We apply TOFD [45], a 3D classification-based KD, to our detection task and straightforwardly use SE-SSD [47], Object DGCNN [34], and SparseKD [38] for 3D object detection KD. + +Table 1 shows that our method almost outperforms other KD methods on mAP and mAPH values for level 1 and level 2 under all three categories of objects. Especially, our performance improvement of mAPH is better than other methods, which indicates our method guides the student network well where the detected objects are facing. To verify the generality of the proposed method, we make additional comparison results using the nuScenes dataset, another large-scale 3D dataset for autonomous driving, in Table 2. Compared with the other methods, our method achieves the best accuracy under the NDS and mAP metrics in the nuScenes validation set. Specifically, when the student network shows $50.24\%$ NDS and $38.52\%$ mAP, our method achieves $53.90\%$ $(+3.66\%)$ NDS and $41.33\%$ $(+2.81\%)$ mAP. In detail, our method outperforms the other methods for the most of object classes except the construction vehicle and the bicycle. + +# 4.3. Ablation Studies + +To analyze of our proposed method in detail, we conduct ablation studies on the Waymo dataset, and the whole performances are measured by mAPH at level 2 for simplicity. For the qualitative analysis, we visualize the map-view feature at each stage to validate the what kinds of knowledge are transferred from the teacher to the student by the proposed method. For simple visualization, we apply the $L_{1}$ normalization to the map-view feature in the channel direction. + +As shown in Fig. 4, the objects and backgrounds are well activated in the example image of the teacher output. On the other hand, the encoder output is activated by further highlighting the coarse positions of the target objects. When looking at the decoder output, we can see that all the fine surrounding information is represented again. At this point, it is worth noting that compared to the teacher output, the target objects are highlighted a little more. 
From these visual comparisons, we can infer how our method successfully transfers the object-centered knowledge to the student. + +We explore the buffer layer that matches the channel size of the channel-wise autoencoder without the head attention loss. As shown in Table 3, we compare the three types for the buffer layer: (1) $S \rightarrow T$ is the upsampling method that in- + +Table 1. Waymo evaluation. Comparisons with different KD methods in the Waymo validation set. The best accuracy is indicated in bold, and the second-best accuracy is underlined. + +
| Method | Vehicle L1 mAP | Vehicle L1 mAPH | Vehicle L2 mAP | Vehicle L2 mAPH | Pedestrian L1 mAP | Pedestrian L1 mAPH | Pedestrian L2 mAP | Pedestrian L2 mAPH | Cyclist L1 mAP | Cyclist L1 mAPH | Cyclist L2 mAP | Cyclist L2 mAPH |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| Teacher [43] | 73.72 | 73.17 | 65.61 | 65.11 | 72.43 | 61.72 | 64.73 | 54.99 | 64.30 | 62.61 | 61.91 | 60.28 |
| Student (1/4) | 64.22 | 63.56 | 56.21 | 55.62 | 63.72 | 53.22 | 56.14 | 46.78 | 53.01 | 51.72 | 50.99 | 49.75 |
| Baseline | 64.78 | 64.05 | 56.92 | 56.26 | 64.85 | 52.98 | 57.37 | 46.75 | 54.71 | 52.46 | 52.65 | 50.48 |
| FitNet [20] | 65.11 | 64.38 | 57.24 | 56.58 | 64.89 | 53.29 | 57.37 | 47.00 | 54.91 | 52.61 | 52.84 | 50.63 |
| EOD-KD [2] | 66.50 | 65.79 | 58.56 | 57.92 | 65.99 | 54.58 | 58.48 | 48.25 | 55.18 | 52.93 | 53.10 | 50.94 |
| SE-SSD [47] | 65.95 | 65.22 | 58.05 | 57.40 | 65.39 | 53.98 | 57.92 | 47.69 | 55.01 | 52.98 | 52.94 | 50.99 |
| TOFD [45] | 64.09 | 63.43 | 56.13 | 55.55 | 66.24 | 54.98 | 58.50 | 48.45 | 54.95 | 53.06 | 52.86 | 51.04 |
| Obj. DGCNN [34] | 66.07 | 65.38 | 59.27 | 58.55 | 65.98 | 54.44 | 59.42 | 49.11 | 54.65 | 52.62 | 53.13 | 50.93 |
| SparseKD [38] | 65.25 | 64.59 | 56.97 | 56.38 | 67.44 | 54.54 | 59.24 | 47.83 | 55.54 | 53.45 | 53.63 | 51.61 |
| Ours | 67.43 | 66.72 | 59.44 | 58.81 | 67.26 | 56.02 | 59.73 | 49.61 | 56.09 | 54.24 | 53.96 | 52.19 |
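To make the representation-level distillation of Sec. 3.2 concrete, below is a minimal PyTorch-style sketch of the shared channel-wise autoencoder together with the compressed representation loss of Eq. (1) and the interchange transfer loss of Eqs. (2)-(4). The 1x1-convolution filter sizes (128-64-32 in the encoder, 64-128-384 in the decoder) follow Sec. 4.1; the module and helper names, the ReLU activations, and the exact mask normalization are illustrative assumptions rather than the authors' released implementation.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class ChannelAutoencoder(nn.Module):
    """Shared channel-wise autoencoder: 1x1 convolutions compress/decompress only the
    channel dimension, so the spatial layout of the map-view feature is preserved."""
    def __init__(self, in_ch=384):
        super().__init__()
        # Filter sizes from Sec. 4.1: encoder 128-64-32, decoder 64-128-384 (ReLUs assumed).
        self.enc = nn.Sequential(
            nn.Conv2d(in_ch, 128, 1), nn.ReLU(inplace=True),
            nn.Conv2d(128, 64, 1), nn.ReLU(inplace=True),
            nn.Conv2d(64, 32, 1),
        )
        self.dec = nn.Sequential(
            nn.Conv2d(32, 64, 1), nn.ReLU(inplace=True),
            nn.Conv2d(64, 128, 1), nn.ReLU(inplace=True),
            nn.Conv2d(128, in_ch, 1),
        )

def itkd_representation_losses(ae, m_t, m_s, obj_mask):
    """m_t, m_s: teacher/student map-view features (B, C, H, W), channel-matched by the
    buffer layers; obj_mask: binary object-location mask broadcastable to (B, 1, H, W)."""
    z_t, z_s = ae.enc(m_t), ae.enc(m_s)
    # Eq. (1): bind the compressed teacher/student features at object locations.
    l_cr = (obj_mask * (z_t - z_s).abs()).mean()
    # Eqs. (2)-(3): each decoded feature is matched against the *opposite* network's output.
    l_t2s = F.l1_loss(m_s, ae.dec(z_t))   # teacher encoding reconstructed toward the student
    l_s2t = F.l1_loss(m_t, ae.dec(z_s))   # student encoding reconstructed toward the teacher
    return l_cr, l_s2t + l_t2s            # Eq. (4): L_it = L_{s->t} + L_{t->s}
```

Because the teacher backbone is frozen and the autoencoder is shared, gradients from both interchange terms only reach the student backbone (through $M^s$) and the shared encoder/decoder parameters.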
+ +Table 2. nuScenes evaluation. Comparisons with different KD methods in the nuScenes validation set. The best accuracy is indicated in bold, and the second-best accuracy is underlined. + +
| Method | NDS | mAP | car | truck | bus | trailer | con. veh. | ped. | motor. | bicycle | tr. cone | barrier |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| Teacher [43] | 60.16 | 50.25 | 84.04 | 53.48 | 64.29 | 31.90 | 12.50 | 78.93 | 44.01 | 18.18 | 54.87 | 60.30 |
| Student (1/4) | 50.24 | 38.52 | 77.85 | 38.18 | 51.38 | 22.33 | 3.95 | 71.51 | 23.90 | 3.51 | 43.03 | 49.56 |
| Baseline | 51.48 | 39.19 | 78.72 | 37.90 | 50.47 | 22.42 | 3.51 | 72.29 | 26.25 | 4.65 | 44.91 | 50.77 |
| FitNet [20] | 51.42 | 38.90 | 78.30 | 37.40 | 50.40 | 22.20 | 3.80 | 72.10 | 25.70 | 4.25 | 44.20 | 50.60 |
| EOD-KD [2] | 52.49 | 39.82 | 78.40 | 38.60 | 50.90 | 22.70 | 3.90 | 73.20 | 28.20 | 5.30 | 45.00 | 51.97 |
| SE-SSD [47] | 52.21 | 39.53 | 78.69 | 38.56 | 49.81 | 23.70 | 3.72 | 72.86 | 28.27 | 4.25 | 44.24 | 51.18 |
| TOFD [45] | 52.88 | 40.57 | 79.06 | 39.73 | 52.03 | 24.51 | 3.56 | 73.51 | 29.58 | 5.62 | 45.34 | 52.79 |
| Obj. DGCNN [34] | 52.91 | 40.34 | 78.95 | 39.24 | 53.37 | 23.96 | 4.13 | 72.98 | 28.63 | 4.99 | 44.72 | 52.46 |
| SparseKD [38] | 53.01 | 40.26 | 78.78 | 39.50 | 51.87 | 23.64 | 3.30 | 73.17 | 29.34 | 5.75 | 44.98 | 52.26 |
| Ours | 53.90 | 41.33 | 79.48 | 40.38 | 54.35 | 26.44 | 3.58 | 73.91 | 30.21 | 5.39 | 45.90 | 53.70 |
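As a companion to Sec. 3.3, the following sketch shows one way the object center-head feature $v \in \mathbb{R}^{n \times c}$ could be assembled: for each of the $n$ objects, the five detection heads (class-specific heatmap, sub-voxel offset, height, 3D size, yaw rotation) are read out at the object's center location and concatenated. The function name, the dictionary keys, and the assumption that object centers are given as integer grid indices on the head feature maps are hypothetical; the paper does not spell out this plumbing.

```python
import torch

def gather_center_head_features(head_maps, centers):
    """head_maps: dict of per-head tensors {name: (C_i, H, W)} for one sample, e.g.
    {'heatmap': ..., 'offset': ..., 'height': ..., 'size': ..., 'rot': ...}.
    centers: (n, 2) integer tensor of object center indices (x, y) on the map-view grid.
    Returns the object center-head feature v of shape (n, c) with c = sum_i C_i,
    plus the per-head slices needed for the intra-head attention of Eq. (7)."""
    xs, ys = centers[:, 0], centers[:, 1]
    per_head = []
    for name in ('heatmap', 'offset', 'height', 'size', 'rot'):
        feat = head_maps[name]                  # (C_i, H, W)
        per_head.append(feat[:, ys, xs].t())    # (n, C_i): values at the object centers
    return torch.cat(per_head, dim=-1), per_head
```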
+ +![](images/80e990b6ad24ecda37c55bb800204ee04ce5abaf1ee1fe120253260bac5e5570.jpg) + +![](images/af145eb234aaf14eb731dc65b35ac889d5912d6647354036b87edc55f3ee0d4c.jpg) +(b) Teacher output $(M^t)$ + +![](images/7834374594e0224e6c18ec4ac004861e6dc676bdc03cb7fae7929afc66109bd4.jpg) +(a) Input +(c) Encoder output +Figure 4. Feature visualization on the proposed channel-wise autoencoder. (a) an example input image and (b) the output feature of the teacher network. (c) and (d) are the output images of encoder and decoder of the teacher, respectively. + +![](images/85010ca490235b842b38dd2f64829c1a656f8d2a99107e24823c6c348ddb8650.jpg) +(d) Decoder output + +creases the student's map-view feature to the teacher's feature. (2) $T \to S$ is the downsampling method that decreases the teacher's feature to the student's feature. (3) $(S + T) / 2$ is that the teacher's feature is downsampled and the stu + +Table 3. Buffer layer for different channel size. + +
| Method | Vehicle | Pedestrian | Cyclist | Avg. |
|---|---|---|---|---|
| S → T | 58.41 | 48.90 | 51.90 | 53.07 |
| T → S | 58.62 | 48.78 | 51.75 | 53.05 |
| (S + T) / 2 | 58.47 | 48.84 | 51.54 | 52.95 |
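The three variants in Table 3 differ only in which side is resampled by the buffer layers before entering the shared autoencoder. A small sketch under the channel sizes reported in Sec. 4.1 (student 196, teacher 384) is given below; the 290-channel median for the $(S + T)/2$ variant and the use of plain 1x1 convolutions are assumptions for illustration, not the released code.

```python
import torch.nn as nn

class BufferLayer(nn.Module):
    """1x1-convolution buffer that matches teacher/student channel sizes before the
    shared channel-wise autoencoder (the light-yellow boxes in Fig. 2)."""
    def __init__(self, in_ch, out_ch):
        super().__init__()
        self.proj = nn.Conv2d(in_ch, out_ch, kernel_size=1)

    def forward(self, x):
        return self.proj(x)

# Table 3 variants (channel sizes follow Sec. 4.1; the 290-channel median is an assumption):
student_to_teacher = BufferLayer(196, 384)                    # S -> T: upsample the student feature
teacher_to_student = BufferLayer(384, 196)                    # T -> S: downsample the teacher feature
to_median = (BufferLayer(196, 290), BufferLayer(384, 290))    # (S + T) / 2: meet at the median size
```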
+ +Table 4. Effect of shared and non-shared parameters for the autoencoder. + +
| Method | Vehicle | Pedestrian | Cyclist | Avg. |
|---|---|---|---|---|
| Non-shared | 56.26 | 45.85 | 48.23 | 50.11 |
| Shared | 58.41 | 48.90 | 51.90 | 53.07 |
+ +dent's feature is upsampled to the median size. The experiments show that the upsampling method performs better when considering all the classes. + +In Table 4, we observe the performance difference when the autoencoder parameters are shared or not. From the result, we can conclude that the shared parameters achieve better performance because what we want to is for the student to learn the teacher's knowledge, not the independent model. + +We investigate improvements made by our interchange transfer for KD without the head attention loss as shown in Table 5. Self-reconstruction is a method wherein the de + +Table 5. Comparison of different reconstruction methods for the autoencoder. + +
| Method | Vehicle | Pedestrian | Cyclist | Avg. |
|---|---|---|---|---|
| Self Recon. | 56.57 | 47.26 | 50.29 | 51.37 |
| Ours | 58.41 | 48.90 | 51.90 | 53.07 |
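Ahead of the detection-head comparison in Table 6, here is a compact sketch of the head relation-aware self-attention of Sec. 3.3. Eq. (5) is read as scaled dot-product attention over the $n$ object instances, Eq. (6) attends over the concatenated object center-head feature, Eq. (7) attends within each detection head separately, and Eq. (8) fuses both through the layer $\mathcal{G}$. The class and function names and the use of a linear layer for $\mathcal{G}$ are assumptions; the paper describes $\mathcal{G}$ as a 1x1 convolution.

```python
import math
import torch
import torch.nn as nn
import torch.nn.functional as F

def self_attn(v):
    """Eq. (5): scaled dot-product self-attention over n object instances.
    v: (n, c) features used simultaneously as query, key and value."""
    n = v.shape[0]
    attn = torch.softmax(v @ v.t() / math.sqrt(n), dim=-1)   # (n, n) instance relations
    return attn @ v                                          # (n, c)

class HeadRelationAwareAttention(nn.Module):
    """Eqs. (6)-(8): inter-head attention on the concatenated object center-head feature,
    intra-head attention per detection head, fused by the layer G."""
    def __init__(self, total_ch):
        super().__init__()
        self.fuse = nn.Linear(2 * total_ch, total_ch)   # fusion layer G

    def forward(self, heads):                            # heads: list of (n, c_i) per-head features
        v = torch.cat(heads, dim=-1)                                  # object center-head feature (n, c)
        f_inter = self_attn(v)                                        # Eq. (6)
        f_intra = torch.cat([self_attn(h) for h in heads], dim=-1)    # Eq. (7)
        return self.fuse(torch.cat([f_inter, f_intra], dim=-1))       # Eq. (8)

def head_attention_loss(f_ra, heads_t, heads_s):
    """Eq. (9): l1 distance between the teacher's and student's relation-aware attentions."""
    return F.l1_loss(f_ra(heads_s), f_ra(heads_t))
```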
+ +Table 6. Comparison of KD methods for the multiple detection head. KL loss and $l_{1}$ loss denote that directly apply the loss function to all detection heads for KD. + +
| Method | Vehicle | Pedestrian | Cyclist | Avg. |
|---|---|---|---|---|
| Student | 55.62 | 46.78 | 49.75 | 50.72 |
| Baseline | 56.26 | 46.75 | 50.48 | 51.16 |
| KL loss [9] | 55.92 | 45.08 | 47.49 | 49.50 |
| $l_1$ loss | 55.62 | 45.10 | 48.73 | 49.82 |
| AT [44] | 56.85 | 47.34 | 50.36 | 51.52 |
| $\mathcal{L}_{\text{inter}}$ | 56.41 | 46.90 | 50.90 | 51.40 |
| $\mathcal{L}_{\text{intra}}$ | 57.20 | 47.19 | 51.23 | 51.87 |
| $\mathcal{L}_{\text{attn}}$ | 57.10 | 47.34 | 51.79 | 52.08 |
+ +coder uses the corresponding input for the reconstruction and our interchange reconstruction is a method wherein the proposed $\mathcal{L}_{it}$ objective transfers the reconstructed knowledge to the opponent network. Our interchange transfer-based reconstruction achieves better results and note that our main task is not the reconstruction but the 3D object-based knowledge transfer for KD. + +3D detection [4] [6] [43] [21] has the multiple detection head. To prove the superiority of the proposed head attention objective for 3D object detection, we make the KD comparison results against only multiple detection head without the autoencoder, as shown in Table 6. Since the heatmap head classifies objects and other heads regress 3D bounding box information, Applying KL loss and $l_{1}$ loss to all detection heads has a negative effect. However, it is required to consider the relation of detection heads. In this respect, our method achieves better performance than the other KD methods which directly mimic the output of detection heads or simply employ attention mechanism. + +Table 7 shows the overall effect of the proposed losses on the KD performances. We set up the experiments by adding each loss based on the supervised loss $\mathcal{L}_{\mathrm{sup}}$ . Specifically, the interchange transfer loss $\mathcal{L}_{it}$ improves on an average of $1.41\%$ mAPH and the compressed representation loss $\mathcal{L}_{cr}$ leads to a $0.94\%$ performance improvement. In the end, the head attention loss $\mathcal{L}_{\mathrm{attn}}$ helps to improve the performance and the final average mAPH is $53.54\%$ . We conclude that each proposed loss contributes positively to performance improvement in the 3D object detection-based KD task. + +From Table 8, we observed quantitative comparisons of the computational complexity between the student network and the teacher network. Specifically, the student network, which reduced the channel by $1/4$ , decreased about + +Table 7. Ablation results from investigating effects of different components. + +
| $\mathcal{L}_{\text{sup}}$ | $\mathcal{L}_{\text{it}}$ | $\mathcal{L}_{\text{cr}}$ | $\mathcal{L}_{\text{attn}}$ | Vehicle | Pedestrian | Cyclist | Avg. |
|---|---|---|---|---|---|---|---|
| ✓ | | | | 55.62 | 46.78 | 49.75 | 50.72 |
| ✓ | ✓ | | | 57.41 | 48.20 | 50.77 | 52.13 |
| ✓ | ✓ | ✓ | | 58.41 | 48.90 | 51.90 | 53.07 |
| ✓ | ✓ | ✓ | ✓ | 58.81 | 49.61 | 52.19 | 53.54 |
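Table 7 adds the distillation terms on top of the supervised loss exactly as in Eq. (10), i.e. $\mathcal{L}_{\text{total}} = \alpha\mathcal{L}_{\text{sup}} + \beta(\mathcal{L}_{\text{it}} + \mathcal{L}_{\text{cr}} + \mathcal{L}_{\text{attn}})$ with $\alpha = \beta = 1$. The step below is a hedged sketch of how the pieces could be combined during training; it reuses the helper functions sketched earlier in this section, and the interfaces of `teacher`, `student`, and `batch` are assumptions rather than the authors' code.

```python
import torch

alpha, beta = 1.0, 1.0              # balancing weights, both set to 1 in the paper

def training_step(batch, teacher, student, autoencoder, f_ra, optimizer):
    with torch.no_grad():                       # the teacher network is frozen
        m_t, heads_t = teacher(batch)           # map-view feature + object center-head features
    m_s, heads_s = student(batch)

    l_sup = student.supervised_loss(batch)      # focal + regression loss of the detector (assumed API)
    l_cr, l_it = itkd_representation_losses(autoencoder, m_t, m_s, batch['obj_mask'])
    l_attn = head_attention_loss(f_ra, heads_t, heads_s)

    loss = alpha * l_sup + beta * (l_it + l_cr + l_attn)   # Eq. (10)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss
```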
+ +Table 8. Quantitative evaluation for model efficiency on Waymo dataset. + +
| Method | Params (M) | FLOPS (G) | mAPH / L2 |
|---|---|---|---|
| PointPillars [13] | 4.8 | 255.0 | 57.05 |
| SECOND [35] | 5.3 | 84.5 | 57.23 |
| Part-A2 [24] | 4.6 | 87.1 | 57.43 |
| IA-SSD [46] | 2.7 | 46.1 | 58.08 |
| SparseKD-v0.64 [38] | 5.2 | 85.1 | 58.89 |
| Teacher [43] | 5.2 | 333.9 | 60.13 |
| Ours: Student (1/2) | 1.5 | 130.1 | 59.04 |
| Ours: Student (1/4) | 0.6 | 45.1 | 53.54 |
+ +8.6 times compared to the parameters of the teacher, and FLOPS was reduced by 7.4 times. Above all, we should not overlook the fact that the performance of the student improved from $50.72\%$ to $53.54\%$ mAPH/L2 by our KD method. Furthermore, we apply our method to the student whose channel was reduced by half. The student's performance increases to $59.04\%$ , and the parameters and FLOPS compared to the teacher are reduced by 3.5 times and 2.6 times, respectively. Compared to lightweight network-based methods [13] [35] [24] [46], our student networks are able to derive stable performance with fewer parameters and FLOPS in 3D object detection. + +# 5. Conclusion + +In this paper, we propose a novel KD method that transfers knowledge to produce a lightweight point cloud detector. Our main method involves interchange transfer, which learns coarse knowledge by increasing the similarity of the compressed feature and fine knowledge by decompressing the map-view feature of the other side using the channel-wise autoencoder. Moreover, we introduce a method to guide multiple detection head using head relation-aware self-attention, which refines knowledge by considering the relation of instances and properties. Ablation studies demonstrate the effectiveness of our proposed algorithm, and extensive experiments on the two large-scale open datasets verify that our proposed method achieves competitive performance against state-of-the-art methods. + +Acknowledgement. This work was partly supported by NRF-2022R1A2C1091402, BK21 FOUR program of the NRF of Korea funded by the Ministry of Education (NRF5199991014091), and IITP grant funded by the Korea government(MSIT) (No.2021-0-00951, Development of Cloud based Autonomous Driving AI learning Software; No. 2021-0-02068, Artificial Intelligence Innovation Hub). W. Hwang is the corresponding author. + +# References + +[1] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11621-11631, 2020. 6 +[2] Guobin Chen, Wongun Choi, Xiang Yu, Tony Han, and Manmohan Chandraker. Learning efficient object detection models with knowledge distillation. Advances in neural information processing systems, 30, 2017. 2, 3, 5, 6, 7 +[3] Yilun Chen, Shu Liu, Xiaoyong Shen, and Jiaya Jia. Fast point r-cnn. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9775-9784, 2019. 2 +[4] Xiyang Dai, Yinpeng Chen, Bin Xiao, Dongdong Chen, Mengchen Liu, Lu Yuan, and Lei Zhang. Dynamic head: Unifying object detection heads with attentions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7373-7382, 2021. 2, 8 +[5] Martin Engelcke, Dushyant Rao, Dominic Zeng Wang, Chi Hay Tong, and Ingmar Posner. Vote3deep: Fast object detection in 3d point clouds using efficient convolutional neural networks. In 2017 IEEE International Conference on Robotics and Automation (ICRA), pages 1355-1361. IEEE, 2017. 2 +[6] Runzhou Ge, Zhuangzhuang Ding, Yihan Hu, Yu Wang, Sijia Chen, Li Huang, and Yuan Li. Afdet: Anchor free one stage 3d object detection. arXiv preprint arXiv:2006.12671, 2020. 2, 8 +[7] Xiaoyang Guo, Shaoshuai Shi, Xiaogang Wang, and Hongsheng Li. Liga-stereo: Learning lidar geometry aware representations for stereo-based 3d detector. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3153-3163, 2021. 3 +[8] Chenhang He, Hui Zeng, Jianqiang Huang, Xian-Sheng Hua, and Lei Zhang. Structure aware single-stage 3d object detection from point cloud. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11873-11882, 2020. 2 +[9] Geoffrey Hinton, Oriol Vinyals, Jeff Dean, et al. Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531, 2(7), 2015. 1, 3, 6, 8 +[10] Zhen Huang, Xu Shen, Jun Xing, Tongliang Liu, Xinmei Tian, Houqiang Li, Bing Deng, Jianqiang Huang, and Xian-Sheng Hua. Revisiting knowledge distillation: An inheritance and exploration framework. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3579-3588, 2021. 1, 3 +[11] Jangho Kim, SeongUk Park, and Nojun Kwak. Paraphrasing complex network: Network compression via factor transfer. Advances in neural information processing systems, 31, 2018. 3 +[12] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 6 +[13] Alex H Lang, Sourabh Vora, Holger Caesar, Lubing Zhou, Jiong Yang, and Oscar Beijbom. Pointpillars: Fast encoders + +for object detection from point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12697-12705, 2019. 1, 2, 3, 8 +[14] Bo Li. 3d fully convolutional network for vehicle detection in point cloud. In 2017 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 1513-1518. IEEE, 2017. 2 +[15] Quanquan Li, Shengying Jin, and Junjie Yan. Mimicking very efficient network for object detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6356-6364, 2017. 1, 3 +[16] Seyed Iman Mirzadeh, Mehrdad Farajtabar, Ang Li, Nir Levine, Akihiro Matsukawa, and Hassan Ghasemzadeh. Improved knowledge distillation via teacher assistant. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 5191-5198, 2020. 3 +[17] Jiquan Ngiam, Benjamin Caine, Wei Han, Brandon Yang, Yuning Chai, Pei Sun, Yin Zhou, Xi Yi, Ouais Alsharif, Patrick Nguyen, et al. Starnet: Targeted computation for object detection in point clouds. arXiv preprint arXiv:1908.11069, 2019. 2 +[18] Charles R Qi, Wei Liu, Chenxia Wu, Hao Su, and Leonidas J Guibas. Frustum pointnets for 3d object detection from rgb data. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 918-927, 2018. 2 +[19] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 3 +[20] Adriana Romero, Nicolas Ballas, Samira Ebrahimi Kahou, Antoine Chassang, Carlo Gatta, and Yoshua Bengio. Fitnets: Hints for thin deep nets. arXiv preprint arXiv:1412.6550, 2014. 1, 3, 6, 7 +[21] Danila Rukhovich, Anna Vorontsova, and Anton Konushin. Imvoxelnet: Image to voxels projection for monocular and multi-view general-purpose 3d object detection. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 2397-2406, 2022. 2, 8 +[22] Shaoshuai Shi, Chaoxu Guo, Li Jiang, Zhe Wang, Jianping Shi, Xiaogang Wang, and Hongsheng Li. Pv-rcnn: Pointvoxel feature set abstraction for 3d object detection. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10529–10538, 2020. 2 +[23] Shaoshuai Shi, Xiaogang Wang, and Hongsheng Li. Pointrcnn: 3d object proposal generation and detection from point cloud. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 770-779, 2019. 1, 2 +[24] Shaoshuai Shi, Zhe Wang, Jianping Shi, Xiaogang Wang, and Hongsheng Li. From points to parts: 3d object detection from point cloud with part-aware and part-aggregation network. IEEE transactions on pattern analysis and machine intelligence, 43(8):2647-2664, 2020. 8 +[25] Weijing Shi and Raj Rajkumar. Point-gnn: Graph neural network for 3d object detection in a point cloud. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1711-1719, 2020. 2 + +[26] Martin Simony, Stefan Milzy, Karl Amendey, and Horst-Michael Gross. Complex-yolo: An euler-region-proposal for real-time 3d object detection on point clouds. In Proceedings of the European Conference on Computer Vision (ECCV) Workshops, pages 0–0, 2018. 3 +[27] Leslie N Smith. Cyclical learning rates for training neural networks. In 2017 IEEE winter conference on applications of computer vision (WACV), pages 464-472. IEEE, 2017. 6 +[28] Wonchul Son, Jaemin Na, Junyong Choi, and Wonjun Hwang. Densely guided knowledge distillation using multiple teacher assistants. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9395-9404, 2021. 1, 3 +[29] Shuran Song and Jianxiong Xiao. Deep sliding shapes for amodal 3d object detection in rgb-d images. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 808-816, 2016. 2 +[30] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. Scalability in perception for autonomous driving: Waymo open dataset. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2446-2454, 2020. 6 +[31] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. 5 +[32] Tao Wang, Li Yuan, Xiaopeng Zhang, and Jiashi Feng. Distilling object detectors with fine-grained feature imitation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4933-4942, 2019. 2, 3 +[33] Yue Wang, Alireza Fathi, Abhijit Kundu, David A Ross, Caroline Pantofaru, Tom Funkhouser, and Justin Solomon. Pillar-based object detection for autonomous driving. In European Conference on Computer Vision, pages 18-34. Springer, 2020. 3 +[34] Yue Wang and Justin M Solomon. Object dgenn: 3d object detection using dynamic graphs. Advances in Neural Information Processing Systems, 34, 2021. 3, 5, 6, 7 +[35] Yan Yan, Yuxing Mao, and Bo Li. Second: Sparsely embedded convolutional detection. Sensors, 18(10):3337, 2018. 1, 2, 8 +[36] Bin Yang, Wenjie Luo, and Raquel Urtasun. Pixor: Realtime 3d object detection from point clouds. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pages 7652-7660, 2018. 1, 2, 3 +[37] Jing Yang, Brais Martinez, Adrian Bulat, and Georgios Tzimiropoulos. Knowledge distillation via softmax regression representation learning. In International Conference on Learning Representations, 2020. 
3 +[38] Jihan Yang, Shaoshuai Shi, Runyu Ding, Zhe Wang, and Xiaojuan Qi. Towards efficient 3d object detection with knowledge distillation. arXiv preprint arXiv:2205.15156, 2022. 3, 4, 6, 7, 8 +[39] Zetong Yang, Yanan Sun, Shu Liu, and Jiaya Jia. 3dssd: Point-based 3d single stage object detector. In Proceedings + +of the IEEE/CVF conference on computer vision and pattern recognition, pages 11040-11048, 2020. 2 +[40] Zetong Yang, Yanan Sun, Shu Liu, Xiaoyong Shen, and Jiaya Jia. Std: Sparse-to-dense 3d object detector for point cloud. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1951-1960, 2019. 2 +[41] Maosheng Ye, Shuangjie Xu, and Tongyi Cao. Hvnet: Hybrid voxel network for lidar based 3d object detection. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1631-1640, 2020. 2 +[42] Zeng Yihan, Chunwei Wang, Yunbo Wang, Hang Xu, Chaoqiang Ye, Zhen Yang, and Chao Ma. Learning transferable features for point cloud detection via 3d contrastive cotraining. Advances in Neural Information Processing Systems, 34, 2021. 2 +[43] Tianwei Yin, Xingyi Zhou, and Philipp Krahenbuhl. Center-based 3d object detection and tracking. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11784-11793, 2021. 1, 2, 3, 4, 6, 7, 8 +[44] Sergey Zagoruyko and Nikos Komodakis. Paying more attention to attention: Improving the performance of convolutional neural networks via attention transfer. 5th international conference on Learning Representations, Apr. 2017. 1, 8 +[45] Linfeng Zhang, Yukang Shi, Zuoqiang Shi, Kaisheng Ma, and Chenglong Bao. Task-oriented feature distillation. Advances in Neural Information Processing Systems, 33:14759-14771, 2020. 6, 7 +[46] Yifan Zhang, Qingyong Hu, Guoquan Xu, Yanxin Ma, Jianwei Wan, and Yulan Guo. Not all points are equal: Learning highly efficient point-based detectors for 3d lidar point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18953-18962, 2022. 8 +[47] Wu Zheng, Weiliang Tang, Li Jiang, and Chi-Wing Fu. Sessd: Self-ensembling single-stage object detector from point cloud. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14494–14503, 2021. 3, 6, 7 +[48] Yin Zhou, Pei Sun, Yu Zhang, Dragomir Anguelov, Jiyang Gao, Tom Ouyang, James Guo, Jiquan Ngiam, and Vijay Vasudevan. End-to-end multi-view fusion for 3d object detection in lidar point clouds. In Conference on Robot Learning, pages 923-932. PMLR, 2020. 2 +[49] Yin Zhou and Oncel Tuzel. Voxelnet: End-to-end learning for point cloud based 3d object detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4490-4499, 2018. 
1, 2, 3 \ No newline at end of file diff --git a/2023/itKD_ Interchange Transfer-Based Knowledge Distillation for 3D Object Detection/images.zip b/2023/itKD_ Interchange Transfer-Based Knowledge Distillation for 3D Object Detection/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..c74f92657bdef3ce80f6adf4b88202eff841365e --- /dev/null +++ b/2023/itKD_ Interchange Transfer-Based Knowledge Distillation for 3D Object Detection/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f9a17286addae4b5626b2f01ea7bedf88c155b13fdc2417777a97100d11426a7 +size 602607 diff --git a/2023/itKD_ Interchange Transfer-Based Knowledge Distillation for 3D Object Detection/layout.json b/2023/itKD_ Interchange Transfer-Based Knowledge Distillation for 3D Object Detection/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..37006bb71150446e2b69aed7d16d89075584dd55 --- /dev/null +++ b/2023/itKD_ Interchange Transfer-Based Knowledge Distillation for 3D Object Detection/layout.json @@ -0,0 +1,8153 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 75, + 102, + 519, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 102, + 519, + 138 + ], + "spans": [ + { + "bbox": [ + 75, + 102, + 519, + 138 + ], + "type": "text", + "content": "itKD: Interchange Transfer-based Knowledge Distillation for 3D Object Detection" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 124, + 160, + 468, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 160, + 468, + 175 + ], + "spans": [ + { + "bbox": [ + 124, + 160, + 468, + 175 + ], + "type": "text", + "content": "Hyeon Cho" + }, + { + "bbox": [ + 124, + 160, + 468, + 175 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 124, + 160, + 468, + 175 + ], + "type": "text", + "content": ", Junyong Choi" + }, + { + "bbox": [ + 124, + 160, + 468, + 175 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 124, + 160, + 468, + 175 + ], + "type": "text", + "content": ", Geonwoo Baek" + }, + { + "bbox": [ + 124, + 160, + 468, + 175 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 124, + 160, + 468, + 175 + ], + "type": "text", + "content": ", and Wonjun Hwang" + }, + { + "bbox": [ + 124, + 160, + 468, + 175 + ], + "type": "inline_equation", + "content": "^{1,3}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 149, + 175, + 443, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 175, + 443, + 190 + ], + "spans": [ + { + "bbox": [ + 149, + 175, + 443, + 190 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 149, + 175, + 443, + 190 + ], + "type": "text", + "content": " Ajou University, " + }, + { + "bbox": [ + 149, + 175, + 443, + 190 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 149, + 175, + 443, + 190 + ], + "type": "text", + "content": "Hyundai Motor Company, " + }, + { + "bbox": [ + 149, + 175, + 443, + 190 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 149, + 175, + 443, + 190 + ], + "type": "text", + "content": "Naver AI Lab" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 78, + 191, + 512, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 191, + 512, + 203 + ], + "spans": [ + { + "bbox": [ + 78, + 191, + 512, + 203 + ], + "type": "text", + "content": "ch0104@ajou.ac.kr, chldusxkr@hyundai.com, 
bkw0622@ajou.ac.kr, wjhwang@ajou.ac.kr" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "spans": [ + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 256, + 290, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 256, + 290, + 520 + ], + "spans": [ + { + "bbox": [ + 46, + 256, + 290, + 520 + ], + "type": "text", + "content": "Point-cloud based 3D object detectors recently have achieved remarkable progress. However, most studies are limited to the development of network architectures for improving only their accuracy without consideration of the computational efficiency. In this paper, we first propose an autoencoder-style framework comprising channel-wise compression and decompression via interchange transfer-based knowledge distillation. To learn the map-view feature of a teacher network, the features from teacher and student networks are independently passed through the shared autoencoder; here, we use a compressed representation loss that binds the channel-wised compression knowledge from both student and teacher networks as a kind of regularization. The decompressed features are transferred in opposite directions to reduce the gap in the interchange reconstructions. Lastly, we present an head attention loss to match the 3D object detection information drawn by the multi-head self-attention mechanism. Through extensive experiments, we verify that our method can train the lightweight model that is well-aligned with the 3D point cloud detection task and we demonstrate its superiority using the well-known public datasets; e.g., Waymo and nuScenes." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 544, + 128, + 555 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 544, + 128, + 555 + ], + "spans": [ + { + "bbox": [ + 47, + 544, + 128, + 555 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 563, + 287, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 563, + 287, + 684 + ], + "spans": [ + { + "bbox": [ + 46, + 563, + 287, + 684 + ], + "type": "text", + "content": "Convolutional neural network (CNN)-based 3D object detection methods using point clouds [13] [35] [36] [43] [49] have attracted wide attention based on their outstanding performance for self-driving cars. Recent CNN-based works have required more computational complexity to achieve higher precision under the various wild situation. Some studies [23] [36] [43] have proposed methods to improve the speed of 3D object detection through which the non-maximum suppression (NMS) or anchor procedures are removed but the network parameters are still large." 
+ } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 317, + 232, + 532, + 422 + ], + "blocks": [ + { + "bbox": [ + 317, + 232, + 532, + 422 + ], + "lines": [ + { + "bbox": [ + 317, + 232, + 532, + 422 + ], + "spans": [ + { + "bbox": [ + 317, + 232, + 532, + 422 + ], + "type": "image", + "image_path": "fd151daba5e023596b2a4639e0ae5b201339f012a5bbd5fbe7d0d4786d001c5f.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 425, + 547, + 545 + ], + "lines": [ + { + "bbox": [ + 304, + 425, + 547, + 545 + ], + "spans": [ + { + "bbox": [ + 304, + 425, + 547, + 545 + ], + "type": "text", + "content": "Figure 1. Performance comparison between teacher and student networks for a point-cloud based 3D object detection. The top example images are qualitatively compared between the results of teacher, student and our networks. Specifically, the first row images are an input sample with labels and the center heatmap head of the teacher network. The second row examples are responses of teacher, student, and ours for the yellow circle on the heatmap (or the blue dash circle on the input). The bottom image quantitatively shows the computational complexity and the corresponding accuracy of teacher, student and our networks, respectively. Best viewed in color." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 557, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 557, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 557, + 547, + 715 + ], + "type": "text", + "content": "Knowledge distillation (KD) is one of the parameter compression techniques, which can effectively train a compact student network through the guidance of a deep teacher network, as shown in the example images of Fig. 1. Starting with Hinton's work [9], many KD studies [10] [20] [28] [44] have transferred the discriminative teacher knowledge to the student network for classification tasks. From the viewpoint of the detection task, KD should be extended to the regression problem, including the object locations, which is not easy to straight-forwardly apply the classification-based KD methods to the detection task. To alleviate this problem, KD methods for object detection have been developed for mimicking the output of the backbone network [15] (e.g., region" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 693, + 287, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 693, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 693, + 287, + 712 + ], + "type": "text", + "content": "1Our code is available at https://github.com/hyeon-jo/interchange-transfer-KD." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "13540" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "content": "proposal network) or individual detection head [2] [32]. Nevertheless, these methods have only been studied for detecting 2D image-based objects, and there is a limit to applying them to sparse 3D point cloud-based data that have not object-specific color information but only 3D position-based object structure information." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 147, + 288, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 147, + 288, + 363 + ], + "spans": [ + { + "bbox": [ + 46, + 147, + 288, + 363 + ], + "type": "text", + "content": "Taking a closer look at differences between 2D and 3D data, there is a large gap in that 2D object detection usually predicts 2D object locations based on inherent color information with the corresponding appearances, but 3D object detection estimates 3D object boxes from inputs consisting of only 3D point clouds. Moreover, the number of the point clouds constituting objects varies depending on the distances and presence of occlusions [42]. Another challenge in 3D object detection for KD is that, compared to 2D object detection, 3D object detection methods [4] [6] [43] [21] have more detection head components such as 3D boxes, and orientations. These detection heads are highly correlated with each other and represent different 3D characteristics. In this respect, when transferring the detection heads of the teacher network to the student network using KD, it is required to guide the distilled knowledge under the consideration of the correlation among the multiple detection head components." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 367, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 367, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 367, + 288, + 713 + ], + "type": "text", + "content": "In this paper, we propose a novel interchange transfer-based KD (itKD) method designed for the lightweight point-cloud based 3D object detection. The proposed itKD comprises two modules: (1) a channel-wise autoencoder based on the interchange transfer of reconstructed knowledge and (2) a head relation-aware self-attention on multiple 3D detection heads. First of all, through a channel-wise compressing and decompressing processes for KD, the interchange transfer-based autoencoder effectively represents the map-view features from the viewpoint of 3D representation centric-knowledge. 
Specifically, the encoder provides an efficient representation by compressing the map-view feature in the channel direction to preserve the spatial positions of the objects and the learning of the student network could be regularized by the distilled position information of objects in the teacher network. For transferring the interchange knowledge to the opposite networks, the decoder of the student network reconstructs the map-view feature under the guidance of the teacher network while the reconstruction of the teacher network is guided by the map-view feature of the student network. As a result, the student network can effectively learn how to represent the 3D map-view feature of the teacher. Furthermore, to refine the teacher's object detection results as well as its representation, our proposed head relation-aware self-attention gives a chance to learn the pivotal information that should be taught to the student network for improving the 3D detection results by considering the inter-head relation among the multiple detection head and the intra-head relation of" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 305, + 72, + 426, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 426, + 83 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 426, + 83 + ], + "type": "text", + "content": "the individual detection head." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 84, + 545, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 84, + 545, + 191 + ], + "spans": [ + { + "bbox": [ + 304, + 84, + 545, + 191 + ], + "type": "text", + "content": "In this way, we implement a unified KD framework to successfully learn the 3D representation and 3D detection results of the teacher network for the lightweight 3D point cloud object detection. We also conduct extensive ablation studies for thoroughly validating our approach in Waymo and nuScenes datasets. The results reveal the outstanding potential of our approach for transferring distilled knowledge that can be utilized to improve the performance of 3D point cloud object detection models." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 317, + 193, + 503, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 193, + 503, + 204 + ], + "spans": [ + { + "bbox": [ + 317, + 193, + 503, + 204 + ], + "type": "text", + "content": "Our contributions are summarized as follows:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 317, + 209, + 545, + 399 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 317, + 209, + 545, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 209, + 545, + 279 + ], + "spans": [ + { + "bbox": [ + 317, + 209, + 545, + 279 + ], + "type": "text", + "content": "- For learning the 3D representation-centric knowledge from the teacher network, we propose the channelwise autoencoder regularized in the compressed domain and the interchange knowledge transfer method wherein the reconstructed features are guided by the opposite networks." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 317, + 280, + 545, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 280, + 545, + 339 + ], + "spans": [ + { + "bbox": [ + 317, + 280, + 545, + 339 + ], + "type": "text", + "content": "- For detection head-centric knowledge of the teacher, we suggest the head relation-aware self-attention which can efficiently distill the detection properties under the consideration of the inter-head relation and intra-head relation of the multiple 3D detection heads." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 317, + 340, + 545, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 340, + 545, + 399 + ], + "spans": [ + { + "bbox": [ + 317, + 340, + 545, + 399 + ], + "type": "text", + "content": "- Our work is the best attempt to reduce the parameters of point cloud-based 3D object detection using KD. Additionally, we validate its superiority using two large datasets that reflect real-world driving conditions, e.g., Waymo and NuScenes." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 411, + 397, + 423 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 411, + 397, + 423 + ], + "spans": [ + { + "bbox": [ + 306, + 411, + 397, + 423 + ], + "type": "text", + "content": "2. Related Works" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 431, + 526, + 444 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 431, + 526, + 444 + ], + "spans": [ + { + "bbox": [ + 306, + 431, + 526, + 444 + ], + "type": "text", + "content": "2.1. 3D Object Detection based on Point Cloud" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 450, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 450, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 450, + 545, + 713 + ], + "type": "text", + "content": "During the last few years, encouraged by the success of CNNs, the development of object detectors using CNNs is developing rapidly. Recently, many 3D object detectors have been studied and they can be briefly categorized by how they extract representations from point clouds; e.g., grid-based [35] [36] [49] [13] [43], point-based [18] [23] [17] [25] [39] and hybrid-based [3] [40] [8] [48] [22] methods. In detail, Vote3Deep [5] thoroughly exploited feature-centric voting to build CNNs for detecting objects in point clouds. In [29], they have studied on the task of amodal 3D object detection in RGB-D images, where a 3D region proposal network (RPN) to learn objectness from geometric shapes and the joint object recognition network to extract geometric features in 3D and color features in 2D. The 3D fully convolutional network [14] was straightforwardly applied to point cloud data for vehicle detection. In the early days, VoxelNet [49] has designed an end-to-end trainable detector based on learning-based voxelization using fully connected layers. In [35], they encoded the point cloud by VoxelNet and used the sparse convolution to achieve the fast detection. 
HVNet [41] fused the multi-scale voxel feature encoder at the point-wise level and projected into multi-" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "13541" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 155 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 155 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 155 + ], + "type": "text", + "content": "ple pseudo-image feature maps for solving the various sizes of the feature map. In [26], they replaced the point cloud with a grid-based bird's-eye view (BEV) RGB-map and utilized YOLOv2 to detect the 3D objects. FIXOR [36] converted the point cloud to a 3D BEV map and carried out the real-time 3D object detection with an RPN-free single-stage based model." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 156, + 288, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 156, + 288, + 300 + ], + "spans": [ + { + "bbox": [ + 46, + 156, + 288, + 300 + ], + "type": "text", + "content": "Recently, PointPillars (PP)-based method [13] utilized the PointNet [19] to learn the representation of point clouds organized in vertical columns for achieving the fast 3D object detection. To boost both performance and speed over PP, a pillar-based method [33] that incorporated a cylindrical projection into multi-view feature learning was proposed. More recently, CenterPoint [43] was introduced as an anchor-free detector that predicted the center of an object using a PP or VoxelNet-based feature encoder. In this paper, we employ the backbone architecture using CenterPoint because it is simple, near real-time, and achieves good performance in the wild situation." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 310, + 177, + 323 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 310, + 177, + 323 + ], + "spans": [ + { + "bbox": [ + 47, + 310, + 177, + 323 + ], + "type": "text", + "content": "2.2. Knowledge Distillation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 328, + 287, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 328, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 46, + 328, + 287, + 544 + ], + "type": "text", + "content": "KD is one of the methods used for compressing deep neural networks and its fundamental key is to imitate the knowledge extracted from the teacher network, which has heavy parameters as well as good accuracy. Hinton et al. [9] performed a knowledge transfer using KL divergence; FitNet [20] proposed a method for teaching student networks by imitating intermediate layers. On the other hand, TAKD [16] and DGKD [28] used multiple teacher networks for transferring more knowledge to the student network in spite of large parameter gaps. Recently, some studies have been proposed using the layers shared between the teacher and the student networks for KD. Specifically, in [37], KD was performed through softmax regression as the student and teacher networks shared the same classifier. 
IEKD [10] proposed a method to split the student network into inheritance and exploration parts and mimic the compact teacher knowledge through a shared latent feature space via an autoencoder." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 544, + 287, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 544, + 287, + 675 + ], + "spans": [ + { + "bbox": [ + 46, + 544, + 287, + 675 + ], + "type": "text", + "content": "Beyond its use in classification, KD for detection should transfer the regression knowledge regarding the positions of the objects to the student network. For this purpose, a KD for 2D object detection [15] was first proposed using feature map mimic learning. In [2], they transferred the detection knowledge of the teacher network using hint learning for an RPN, weighted cross-entropy loss for classification, and bound regression loss for regression. Recently, Wang et al. [32] proposed a KD framework for detection by utilizing the cross-location discrepancy of feature responses through fine-grained feature imitation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "content": "As far as we know, there are few KD studies [7] [47] [34] [38] on point cloud-based 3D object detection so far. However, looking at similar studies on 3D knowledge trans" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 545, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 251 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 251 + ], + "type": "text", + "content": "fer, SE-SSD [47] presented a knowledge distillation-based self-ensembling method for exploiting soft and hard targets with constraints to jointly optimize the model without extra computational cost during inference time. Object-DGCNN [34] proposed a NMS-free 3D object detection via dynamic graphs and a set-to-set distillation. They used the set-to-set distillation method for improving the performance without the consideration of the model compression. Another latest study is SparseKD [38] which suggested a label KD method that distills a few pivotal positions determined by teacher classification response to enhance the logit KD method. On the other hand, in this paper, we are more interest in how to make the student network lighter, or lower computational complexity, by using the KD for 3D object detection." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 263, + 388, + 277 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 263, + 388, + 277 + ], + "spans": [ + { + "bbox": [ + 306, + 263, + 388, + 277 + ], + "type": "text", + "content": "3. Methodology" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 283, + 386, + 295 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 283, + 386, + 295 + ], + "spans": [ + { + "bbox": [ + 306, + 283, + 386, + 295 + ], + "type": "text", + "content": "3.1. 
Background" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 301, + 545, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 301, + 545, + 434 + ], + "spans": [ + { + "bbox": [ + 304, + 301, + 545, + 434 + ], + "type": "text", + "content": "The 3D point cloud object detection methods [13] [49] generally consists of three components; a point cloud encoder, a backbone network, and detection heads. In this paper, we employ CenterPoint [43] network as a backbone architecture. Since the parameter size of the backbone network is the largest among components of the 3D object detector, we aim to construct the student network by reducing the channel sizes of the backbone network for efficient network. We design our method to teach the student 3D representation-centric knowledge and detection head-centric knowledge of the teacher network, respectively." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 441, + 427, + 453 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 441, + 427, + 453 + ], + "spans": [ + { + "bbox": [ + 306, + 441, + 427, + 453 + ], + "type": "text", + "content": "3.2. Interchange Transfer" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 460, + 545, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 460, + 545, + 627 + ], + "spans": [ + { + "bbox": [ + 304, + 460, + 545, + 627 + ], + "type": "text", + "content": "We adopt an autoencoder framework to effectively transfer the meaningful distilled knowledge regarding 3D detection from the teacher to the student network. The traditional encoder-based KD methods [10] [11] have been limited to the classification task, which transfers only compressed categorical knowledge to the student network. However, from the viewpoint of the detection task, the main KD goal of this paper is transferring the distilled knowledge regarding not only categorical features but also object location-related features. Particularly, unlike 2D detectors, 3D object detectors should regress more location information such as object orientations, 3D box sizes, etc., and it results in increasing the importance of how to transfer the 3D location features to the student network successfully." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 628, + 545, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 628, + 545, + 676 + ], + "spans": [ + { + "bbox": [ + 304, + 628, + 545, + 676 + ], + "type": "text", + "content": "For this purpose, we transfer the backbone knowledge that contains 3D object representation from the teacher network to the student through the compressed and reconstructed knowledge domains. As shown in Fig. 
2, we in" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "type": "text", + "content": "2The total parameter size of the 3D detector is about " + }, + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "type": "inline_equation", + "content": "5.2\\mathrm{M}" + }, + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "type": "text", + "content": " and the backbone size is approximately " + }, + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "type": "inline_equation", + "content": "4.8\\mathrm{M}" + }, + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "type": "text", + "content": ", which is " + }, + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "type": "inline_equation", + "content": "92\\%" + }, + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "type": "text", + "content": ". Further details are found in the supplementary material." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "13542" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 98, + 72, + 497, + 302 + ], + "blocks": [ + { + "bbox": [ + 98, + 72, + 497, + 302 + ], + "lines": [ + { + "bbox": [ + 98, + 72, + 497, + 302 + ], + "spans": [ + { + "bbox": [ + 98, + 72, + 497, + 302 + ], + "type": "image", + "image_path": "6d270483b17383a09ed06897d8dd184d913465b5861a6c55e98422a7b11ef053.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 305, + 547, + 373 + ], + "lines": [ + { + "bbox": [ + 46, + 305, + 547, + 373 + ], + "spans": [ + { + "bbox": [ + 46, + 305, + 547, + 373 + ], + "type": "text", + "content": "Figure 2. Overview of the proposed knowledge distillation method. The teacher and student networks take the same point clouds as inputs. Then, the map-view features " + }, + { + "bbox": [ + 46, + 305, + 547, + 373 + ], + "type": "inline_equation", + "content": "M^t" + }, + { + "bbox": [ + 46, + 305, + 547, + 373 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 305, + 547, + 373 + ], + "type": "inline_equation", + "content": "M^s" + }, + { + "bbox": [ + 46, + 305, + 547, + 373 + ], + "type": "text", + "content": " are extracted from the teacher and student networks, respectively. The channel-wise autoencoder transfers the knowledge obtained from " + }, + { + "bbox": [ + 46, + 305, + 547, + 373 + ], + "type": "inline_equation", + "content": "M^t" + }, + { + "bbox": [ + 46, + 305, + 547, + 373 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 305, + 547, + 373 + ], + "type": "inline_equation", + "content": "M^s" + }, + { + "bbox": [ + 46, + 305, + 547, + 373 + ], + "type": "text", + "content": " by using the compressed representation loss and interchange transfer loss consecutively. The head relation-aware self-attention provides the relation-aware knowledge of multiple detection head to the student network using the attention head loss. The dotted lines of the modules denote that there are shared network parameters between the teacher and student networks. 
The light-yellow boxes are buffer layers for sampling the features to match the channel sizes of networks." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 381, + 289, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 381, + 289, + 537 + ], + "spans": [ + { + "bbox": [ + 46, + 381, + 289, + 537 + ], + "type": "text", + "content": "produce a channel-wise autoencoder which consists of an encoder in which the channel dimension of the autoencoder is gradually decreased and a decoder in the form of increasing the channel dimension. Note that spatial features play a pivotal role in the detection task and we try to preserve the spatial information by encoding features in the channel direction. We propose a compressed representation loss to coarsely guide location information of the objects to the student network in Fig. 2, and the compressed representation loss has an effect similar to the regularization of the autoencoder that binds the coordinates of the objectness between the teacher and student networks. The compressed representation loss function " + }, + { + "bbox": [ + 46, + 381, + 289, + 537 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{cr}" + }, + { + "bbox": [ + 46, + 381, + 289, + 537 + ], + "type": "text", + "content": " is represented as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 75, + 544, + 287, + 574 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 544, + 287, + 574 + ], + "spans": [ + { + "bbox": [ + 75, + 544, + 287, + 574 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {c r} = m _ {o b j} \\circ \\mathcal {S} \\left[ E \\left(\\theta_ {e n c}, M ^ {t}\\right), E \\left(\\theta_ {e n c}, M ^ {s}\\right) \\right] \\tag {1} \\\\ = m _ {o b j} \\circ \\mathcal {S} \\left[ M _ {e n c} ^ {t}, M _ {e n c} ^ {s} \\right], \\\\ \\end{array}", + "image_path": "811f617b4f9eff51554e7b35d417b79cf55135a4046329970e77309ddf3437f7.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 582, + 287, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 287, + 653 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 287, + 653 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 582, + 287, + 653 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 46, + 582, + 287, + 653 + ], + "type": "text", + "content": " is a shared encoder, which has the parameters " + }, + { + "bbox": [ + 46, + 582, + 287, + 653 + ], + "type": "inline_equation", + "content": "\\theta_{enc}" + }, + { + "bbox": [ + 46, + 582, + 287, + 653 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 582, + 287, + 653 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 46, + 582, + 287, + 653 + ], + "type": "text", + "content": " denotes " + }, + { + "bbox": [ + 46, + 582, + 287, + 653 + ], + "type": "inline_equation", + "content": "l_1" + }, + { + "bbox": [ + 46, + 582, + 287, + 653 + ], + "type": "text", + "content": " loss as a similarity measure. 
" + }, + { + "bbox": [ + 46, + 582, + 287, + 653 + ], + "type": "inline_equation", + "content": "M^t" + }, + { + "bbox": [ + 46, + 582, + 287, + 653 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 582, + 287, + 653 + ], + "type": "inline_equation", + "content": "M^s" + }, + { + "bbox": [ + 46, + 582, + 287, + 653 + ], + "type": "text", + "content": " are outputs of the teacher and student backbones, respectively. " + }, + { + "bbox": [ + 46, + 582, + 287, + 653 + ], + "type": "inline_equation", + "content": "m_{obj}" + }, + { + "bbox": [ + 46, + 582, + 287, + 653 + ], + "type": "text", + "content": " represents a binary mask to indicate object locations in backbone output like [38] and " + }, + { + "bbox": [ + 46, + 582, + 287, + 653 + ], + "type": "inline_equation", + "content": "\\circ" + }, + { + "bbox": [ + 46, + 582, + 287, + 653 + ], + "type": "text", + "content": " is an element-wise product." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 654, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 654, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 654, + 288, + 714 + ], + "type": "text", + "content": "After performing the coarse representation-based knowledge distillation in a compressed domain, the fine representation features of the teacher network are required to teach the student network from the viewpoint of 3D object detection. In this respect, the decoder reconstructs the fine map" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 381, + 547, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 381, + 547, + 525 + ], + "spans": [ + { + "bbox": [ + 304, + 381, + 547, + 525 + ], + "type": "text", + "content": "view features in the channel direction from the compressed features. Through the proposed interchange transfer loss, the reconstructed features are guided from the opposite networks, not their own stem networks, as shown in Fig. 2. Specifically, since the teacher network is frozen and we use the shared autoencoder for both student and teacher networks, we can teach the reconstructed fine features from the student network to resemble the output of the teacher network " + }, + { + "bbox": [ + 304, + 381, + 547, + 525 + ], + "type": "inline_equation", + "content": "M^t" + }, + { + "bbox": [ + 304, + 381, + 547, + 525 + ], + "type": "text", + "content": " rather than the student " + }, + { + "bbox": [ + 304, + 381, + 547, + 525 + ], + "type": "inline_equation", + "content": "M^s" + }, + { + "bbox": [ + 304, + 381, + 547, + 525 + ], + "type": "text", + "content": ". Moreover, the reconstructed fine features from the teacher network can guide the student's output, " + }, + { + "bbox": [ + 304, + 381, + 547, + 525 + ], + "type": "inline_equation", + "content": "M^s" + }, + { + "bbox": [ + 304, + 381, + 547, + 525 + ], + "type": "text", + "content": " at the same time. 
The proposed interchange transfer loss " + }, + { + "bbox": [ + 304, + 381, + 547, + 525 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{it}" + }, + { + "bbox": [ + 304, + 381, + 547, + 525 + ], + "type": "text", + "content": " is defined as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 359, + 534, + 545, + 548 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 359, + 534, + 545, + 548 + ], + "spans": [ + { + "bbox": [ + 359, + 534, + 545, + 548 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {t \\rightarrow s} = \\mathcal {S} \\left[ M ^ {s}, D \\left(\\theta_ {\\text {d e c}}, M _ {\\text {e n c}} ^ {t}\\right)\\right], \\tag {2}", + "image_path": "3570c42953c711f5331177809868bf5034082210a3508c5bce996f8ee7254499.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 359, + 555, + 545, + 569 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 359, + 555, + 545, + 569 + ], + "spans": [ + { + "bbox": [ + 359, + 555, + 545, + 569 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {s \\rightarrow t} = \\mathcal {S} \\left[ M ^ {t}, D \\left(\\theta_ {\\text {d e c}}, M _ {\\text {e n c}} ^ {s}\\right)\\right], \\tag {3}", + "image_path": "54624ee39d43268217f53bbf266f1012633a7c005ed3dfe4e42ddb7c106c9abb.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 382, + 572, + 545, + 585 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 382, + 572, + 545, + 585 + ], + "spans": [ + { + "bbox": [ + 382, + 572, + 545, + 585 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {i t} = \\mathcal {L} _ {s \\rightarrow t} + \\mathcal {L} _ {t \\rightarrow s}, \\tag {4}", + "image_path": "fcc223dc34749487d527c751000458b82b9ea2bfba9ae1034358047deb4ea6e8.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 591, + 547, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 591, + 547, + 662 + ], + "spans": [ + { + "bbox": [ + 304, + 591, + 547, + 662 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 591, + 547, + 662 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 304, + 591, + 547, + 662 + ], + "type": "text", + "content": " is the decoder that contains the network parameter " + }, + { + "bbox": [ + 304, + 591, + 547, + 662 + ], + "type": "inline_equation", + "content": "\\theta_{dec}" + }, + { + "bbox": [ + 304, + 591, + 547, + 662 + ], + "type": "text", + "content": ", which is a shared parameter. We hereby present the representation-based KD for 3D object detection in both compressed and decompressed domains to guide the student network to learn the map-view feature of the teacher network efficiently." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 670, + 499, + 682 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 670, + 499, + 682 + ], + "spans": [ + { + "bbox": [ + 306, + 670, + 499, + 682 + ], + "type": "text", + "content": "3.3. 
Head Relation-Aware Self-Attention" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 689, + 546, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 546, + 714 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 546, + 714 + ], + "type": "text", + "content": "Fundamentally, our backbone network, e.g., Center-Point [43], has various types of 3D object characteristics" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "13543" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 100, + 72, + 496, + 228 + ], + "blocks": [ + { + "bbox": [ + 100, + 72, + 496, + 228 + ], + "lines": [ + { + "bbox": [ + 100, + 72, + 496, + 228 + ], + "spans": [ + { + "bbox": [ + 100, + 72, + 496, + 228 + ], + "type": "image", + "image_path": "535309497c413003f5efdc77034d7ee76e0a19cbd4470c32fbff243586d83e47.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 230, + 547, + 288 + ], + "lines": [ + { + "bbox": [ + 45, + 230, + 547, + 288 + ], + "spans": [ + { + "bbox": [ + 45, + 230, + 547, + 288 + ], + "type": "text", + "content": "Figure 3. Head Relation-Aware Self-Attention. We make the object center-head feature from object center locations in the detection head feature and use it as different shaped inputs to self-attention for inter-head relation and intra-head relation. In the self-attention for inter-head relation, we use the object center-head feature as an input for the self-attention. In the self-attention for intra-head relation, the detection heads are separately used for the independent self-attention functions. The outputs of the self-attention are concatenated by " + }, + { + "bbox": [ + 45, + 230, + 547, + 288 + ], + "type": "inline_equation", + "content": "\\mathbb{C}" + }, + { + "bbox": [ + 45, + 230, + 547, + 288 + ], + "type": "text", + "content": " operations and the head relation-aware self-attention is generated through the fusion layer." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 295, + 289, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 295, + 289, + 413 + ], + "spans": [ + { + "bbox": [ + 46, + 295, + 289, + 413 + ], + "type": "text", + "content": "on detection heads. Specifically, the locations, size, and direction of an object are different properties, but they are inevitably correlated to each other because they come from the same object. However, the traditional KD methods [2] [34] were only concerned with how the student network straight-forwardly mimicked the outputs of the teacher network without considering the relation among the detection heads. To overcome this problem, we make use of the relation of detection heads as a major factor for the detection head-centric KD." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "spans": [ + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "text", + "content": "Our proposed head relation-aware self-attention is directly inspired by the multi-head self-attention [31] in order to learn the relation between the multiple detection head. As shown in Fig. 3, we first extract " + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "text", + "content": "-th instance feature " + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "inline_equation", + "content": "v^{i} \\in \\mathbb{R}^{c}" + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "text", + "content": " is the channel size, from the center location of the object in the detection head feature. Note that, since the instance feature is extracted from the multiple detection head, it has several object properties such as a class-specific heatmap " + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "inline_equation", + "content": "v_{hm}^{i}" + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "text", + "content": ", a sub-voxel location refinement " + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "inline_equation", + "content": "v_{o}^{i}" + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "text", + "content": ", a height-above-ground " + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "inline_equation", + "content": "v_{h}^{i}" + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "text", + "content": ", a 3D size " + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "inline_equation", + "content": "v_{s}^{i}" + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "text", + "content": ", and a yaw rotation angle " + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "inline_equation", + "content": "v_{r}^{i}" + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "text", + "content": ". When there are a total of " + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "text", + "content": " objects, we combine them to make an object center-head feature " + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "inline_equation", + "content": "v \\in \\mathbb{R}^{n \\times c}" + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "text", + "content": ". We use the same object center-head feature " + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "text", + "content": " of dimension " + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "text", + "content": " for query, key, and value, which are an input of the scaled dot-product attention. 
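The construction of the object center-head feature can be made concrete with a short sketch. The following PyTorch-style snippet is illustrative only: the helper name, the dictionary layout of the detection heads, and the (y, x) center-index convention are assumptions, not taken from the paper or its released code.

```python
# Illustrative sketch (assumed names/layout): gather each detection head at the
# n object-center locations and stack the results into v of shape (n, c).
import torch

def gather_center_head_feature(head_maps, centers):
    """head_maps: dict of per-head maps, each (C_k, H, W), e.g.
    {'heatmap': ..., 'offset': ..., 'height': ..., 'size': ..., 'rot': ...};
    centers: LongTensor (n, 2) holding (y, x) object-center indices."""
    ys, xs = centers[:, 0], centers[:, 1]
    per_head = [m[:, ys, xs].t() for m in head_maps.values()]  # each (n, C_k)
    v = torch.cat(per_head, dim=1)                             # (n, c)
    return v, per_head
```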
The self-attention function " + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 46, + 415, + 289, + 608 + ], + "type": "text", + "content": " is computed by" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 616, + 287, + 644 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 616, + 287, + 644 + ], + "spans": [ + { + "bbox": [ + 104, + 616, + 287, + 644 + ], + "type": "interline_equation", + "content": "\\mathcal {F} (v) = \\operatorname {s o f t m a x} \\left(\\frac {v ^ {\\top} \\cdot v}{\\sqrt {n}}\\right) \\cdot v. \\tag {5}", + "image_path": "aebb37d31e72f1879a0d721c1a9fe1ed5b8507acbe042ad970006deca67a8b43.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 654, + 289, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 654, + 289, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 654, + 289, + 713 + ], + "type": "text", + "content": "The proposed head relation-aware self-attention consists of two different self-attention for inter-head and intra-head relations as illustrated in Fig. 3. We propose the self-attention based on the inter-head relation of the instance features, which is made in order to consider the relation" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 295, + 545, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 295, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 304, + 295, + 545, + 342 + ], + "type": "text", + "content": "between all detected objects and their different properties, rather than a single detected instance, from the global viewpoint. The self-attention for inter-head relation is computed by" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 349, + 342, + 545, + 357 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 349, + 342, + 545, + 357 + ], + "spans": [ + { + "bbox": [ + 349, + 342, + 545, + 357 + ], + "type": "interline_equation", + "content": "\\mathcal {F} _ {\\text {i n t e r}} (v) = \\mathcal {F} ([ v _ {h m}, v _ {o}, v _ {h}, v _ {s}, v _ {r} ]). \\tag {6}", + "image_path": "edbb4061bd5fa66c61cf4ad53b4d764cc80b1b3064aa163429b8794cc155d75f.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 359, + 545, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 359, + 545, + 419 + ], + "spans": [ + { + "bbox": [ + 304, + 359, + 545, + 419 + ], + "type": "text", + "content": "On the other hand, we suggest the self-attention for intrahead relation using the individual detection heads. Here we perform the attentions using only local relation in individual detection heads designed for different properties (e.g., orientation, size, etc.) and concatenate them. Its equation is" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 311, + 437, + 545, + 451 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 437, + 545, + 451 + ], + "spans": [ + { + "bbox": [ + 311, + 437, + 545, + 451 + ], + "type": "interline_equation", + "content": "\\mathcal {F} _ {\\text {i n t r a}} (v) = \\left[ \\mathcal {F} \\left(v _ {h m}\\right), \\mathcal {F} \\left(v _ {o}\\right), \\mathcal {F} \\left(v _ {h}\\right), \\mathcal {F} \\left(v _ {s}\\right), \\mathcal {F} \\left(v _ {r}\\right) \\right]. 
\\tag {7}", + "image_path": "cf02660824080b575f41de6e0e0475c2f044f0f3ddbac134ed8ae74ab34ca8ea.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 456, + 545, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 456, + 545, + 516 + ], + "spans": [ + { + "bbox": [ + 304, + 456, + 545, + 516 + ], + "type": "text", + "content": "We concatenate the outputs of the self-attention and apply the fusion layer to calculate a final attention score that considers the relation between the detection heads and objects. The head relation-aware self-attention equation " + }, + { + "bbox": [ + 304, + 456, + 545, + 516 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{RA}" + }, + { + "bbox": [ + 304, + 456, + 545, + 516 + ], + "type": "text", + "content": " is derived by:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 348, + 522, + 545, + 536 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 348, + 522, + 545, + 536 + ], + "spans": [ + { + "bbox": [ + 348, + 522, + 545, + 536 + ], + "type": "interline_equation", + "content": "\\mathcal {F} _ {R A} (v) = \\mathcal {G} \\left(\\left[ \\mathcal {F} _ {\\text {i n t e r}} (v), \\mathcal {F} _ {\\text {i n t r a}} (v) \\right]\\right), \\tag {8}", + "image_path": "021ca4903d8388d329aff685d32e3bd725c00babc6b13ed609552de68bd9c4a6.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 542, + 545, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 542, + 545, + 590 + ], + "spans": [ + { + "bbox": [ + 304, + 542, + 545, + 590 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 542, + 545, + 590 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 304, + 542, + 545, + 590 + ], + "type": "text", + "content": " is the fusion layer, e.g., " + }, + { + "bbox": [ + 304, + 542, + 545, + 590 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 304, + 542, + 545, + 590 + ], + "type": "text", + "content": " convolution layer. 
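Below is a minimal, non-authoritative sketch of the head relation-aware self-attention described by Eqs. (5)-(8). It reuses the per-head features produced by the gathering sketch above; the class and function names are invented, the softmax scaling follows the sqrt(n) factor of Eq. (5), and the 1x1-convolution fusion layer is written as the equivalent per-object linear map.

```python
# Sketch of Eqs. (5)-(8); names are illustrative, not the authors' code.
import math
import torch
import torch.nn as nn

def self_attention(v, n):
    """Eq. (5): scaled dot-product self-attention over the n detected objects,
    with the same feature v used as query, key and value."""
    attn = torch.softmax(v @ v.t() / math.sqrt(n), dim=-1)  # (n, n)
    return attn @ v                                          # same shape as v

class HeadRelationAwareAttention(nn.Module):
    def __init__(self, c):
        super().__init__()
        # Fusion layer G of Eq. (8): a 1x1 convolution over channels, which for
        # (n, c)-shaped features amounts to a per-object linear map.
        self.fusion = nn.Linear(2 * c, c)

    def forward(self, per_head):
        # per_head: list of (n, C_k) tensors [v_hm, v_o, v_h, v_s, v_r]
        n = per_head[0].shape[0]
        v = torch.cat(per_head, dim=1)                            # (n, c)
        f_inter = self_attention(v, n)                            # Eq. (6)
        f_intra = torch.cat([self_attention(u, n) for u in per_head],
                            dim=1)                                # Eq. (7)
        return self.fusion(torch.cat([f_inter, f_intra], dim=1))  # Eq. (8)
```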
The student network indirectly takes the teacher's knowledge by learning the relation between the multiple detection head of the teacher network through head attention loss as follows:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 359, + 597, + 545, + 610 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 359, + 597, + 545, + 610 + ], + "spans": [ + { + "bbox": [ + 359, + 597, + 545, + 610 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {a t t n}} = \\mathcal {S} \\left(\\mathcal {F} _ {R A} \\left(v _ {t}\\right), \\mathcal {F} _ {R A} \\left(v _ {s}\\right)\\right), \\tag {9}", + "image_path": "33f57385c48e72da2dea543d68f721cd377b88ebc1bfada900e500e9082bad38.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 616, + 545, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 616, + 545, + 639 + ], + "spans": [ + { + "bbox": [ + 304, + 616, + 545, + 639 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 616, + 545, + 639 + ], + "type": "inline_equation", + "content": "v_{t}" + }, + { + "bbox": [ + 304, + 616, + 545, + 639 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 616, + 545, + 639 + ], + "type": "inline_equation", + "content": "v_{s}" + }, + { + "bbox": [ + 304, + 616, + 545, + 639 + ], + "type": "text", + "content": " are the object center-head features of the teacher and the student, respectively." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 317, + 640, + 494, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 640, + 494, + 652 + ], + "spans": [ + { + "bbox": [ + 317, + 640, + 494, + 652 + ], + "type": "text", + "content": "Consequently, the overall loss is derived by" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 340, + 658, + 545, + 672 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 340, + 658, + 545, + 672 + ], + "spans": [ + { + "bbox": [ + 340, + 658, + 545, + 672 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {t o t a l}} = \\alpha \\mathcal {L} _ {\\text {s u p}} + \\beta \\left(\\mathcal {L} _ {\\text {i t}} + \\mathcal {L} _ {\\text {c r}} + \\mathcal {L} _ {\\text {a t t n}}\\right), \\tag {10}", + "image_path": "18a871364c2b59db6917dd19ab629cfedfef31757d4f66a51aa6730bcdb52297.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{sup}" + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": " is the supervised loss that consists of focal loss and regression loss, and " + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": " are the balancing parameters, which we set as 1 for simplicity." 
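To tie the objective together, here is a compact PyTorch-style sketch of the three distillation terms and the total loss of Eq. (10), combining the channel-wise autoencoder losses of Sec. 3.2 (Eqs. (1)-(4)) with the head attention loss of Eq. (9) computed from the attention module sketched above. Everything is an illustrative reconstruction under assumed names; the 1x1-convolution filter sizes follow the values reported in Sec. 4.1, and the buffer layers that match teacher/student channel widths are assumed to have been applied already.

```python
# Illustrative sketch of L_cr, L_it, L_attn and L_total (Eqs. (1)-(4), (9), (10)).
import torch
import torch.nn as nn
import torch.nn.functional as F

class ChannelAutoencoder(nn.Module):
    """Shared channel-wise autoencoder: 1x1 convolutions compress/decompress
    along the channel axis, so the spatial layout of objects is preserved."""
    def __init__(self, in_ch=384):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Conv2d(in_ch, 128, 1), nn.ReLU(inplace=True),
            nn.Conv2d(128, 64, 1), nn.ReLU(inplace=True),
            nn.Conv2d(64, 32, 1))
        self.decoder = nn.Sequential(
            nn.Conv2d(32, 64, 1), nn.ReLU(inplace=True),
            nn.Conv2d(64, 128, 1), nn.ReLU(inplace=True),
            nn.Conv2d(128, in_ch, 1))

def itkd_losses(ae, ra_attn, m_t, m_s, obj_mask, heads_t, heads_s, l_sup,
                alpha=1.0, beta=1.0):
    """m_t, m_s: teacher/student map-view features (B, C, H, W);
    obj_mask: binary object-location mask (B, 1, H, W);
    heads_t, heads_s: per-head object-center features of teacher and student."""
    enc_t, enc_s = ae.encoder(m_t), ae.encoder(m_s)
    # Eq. (1): compressed representation loss, masked to object locations.
    l_cr = (obj_mask * (enc_t - enc_s).abs()).mean()
    # Eqs. (2)-(4): interchange transfer -- each decoded feature is matched to
    # the *opposite* network's map-view feature.
    l_it = F.l1_loss(m_s, ae.decoder(enc_t)) + F.l1_loss(m_t, ae.decoder(enc_s))
    # Eq. (9): head attention loss between relation-aware attention outputs.
    l_attn = F.l1_loss(ra_attn(heads_s), ra_attn(heads_t))
    # Eq. (10): supervised detection loss plus the weighted distillation terms.
    return alpha * l_sup + beta * (l_it + l_cr + l_attn)
```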
+ } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "13544" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 71, + 256, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 71, + 256, + 85 + ], + "spans": [ + { + "bbox": [ + 47, + 71, + 256, + 85 + ], + "type": "text", + "content": "4. Experimental Results and Discussions" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 91, + 171, + 105 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 91, + 171, + 105 + ], + "spans": [ + { + "bbox": [ + 47, + 91, + 171, + 105 + ], + "type": "text", + "content": "4.1. Environment Settings" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 110, + 287, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 110, + 287, + 266 + ], + "spans": [ + { + "bbox": [ + 46, + 110, + 287, + 266 + ], + "type": "text", + "content": "Waymo Waymo open dataset [30] is one of the large-scale datasets for autonomous driving, which is captured by the synchronized and calibrated high-quality LiDAR and camera across a range of urban and suburban geographies. This dataset provides 798 training scenes and 202 validation scenes obtained by detecting all the objects within a " + }, + { + "bbox": [ + 46, + 110, + 287, + 266 + ], + "type": "inline_equation", + "content": "75\\mathrm{m}" + }, + { + "bbox": [ + 46, + 110, + 287, + 266 + ], + "type": "text", + "content": " radius; it has a total of 3 object categories (e.g., vehicle, pedestrian, and cyclist) which have 6.1M, 2.8M, and 67K sets, respectively. The mean Average Precision (mAP) and mAP weighted by heading accuracy (mAPH) are the official metrics for Waymo evaluation. mAPH is a metric that gives more weight to the heading than it does to the sizes, and it accounts for the direction of the object." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 266, + 287, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 266, + 287, + 386 + ], + "spans": [ + { + "bbox": [ + 46, + 266, + 287, + 386 + ], + "type": "text", + "content": "nuScenes nuScenes dataset [1] is another large-scale dataset used for autonomous driving. This dataset contains 1,000 driving sequences. 700, 150, and 150 sequences are used for training, validation, and testing, respectively. Each sequence is captured approximately 20 seconds with 20 FPS using the 32-lane LiDAR. Its evaluation metrics are the average precision (AP) and nuScenes detection score (NDS). NDS is a weighted average of mAP and true positive metrics which measures the quality of the detections in terms of box location, size, orientation, attributes, and velocity." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "spans": [ + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "text", + "content": "Implementation details Following the pillar-based CenterPoint [43] as the teacher network, we use an Adam optimizer [12] with a weight decay of 0.01 and a cosine annealing strategy [27] to adjust the learning rate. 
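A minimal sketch of the optimizer setup mentioned here, assuming PyTorch: Adam with a weight decay of 0.01 and a cosine-annealed learning rate. The stand-in module and the specific scheduler class are assumptions for illustration; the paper's exact schedule (it also reports initial and maximum learning rates and a momentum value) is not reconstructed here.

```python
# Illustrative only: the stand-in module and scheduler choice are assumptions.
import torch
import torch.nn as nn

student = nn.Conv2d(64, 64, 3, padding=1)  # placeholder for the student detector
optimizer = torch.optim.Adam(student.parameters(), lr=3e-4, weight_decay=0.01)
# One plausible realization of the cited "cosine annealing strategy": anneal the
# learning rate over the 36 training epochs reported in the paper.
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=36)
```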
We set 0.0003 for initial learning rate, 0.003 for max learning rate, and 0.95 for momentum. The networks have been trained for 36 epochs on " + }, + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "inline_equation", + "content": "8 \\times \\mathrm{V}100" + }, + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "text", + "content": " GPUs with a batch size of 32. For Waymo dataset, we set the detection range to " + }, + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "inline_equation", + "content": "[-74.88\\mathrm{m}, 74.88\\mathrm{m}]" + }, + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "text", + "content": " for the X and Y axes, " + }, + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "inline_equation", + "content": "[-2\\mathrm{m}, 4\\mathrm{m}]" + }, + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "text", + "content": " for the Z-axis, and a grid size of " + }, + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "inline_equation", + "content": "(0.32\\mathrm{m}, 0.32\\mathrm{m})" + }, + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "text", + "content": ". In experiments on nuScenes dataset, we used a " + }, + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "inline_equation", + "content": "(0.2\\mathrm{m}, 0.2\\mathrm{m})" + }, + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "text", + "content": " grid and set the detection range to " + }, + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "inline_equation", + "content": "[-51.2\\mathrm{m}, 51.2\\mathrm{m}]" + }, + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "text", + "content": " for the X and Y-axes, " + }, + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "inline_equation", + "content": "[-5\\mathrm{m}, 3\\mathrm{m}]" + }, + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "text", + "content": " for the Z-axis, and a grid size of " + }, + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "inline_equation", + "content": "(0.2\\mathrm{m}, 0.2\\mathrm{m})" + }, + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "text", + "content": ". Compared to the teacher network, the student network has " + }, + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "inline_equation", + "content": "1/4" + }, + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "text", + "content": " less channel capacity of backbone network. Our channel-wise autoencoder consists of three " + }, + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "text", + "content": " convolution layers as the encoder and three " + }, + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 46, + 387, + 288, + 649 + ], + "type": "text", + "content": " convolution layers as the decoder and the number of filters are 128, 64, 32 in encoder layers and 64, 128, 384 in decoder layers. The student's input buffer layer increases the channel size of 196 to 384 and the teacher's output buffer layer decreases the channel size 384 to 196." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 658, + 249, + 672 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 658, + 249, + 672 + ], + "spans": [ + { + "bbox": [ + 47, + 658, + 249, + 672 + ], + "type": "text", + "content": "4.2. 
Overall KD Performance Comparison" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "content": "We validate the performance of our method compared with well-known KD methods on the Waymo and nuScenes datasets. We re-implement the seven KD methods from 2D" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": "classification-based KD to 3D detection-based KD in this paper. We set the baseline by applying the Kullback-Leibler (KL) divergence loss [9] to the center heatmap head and " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "l_{1}" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": " loss to the other regression heads. FitNet [20] is a method that mimics the intermediate outputs of layers and we apply it to the output of the backbone for simplicity. We also simply extend EOD-KD [2], one of the 2D object detection KDs, to 3D object detection. We apply TOFD [45], a 3D classification-based KD, to our detection task and straightforwardly use SE-SSD [47], Object DGCNN [34], and SparseKD [38] for 3D object detection KD." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 205, + 546, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 205, + 546, + 408 + ], + "spans": [ + { + "bbox": [ + 304, + 205, + 546, + 408 + ], + "type": "text", + "content": "Table 1 shows that our method almost outperforms other KD methods on mAP and mAPH values for level 1 and level 2 under all three categories of objects. Especially, our performance improvement of mAPH is better than other methods, which indicates our method guides the student network well where the detected objects are facing. To verify the generality of the proposed method, we make additional comparison results using the nuScenes dataset, another large-scale 3D dataset for autonomous driving, in Table 2. Compared with the other methods, our method achieves the best accuracy under the NDS and mAP metrics in the nuScenes validation set. Specifically, when the student network shows " + }, + { + "bbox": [ + 304, + 205, + 546, + 408 + ], + "type": "inline_equation", + "content": "50.24\\%" + }, + { + "bbox": [ + 304, + 205, + 546, + 408 + ], + "type": "text", + "content": " NDS and " + }, + { + "bbox": [ + 304, + 205, + 546, + 408 + ], + "type": "inline_equation", + "content": "38.52\\%" + }, + { + "bbox": [ + 304, + 205, + 546, + 408 + ], + "type": "text", + "content": " mAP, our method achieves " + }, + { + "bbox": [ + 304, + 205, + 546, + 408 + ], + "type": "inline_equation", + "content": "53.90\\%" + }, + { + "bbox": [ + 304, + 205, + 546, + 408 + ], + "type": "inline_equation", + "content": "(+3.66\\%)" + }, + { + "bbox": [ + 304, + 205, + 546, + 408 + ], + "type": "text", + "content": " NDS and " + }, + { + "bbox": [ + 304, + 205, + 546, + 408 + ], + "type": "inline_equation", + "content": "41.33\\%" + }, + { + "bbox": [ + 304, + 205, + 546, + 408 + ], + "type": "inline_equation", + "content": "(+2.81\\%)" + }, + { + "bbox": [ + 304, + 205, + 546, + 408 + ], + "type": "text", + "content": " mAP. 
In detail, our method outperforms the other methods for the most of object classes except the construction vehicle and the bicycle." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 418, + 406, + 429 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 418, + 406, + 429 + ], + "spans": [ + { + "bbox": [ + 306, + 418, + 406, + 429 + ], + "type": "text", + "content": "4.3. Ablation Studies" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 437, + 545, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 437, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 304, + 437, + 545, + 544 + ], + "type": "text", + "content": "To analyze of our proposed method in detail, we conduct ablation studies on the Waymo dataset, and the whole performances are measured by mAPH at level 2 for simplicity. For the qualitative analysis, we visualize the map-view feature at each stage to validate the what kinds of knowledge are transferred from the teacher to the student by the proposed method. For simple visualization, we apply the " + }, + { + "bbox": [ + 304, + 437, + 545, + 544 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 304, + 437, + 545, + 544 + ], + "type": "text", + "content": " normalization to the map-view feature in the channel direction." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 545, + 545, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 545, + 545, + 665 + ], + "spans": [ + { + "bbox": [ + 304, + 545, + 545, + 665 + ], + "type": "text", + "content": "As shown in Fig. 4, the objects and backgrounds are well activated in the example image of the teacher output. On the other hand, the encoder output is activated by further highlighting the coarse positions of the target objects. When looking at the decoder output, we can see that all the fine surrounding information is represented again. At this point, it is worth noting that compared to the teacher output, the target objects are highlighted a little more. From these visual comparisons, we can infer how our method successfully transfers the object-centered knowledge to the student." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "We explore the buffer layer that matches the channel size of the channel-wise autoencoder without the head attention loss. 
As shown in Table 3, we compare the three types for the buffer layer: (1) " + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "inline_equation", + "content": "S \\rightarrow T" + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": " is the upsampling method that in-" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "13545" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 95, + 544, + 246 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 545, + 92 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 545, + 92 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 545, + 92 + ], + "type": "text", + "content": "Table 1. Waymo evaluation. Comparisons with different KD methods in the Waymo validation set. The best accuracy is indicated in bold, and the second-best accuracy is underlined." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 95, + 544, + 246 + ], + "lines": [ + { + "bbox": [ + 50, + 95, + 544, + 246 + ], + "spans": [ + { + "bbox": [ + 50, + 95, + 544, + 246 + ], + "type": "table", + "html": "
MethodVehiclePedestrianCyclist
Level 1Level 2Level 1Level 2Level 1Level 2
mAPmAPHmAPmAPHmAPmAPHmAPmAPHmAPmAPHmAPmAPH
Teacher [43]73.7273.1765.6165.1172.4361.7264.7354.9964.3062.6161.9160.28
Student (1/4)64.2263.5656.2155.6263.7253.2256.1446.7853.0151.7250.9949.75
Baseline64.7864.0556.9256.2664.8552.9857.3746.7554.7152.4652.6550.48
FitNet [20]65.1164.3857.2456.5864.8953.2957.3747.0054.9152.6152.8450.63
EOD-KD [2]66.5065.7958.5657.9265.9954.5858.4848.2555.1852.9353.1050.94
SE-SSD [47]65.9565.2258.0557.4065.3953.9857.9247.6955.0152.9852.9450.99
TOFD [45]64.0963.4356.1355.5566.2454.9858.5048.4554.9553.0652.8651.04
Obj. DGCNN [34]66.0765.3859.2758.5565.9854.4459.4249.1154.6552.6253.1350.93
SparseKD [38]65.2564.5956.9756.3867.4454.5459.2447.8355.5453.4553.6351.61
Ours67.4366.7259.4458.8167.2656.0259.7349.6156.0954.2453.9652.19
", + "image_path": "b4ae11852cf8a581801c2120641f0cd0208f8e4b6153246d07d165c81bbcd9a7.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 50, + 278, + 544, + 406 + ], + "blocks": [ + { + "bbox": [ + 47, + 255, + 545, + 277 + ], + "lines": [ + { + "bbox": [ + 47, + 255, + 545, + 277 + ], + "spans": [ + { + "bbox": [ + 47, + 255, + 545, + 277 + ], + "type": "text", + "content": "Table 2. nuScenes evaluation. Comparisons with different KD methods in the nuScenes validation set. The best accuracy is indicated in bold, and the second-best accuracy is underlined." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 278, + 544, + 406 + ], + "lines": [ + { + "bbox": [ + 50, + 278, + 544, + 406 + ], + "spans": [ + { + "bbox": [ + 50, + 278, + 544, + 406 + ], + "type": "table", + "html": "
MethodNDSmAPcartruckbustrailercon. veh.ped.motor.bicycletr. conebarrier
Teacher [43]60.1650.2584.0453.4864.2931.9012.5078.9344.0118.1854.8760.30
Student (1/4)50.2438.5277.8538.1851.3822.333.9571.5123.903.5143.0349.56
Baseline51.4839.1978.7237.9050.4722.423.5172.2926.254.6544.9150.77
FitNet [20]51.4238.9078.3037.4050.4022.203.8072.1025.704.2544.2050.60
EOD-KD [2]52.4939.8278.4038.6050.9022.703.9073.2028.205.3045.0051.97
SE-SSD [47]52.2139.5378.6938.5649.8123.703.7272.8628.274.2544.2451.18
TOFD [45]52.8840.5779.0639.7352.0324.513.5673.5129.585.6245.3452.79
Obj. DGCNN [34]52.9140.3478.9539.2453.3723.964.1372.9828.634.9944.7252.46
SparseKD [38]53.0140.2678.7839.5051.8723.643.3073.1729.345.7544.9852.26
Ours53.9041.3379.4840.3854.3526.443.5873.9130.215.3945.9053.70
", + "image_path": "1b83ee9b0276affc2b0494216fcc8f6b04411dc54d34695819e41779b29e102c.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 58, + 414, + 167, + 498 + ], + "blocks": [ + { + "bbox": [ + 58, + 414, + 167, + 498 + ], + "lines": [ + { + "bbox": [ + 58, + 414, + 167, + 498 + ], + "spans": [ + { + "bbox": [ + 58, + 414, + 167, + 498 + ], + "type": "image", + "image_path": "80e990b6ad24ecda37c55bb800204ee04ce5abaf1ee1fe120253260bac5e5570.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 167, + 414, + 274, + 498 + ], + "blocks": [ + { + "bbox": [ + 167, + 414, + 274, + 498 + ], + "lines": [ + { + "bbox": [ + 167, + 414, + 274, + 498 + ], + "spans": [ + { + "bbox": [ + 167, + 414, + 274, + 498 + ], + "type": "image", + "image_path": "af145eb234aaf14eb731dc65b35ac889d5912d6647354036b87edc55f3ee0d4c.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 180, + 500, + 260, + 510 + ], + "lines": [ + { + "bbox": [ + 180, + 500, + 260, + 510 + ], + "spans": [ + { + "bbox": [ + 180, + 500, + 260, + 510 + ], + "type": "text", + "content": "(b) Teacher output " + }, + { + "bbox": [ + 180, + 500, + 260, + 510 + ], + "type": "inline_equation", + "content": "(M^t)" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 59, + 511, + 165, + 594 + ], + "blocks": [ + { + "bbox": [ + 97, + 501, + 128, + 510 + ], + "lines": [ + { + "bbox": [ + 97, + 501, + 128, + 510 + ], + "spans": [ + { + "bbox": [ + 97, + 501, + 128, + 510 + ], + "type": "text", + "content": "(a) Input" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 59, + 511, + 165, + 594 + ], + "lines": [ + { + "bbox": [ + 59, + 511, + 165, + 594 + ], + "spans": [ + { + "bbox": [ + 59, + 511, + 165, + 594 + ], + "type": "image", + "image_path": "7834374594e0224e6c18ec4ac004861e6dc676bdc03cb7fae7929afc66109bd4.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 80, + 597, + 144, + 607 + ], + "lines": [ + { + "bbox": [ + 80, + 597, + 144, + 607 + ], + "spans": [ + { + "bbox": [ + 80, + 597, + 144, + 607 + ], + "type": "text", + "content": "(c) Encoder output" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 615, + 287, + 659 + ], + "lines": [ + { + "bbox": [ + 46, + 615, + 287, + 659 + ], + "spans": [ + { + "bbox": [ + 46, + 615, + 287, + 659 + ], + "type": "text", + "content": "Figure 4. Feature visualization on the proposed channel-wise autoencoder. (a) an example input image and (b) the output feature of the teacher network. (c) and (d) are the output images of encoder and decoder of the teacher, respectively." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 167, + 511, + 274, + 594 + ], + "blocks": [ + { + "bbox": [ + 167, + 511, + 274, + 594 + ], + "lines": [ + { + "bbox": [ + 167, + 511, + 274, + 594 + ], + "spans": [ + { + "bbox": [ + 167, + 511, + 274, + 594 + ], + "type": "image", + "image_path": "85010ca490235b842b38dd2f64829c1a656f8d2a99107e24823c6c348ddb8650.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 187, + 597, + 252, + 607 + ], + "lines": [ + { + "bbox": [ + 187, + 597, + 252, + 607 + ], + "spans": [ + { + "bbox": [ + 187, + 597, + 252, + 607 + ], + "type": "text", + "content": "(d) Decoder output" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 665, + 289, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 289, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 289, + 713 + ], + "type": "text", + "content": "creases the student's map-view feature to the teacher's feature. (2) " + }, + { + "bbox": [ + 46, + 665, + 289, + 713 + ], + "type": "inline_equation", + "content": "T \\to S" + }, + { + "bbox": [ + 46, + 665, + 289, + 713 + ], + "type": "text", + "content": " is the downsampling method that decreases the teacher's feature to the student's feature. (3) " + }, + { + "bbox": [ + 46, + 665, + 289, + 713 + ], + "type": "inline_equation", + "content": "(S + T) / 2" + }, + { + "bbox": [ + 46, + 665, + 289, + 713 + ], + "type": "text", + "content": " is that the teacher's feature is downsampled and the stu" + } + ] + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 313, + 426, + 539, + 480 + ], + "blocks": [ + { + "bbox": [ + 335, + 415, + 515, + 425 + ], + "lines": [ + { + "bbox": [ + 335, + 415, + 515, + 425 + ], + "spans": [ + { + "bbox": [ + 335, + 415, + 515, + 425 + ], + "type": "text", + "content": "Table 3. Buffer layer for different channel size." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 313, + 426, + 539, + 480 + ], + "lines": [ + { + "bbox": [ + 313, + 426, + 539, + 480 + ], + "spans": [ + { + "bbox": [ + 313, + 426, + 539, + 480 + ], + "type": "table", + "html": "
MethodVehiclePedestrianCyclistAvg.
S → T58.4148.9051.9053.07
T → S58.6248.7851.7553.05
(S + T) / 258.4748.8451.5452.95
", + "image_path": "d68497035f6c60584bef9a79bf856032e3cc065cfdf00c91a70dbf467308b4d7.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "type": "table", + "bbox": [ + 310, + 510, + 541, + 552 + ], + "blocks": [ + { + "bbox": [ + 306, + 487, + 545, + 509 + ], + "lines": [ + { + "bbox": [ + 306, + 487, + 545, + 509 + ], + "spans": [ + { + "bbox": [ + 306, + 487, + 545, + 509 + ], + "type": "text", + "content": "Table 4. Effect of shared and non-shared parameters for the autoencoder." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 310, + 510, + 541, + 552 + ], + "lines": [ + { + "bbox": [ + 310, + 510, + 541, + 552 + ], + "spans": [ + { + "bbox": [ + 310, + 510, + 541, + 552 + ], + "type": "table", + "html": "
MethodVehiclePedestrianCyclistAvg.
Non-shared56.2645.8548.2350.11
Shared58.4148.9051.9053.07
", + "image_path": "b245847452da89c4dbd5f04d151a630c1476243549f6a9fefad228ae39cf962d.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 569, + 545, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 569, + 545, + 604 + ], + "spans": [ + { + "bbox": [ + 304, + 569, + 545, + 604 + ], + "type": "text", + "content": "dent's feature is upsampled to the median size. The experiments show that the upsampling method performs better when considering all the classes." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 605, + 545, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 545, + 676 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 545, + 676 + ], + "type": "text", + "content": "In Table 4, we observe the performance difference when the autoencoder parameters are shared or not. From the result, we can conclude that the shared parameters achieve better performance because what we want to is for the student to learn the teacher's knowledge, not the independent model." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 677, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 677, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 545, + 712 + ], + "type": "text", + "content": "We investigate improvements made by our interchange transfer for KD without the head attention loss as shown in Table 5. Self-reconstruction is a method wherein the de" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "text", + "content": "13546" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 48, + 93, + 287, + 137 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 287, + 92 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 287, + 92 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 287, + 92 + ], + "type": "text", + "content": "Table 5. Comparison of different reconstruction methods for the autoencoder." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 93, + 287, + 137 + ], + "lines": [ + { + "bbox": [ + 48, + 93, + 287, + 137 + ], + "spans": [ + { + "bbox": [ + 48, + 93, + 287, + 137 + ], + "type": "table", + "html": "
MethodVehiclePedestrianCyclistAvg.
Self Recon.56.5747.2650.2951.37
Ours58.4148.9051.9053.07
", + "image_path": "6c2555dff997a6447462afe4b370b4df44bdd7781f280788334a88c2f9a95564.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 48, + 178, + 287, + 297 + ], + "blocks": [ + { + "bbox": [ + 47, + 144, + 287, + 177 + ], + "lines": [ + { + "bbox": [ + 47, + 144, + 287, + 177 + ], + "spans": [ + { + "bbox": [ + 47, + 144, + 287, + 177 + ], + "type": "text", + "content": "Table 6. Comparison of KD methods for the multiple detection head. KL loss and " + }, + { + "bbox": [ + 47, + 144, + 287, + 177 + ], + "type": "inline_equation", + "content": "l_{1}" + }, + { + "bbox": [ + 47, + 144, + 287, + 177 + ], + "type": "text", + "content": " loss denote that directly apply the loss function to all detection heads for KD." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 178, + 287, + 297 + ], + "lines": [ + { + "bbox": [ + 48, + 178, + 287, + 297 + ], + "spans": [ + { + "bbox": [ + 48, + 178, + 287, + 297 + ], + "type": "table", + "html": "
MethodVehiclePedestrianCyclistAvg.
Student55.6246.7849.7550.72
Baseline56.2646.7550.4851.16
KL loss [9]55.9245.0847.4949.50
l1loss55.6245.1048.7349.82
AT [44]56.8547.3450.3651.52
Linter56.4146.9050.9051.40
Lintra57.2047.1951.2351.87
Lattn57.1047.3451.7952.08
", + "image_path": "b74e0aa02246d70805e2c18728cc7898189db39df6691acba1233e104e49d144.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 303, + 287, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 303, + 287, + 386 + ], + "spans": [ + { + "bbox": [ + 46, + 303, + 287, + 386 + ], + "type": "text", + "content": "coder uses the corresponding input for the reconstruction and our interchange reconstruction is a method wherein the proposed " + }, + { + "bbox": [ + 46, + 303, + 287, + 386 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{it}" + }, + { + "bbox": [ + 46, + 303, + 287, + 386 + ], + "type": "text", + "content": " objective transfers the reconstructed knowledge to the opponent network. Our interchange transfer-based reconstruction achieves better results and note that our main task is not the reconstruction but the 3D object-based knowledge transfer for KD." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 388, + 287, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 388, + 287, + 532 + ], + "spans": [ + { + "bbox": [ + 46, + 388, + 287, + 532 + ], + "type": "text", + "content": "3D detection [4] [6] [43] [21] has the multiple detection head. To prove the superiority of the proposed head attention objective for 3D object detection, we make the KD comparison results against only multiple detection head without the autoencoder, as shown in Table 6. Since the heatmap head classifies objects and other heads regress 3D bounding box information, Applying KL loss and " + }, + { + "bbox": [ + 46, + 388, + 287, + 532 + ], + "type": "inline_equation", + "content": "l_{1}" + }, + { + "bbox": [ + 46, + 388, + 287, + 532 + ], + "type": "text", + "content": " loss to all detection heads has a negative effect. However, it is required to consider the relation of detection heads. In this respect, our method achieves better performance than the other KD methods which directly mimic the output of detection heads or simply employ attention mechanism." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 533, + 287, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 533, + 287, + 663 + ], + "spans": [ + { + "bbox": [ + 46, + 533, + 287, + 663 + ], + "type": "text", + "content": "Table 7 shows the overall effect of the proposed losses on the KD performances. We set up the experiments by adding each loss based on the supervised loss " + }, + { + "bbox": [ + 46, + 533, + 287, + 663 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{sup}}" + }, + { + "bbox": [ + 46, + 533, + 287, + 663 + ], + "type": "text", + "content": ". 
Specifically, the interchange transfer loss " + }, + { + "bbox": [ + 46, + 533, + 287, + 663 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{it}" + }, + { + "bbox": [ + 46, + 533, + 287, + 663 + ], + "type": "text", + "content": " improves on an average of " + }, + { + "bbox": [ + 46, + 533, + 287, + 663 + ], + "type": "inline_equation", + "content": "1.41\\%" + }, + { + "bbox": [ + 46, + 533, + 287, + 663 + ], + "type": "text", + "content": " mAPH and the compressed representation loss " + }, + { + "bbox": [ + 46, + 533, + 287, + 663 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{cr}" + }, + { + "bbox": [ + 46, + 533, + 287, + 663 + ], + "type": "text", + "content": " leads to a " + }, + { + "bbox": [ + 46, + 533, + 287, + 663 + ], + "type": "inline_equation", + "content": "0.94\\%" + }, + { + "bbox": [ + 46, + 533, + 287, + 663 + ], + "type": "text", + "content": " performance improvement. In the end, the head attention loss " + }, + { + "bbox": [ + 46, + 533, + 287, + 663 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{attn}}" + }, + { + "bbox": [ + 46, + 533, + 287, + 663 + ], + "type": "text", + "content": " helps to improve the performance and the final average mAPH is " + }, + { + "bbox": [ + 46, + 533, + 287, + 663 + ], + "type": "inline_equation", + "content": "53.54\\%" + }, + { + "bbox": [ + 46, + 533, + 287, + 663 + ], + "type": "text", + "content": ". We conclude that each proposed loss contributes positively to performance improvement in the 3D object detection-based KD task." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "text", + "content": "From Table 8, we observed quantitative comparisons of the computational complexity between the student network and the teacher network. Specifically, the student network, which reduced the channel by " + }, + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "inline_equation", + "content": "1/4" + }, + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "text", + "content": ", decreased about" + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 307, + 93, + 545, + 148 + ], + "blocks": [ + { + "bbox": [ + 305, + 70, + 545, + 92 + ], + "lines": [ + { + "bbox": [ + 305, + 70, + 545, + 92 + ], + "spans": [ + { + "bbox": [ + 305, + 70, + 545, + 92 + ], + "type": "text", + "content": "Table 7. Ablation results from investigating effects of different components." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 93, + 545, + 148 + ], + "lines": [ + { + "bbox": [ + 307, + 93, + 545, + 148 + ], + "spans": [ + { + "bbox": [ + 307, + 93, + 545, + 148 + ], + "type": "table", + "html": "
LsupLitLcrLattnVehiclePedestrianCyclistAvg.
55.6246.7849.7550.72
57.4148.2050.7752.13
58.4148.9051.9053.07
58.8149.6152.1953.54
", + "image_path": "224cfae5fa4ffe2c1c6bec0b49a043582a729dbeb738742fcacc5c565c5d5795.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 307, + 178, + 545, + 277 + ], + "blocks": [ + { + "bbox": [ + 306, + 155, + 545, + 177 + ], + "lines": [ + { + "bbox": [ + 306, + 155, + 545, + 177 + ], + "spans": [ + { + "bbox": [ + 306, + 155, + 545, + 177 + ], + "type": "text", + "content": "Table 8. Quantitative evaluation for model efficiency on Waymo dataset." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 178, + 545, + 277 + ], + "lines": [ + { + "bbox": [ + 307, + 178, + 545, + 277 + ], + "spans": [ + { + "bbox": [ + 307, + 178, + 545, + 277 + ], + "type": "table", + "html": "
MethodParams (M)FLOPS (G)mAPH / L2
PointPillars [13]4.8255.057.05
SECOND [35]5.384.557.23
Part-A2[24]4.687.157.43
IA-SSD [46]2.746.158.08
SparseKD-v0.64 [38]5.285.158.89
Teacher [43]5.2333.960.13
Ours: Student (1/2)1.5130.159.04
Ours: Student (1/4)0.645.153.54
", + "image_path": "255f0ff426dbc80d62db3348cc112c91b66f9fe4798bd57962e9322cef7d3bfa.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 289, + 545, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 289, + 545, + 434 + ], + "spans": [ + { + "bbox": [ + 304, + 289, + 545, + 434 + ], + "type": "text", + "content": "8.6 times compared to the parameters of the teacher, and FLOPS was reduced by 7.4 times. Above all, we should not overlook the fact that the performance of the student improved from " + }, + { + "bbox": [ + 304, + 289, + 545, + 434 + ], + "type": "inline_equation", + "content": "50.72\\%" + }, + { + "bbox": [ + 304, + 289, + 545, + 434 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 304, + 289, + 545, + 434 + ], + "type": "inline_equation", + "content": "53.54\\%" + }, + { + "bbox": [ + 304, + 289, + 545, + 434 + ], + "type": "text", + "content": " mAPH/L2 by our KD method. Furthermore, we apply our method to the student whose channel was reduced by half. The student's performance increases to " + }, + { + "bbox": [ + 304, + 289, + 545, + 434 + ], + "type": "inline_equation", + "content": "59.04\\%" + }, + { + "bbox": [ + 304, + 289, + 545, + 434 + ], + "type": "text", + "content": ", and the parameters and FLOPS compared to the teacher are reduced by 3.5 times and 2.6 times, respectively. Compared to lightweight network-based methods [13] [35] [24] [46], our student networks are able to derive stable performance with fewer parameters and FLOPS in 3D object detection." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 447, + 378, + 459 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 447, + 378, + 459 + ], + "spans": [ + { + "bbox": [ + 306, + 447, + 378, + 459 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 468, + 545, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 468, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 304, + 468, + 545, + 635 + ], + "type": "text", + "content": "In this paper, we propose a novel KD method that transfers knowledge to produce a lightweight point cloud detector. Our main method involves interchange transfer, which learns coarse knowledge by increasing the similarity of the compressed feature and fine knowledge by decompressing the map-view feature of the other side using the channel-wise autoencoder. Moreover, we introduce a method to guide multiple detection head using head relation-aware self-attention, which refines knowledge by considering the relation of instances and properties. Ablation studies demonstrate the effectiveness of our proposed algorithm, and extensive experiments on the two large-scale open datasets verify that our proposed method achieves competitive performance against state-of-the-art methods." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 635, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 635, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 635, + 545, + 713 + ], + "type": "text", + "content": "Acknowledgement. This work was partly supported by NRF-2022R1A2C1091402, BK21 FOUR program of the NRF of Korea funded by the Ministry of Education (NRF5199991014091), and IITP grant funded by the Korea government(MSIT) (No.2021-0-00951, Development of Cloud based Autonomous Driving AI learning Software; No. 
2021-0-02068, Artificial Intelligence Innovation Hub). W. Hwang is the corresponding author." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "13547" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 289, + 715 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 91, + 289, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 289, + 156 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 289, + 156 + ], + "type": "text", + "content": "[1] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11621-11631, 2020. 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 158, + 288, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 158, + 288, + 201 + ], + "spans": [ + { + "bbox": [ + 53, + 158, + 288, + 201 + ], + "type": "text", + "content": "[2] Guobin Chen, Wongun Choi, Xiang Yu, Tony Han, and Manmohan Chandraker. Learning efficient object detection models with knowledge distillation. Advances in neural information processing systems, 30, 2017. 2, 3, 5, 6, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 202, + 288, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 202, + 288, + 236 + ], + "spans": [ + { + "bbox": [ + 53, + 202, + 288, + 236 + ], + "type": "text", + "content": "[3] Yilun Chen, Shu Liu, Xiaoyong Shen, and Jiaya Jia. Fast point r-cnn. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9775-9784, 2019. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 236, + 288, + 291 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 236, + 288, + 291 + ], + "spans": [ + { + "bbox": [ + 53, + 236, + 288, + 291 + ], + "type": "text", + "content": "[4] Xiyang Dai, Yinpeng Chen, Bin Xiao, Dongdong Chen, Mengchen Liu, Lu Yuan, and Lei Zhang. Dynamic head: Unifying object detection heads with attentions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7373-7382, 2021. 2, 8" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 291, + 288, + 356 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 291, + 288, + 356 + ], + "spans": [ + { + "bbox": [ + 53, + 291, + 288, + 356 + ], + "type": "text", + "content": "[5] Martin Engelcke, Dushyant Rao, Dominic Zeng Wang, Chi Hay Tong, and Ingmar Posner. Vote3deep: Fast object detection in 3d point clouds using efficient convolutional neural networks. In 2017 IEEE International Conference on Robotics and Automation (ICRA), pages 1355-1361. IEEE, 2017. 
2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 357, + 288, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 357, + 288, + 401 + ], + "spans": [ + { + "bbox": [ + 53, + 357, + 288, + 401 + ], + "type": "text", + "content": "[6] Runzhou Ge, Zhuangzhuang Ding, Yihan Hu, Yu Wang, Sijia Chen, Li Huang, and Yuan Li. Afdet: Anchor free one stage 3d object detection. arXiv preprint arXiv:2006.12671, 2020. 2, 8" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 402, + 288, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 402, + 288, + 456 + ], + "spans": [ + { + "bbox": [ + 53, + 402, + 288, + 456 + ], + "type": "text", + "content": "[7] Xiaoyang Guo, Shaoshuai Shi, Xiaogang Wang, and Hongsheng Li. Liga-stereo: Learning lidar geometry aware representations for stereo-based 3d detector. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3153-3163, 2021. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 458, + 288, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 458, + 288, + 512 + ], + "spans": [ + { + "bbox": [ + 53, + 458, + 288, + 512 + ], + "type": "text", + "content": "[8] Chenhang He, Hui Zeng, Jianqiang Huang, Xian-Sheng Hua, and Lei Zhang. Structure aware single-stage 3d object detection from point cloud. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11873-11882, 2020. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 514, + 288, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 514, + 288, + 546 + ], + "spans": [ + { + "bbox": [ + 53, + 514, + 288, + 546 + ], + "type": "text", + "content": "[9] Geoffrey Hinton, Oriol Vinyals, Jeff Dean, et al. Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531, 2(7), 2015. 1, 3, 6, 8" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 547, + 288, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 547, + 288, + 612 + ], + "spans": [ + { + "bbox": [ + 48, + 547, + 288, + 612 + ], + "type": "text", + "content": "[10] Zhen Huang, Xu Shen, Jun Xing, Tongliang Liu, Xinmei Tian, Houqiang Li, Bing Deng, Jianqiang Huang, and Xian-Sheng Hua. Revisiting knowledge distillation: An inheritance and exploration framework. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3579-3588, 2021. 1, 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 613, + 288, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 288, + 656 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 288, + 656 + ], + "type": "text", + "content": "[11] Jangho Kim, SeongUk Park, and Nojun Kwak. Paraphrasing complex network: Network compression via factor transfer. Advances in neural information processing systems, 31, 2018. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 658, + 288, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 288, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 288, + 689 + ], + "type": "text", + "content": "[12] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 
6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 691, + 288, + 715 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 288, + 715 + ], + "type": "text", + "content": "[13] Alex H Lang, Sourabh Vora, Holger Caesar, Lubing Zhou, Jiong Yang, and Oscar Beijbom. Pointpillars: Fast encoders" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 547, + 715 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 326, + 73, + 547, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 547, + 106 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 547, + 106 + ], + "type": "text", + "content": "for object detection from point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12697-12705, 2019. 1, 2, 3, 8" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 107, + 545, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 107, + 545, + 149 + ], + "spans": [ + { + "bbox": [ + 308, + 107, + 545, + 149 + ], + "type": "text", + "content": "[14] Bo Li. 3d fully convolutional network for vehicle detection in point cloud. In 2017 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 1513-1518. IEEE, 2017. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 151, + 545, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 151, + 545, + 194 + ], + "spans": [ + { + "bbox": [ + 308, + 151, + 545, + 194 + ], + "type": "text", + "content": "[15] Quanquan Li, Shengying Jin, and Junjie Yan. Mimicking very efficient network for object detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6356-6364, 2017. 1, 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 195, + 545, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 195, + 545, + 249 + ], + "spans": [ + { + "bbox": [ + 308, + 195, + 545, + 249 + ], + "type": "text", + "content": "[16] Seyed Iman Mirzadeh, Mehrdad Farajtabar, Ang Li, Nir Levine, Akihiro Matsukawa, and Hassan Ghasemzadeh. Improved knowledge distillation via teacher assistant. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 5191-5198, 2020. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 251, + 545, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 251, + 545, + 304 + ], + "spans": [ + { + "bbox": [ + 308, + 251, + 545, + 304 + ], + "type": "text", + "content": "[17] Jiquan Ngiam, Benjamin Caine, Wei Han, Brandon Yang, Yuning Chai, Pei Sun, Yin Zhou, Xi Yi, Ouais Alsharif, Patrick Nguyen, et al. Starnet: Targeted computation for object detection in point clouds. arXiv preprint arXiv:1908.11069, 2019. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 304, + 545, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 304, + 545, + 350 + ], + "spans": [ + { + "bbox": [ + 308, + 304, + 545, + 350 + ], + "type": "text", + "content": "[18] Charles R Qi, Wei Liu, Chenxia Wu, Hao Su, and Leonidas J Guibas. Frustum pointnets for 3d object detection from rgb data. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 918-927, 2018. 
2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 350, + 545, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 350, + 545, + 403 + ], + "spans": [ + { + "bbox": [ + 308, + 350, + 545, + 403 + ], + "type": "text", + "content": "[19] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 404, + 545, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 404, + 545, + 448 + ], + "spans": [ + { + "bbox": [ + 308, + 404, + 545, + 448 + ], + "type": "text", + "content": "[20] Adriana Romero, Nicolas Ballas, Samira Ebrahimi Kahou, Antoine Chassang, Carlo Gatta, and Yoshua Bengio. Fitnets: Hints for thin deep nets. arXiv preprint arXiv:1412.6550, 2014. 1, 3, 6, 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 449, + 545, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 449, + 545, + 504 + ], + "spans": [ + { + "bbox": [ + 308, + 449, + 545, + 504 + ], + "type": "text", + "content": "[21] Danila Rukhovich, Anna Vorontsova, and Anton Konushin. Imvoxelnet: Image to voxels projection for monocular and multi-view general-purpose 3d object detection. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 2397-2406, 2022. 2, 8" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 505, + 545, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 505, + 545, + 559 + ], + "spans": [ + { + "bbox": [ + 308, + 505, + 545, + 559 + ], + "type": "text", + "content": "[22] Shaoshuai Shi, Chaoxu Guo, Li Jiang, Zhe Wang, Jianping Shi, Xiaogang Wang, and Hongsheng Li. Pv-rcnn: Pointvoxel feature set abstraction for 3d object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10529–10538, 2020. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 559, + 545, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 559, + 545, + 612 + ], + "spans": [ + { + "bbox": [ + 308, + 559, + 545, + 612 + ], + "type": "text", + "content": "[23] Shaoshuai Shi, Xiaogang Wang, and Hongsheng Li. Pointrcnn: 3d object proposal generation and detection from point cloud. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 770-779, 2019. 1, 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 613, + 545, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 613, + 545, + 669 + ], + "spans": [ + { + "bbox": [ + 308, + 613, + 545, + 669 + ], + "type": "text", + "content": "[24] Shaoshuai Shi, Zhe Wang, Jianping Shi, Xiaogang Wang, and Hongsheng Li. From points to parts: 3d object detection from point cloud with part-aware and part-aggregation network. IEEE transactions on pattern analysis and machine intelligence, 43(8):2647-2664, 2020. 8" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 670, + 545, + 715 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 670, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 308, + 670, + 545, + 715 + ], + "type": "text", + "content": "[25] Weijing Shi and Raj Rajkumar. Point-gnn: Graph neural network for 3d object detection in a point cloud. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1711-1719, 2020. 2" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "13548" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 714 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "text", + "content": "[26] Martin Simony, Stefan Milzy, Karl Amendey, and Horst-Michael Gross. Complex-yolo: An euler-region-proposal for real-time 3d object detection on point clouds. In Proceedings of the European Conference on Computer Vision (ECCV) Workshops, pages 0–0, 2018. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 129, + 287, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 129, + 287, + 163 + ], + "spans": [ + { + "bbox": [ + 48, + 129, + 287, + 163 + ], + "type": "text", + "content": "[27] Leslie N Smith. Cyclical learning rates for training neural networks. In 2017 IEEE winter conference on applications of computer vision (WACV), pages 464-472. IEEE, 2017. 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 163, + 287, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 163, + 287, + 217 + ], + "spans": [ + { + "bbox": [ + 48, + 163, + 287, + 217 + ], + "type": "text", + "content": "[28] Wonchul Son, Jaemin Na, Junyong Choi, and Wonjun Hwang. Densely guided knowledge distillation using multiple teacher assistants. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9395-9404, 2021. 1, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 219, + 287, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 219, + 287, + 262 + ], + "spans": [ + { + "bbox": [ + 48, + 219, + 287, + 262 + ], + "type": "text", + "content": "[29] Shuran Song and Jianxiong Xiao. Deep sliding shapes for amodal 3d object detection in rgb-d images. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 808-816, 2016. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 264, + 287, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 264, + 287, + 330 + ], + "spans": [ + { + "bbox": [ + 48, + 264, + 287, + 330 + ], + "type": "text", + "content": "[30] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. Scalability in perception for autonomous driving: Waymo open dataset. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2446-2454, 2020. 
6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 331, + 287, + 375 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 331, + 287, + 375 + ], + "spans": [ + { + "bbox": [ + 48, + 331, + 287, + 375 + ], + "type": "text", + "content": "[31] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 376, + 287, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 376, + 287, + 430 + ], + "spans": [ + { + "bbox": [ + 48, + 376, + 287, + 430 + ], + "type": "text", + "content": "[32] Tao Wang, Li Yuan, Xiaopeng Zhang, and Jiashi Feng. Distilling object detectors with fine-grained feature imitation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4933-4942, 2019. 2, 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 432, + 287, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 432, + 287, + 487 + ], + "spans": [ + { + "bbox": [ + 48, + 432, + 287, + 487 + ], + "type": "text", + "content": "[33] Yue Wang, Alireza Fathi, Abhijit Kundu, David A Ross, Caroline Pantofaru, Tom Funkhouser, and Justin Solomon. Pillar-based object detection for autonomous driving. In European Conference on Computer Vision, pages 18-34. Springer, 2020. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 488, + 287, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 488, + 287, + 520 + ], + "spans": [ + { + "bbox": [ + 48, + 488, + 287, + 520 + ], + "type": "text", + "content": "[34] Yue Wang and Justin M Solomon. Object dgenn: 3d object detection using dynamic graphs. Advances in Neural Information Processing Systems, 34, 2021. 3, 5, 6, 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 522, + 287, + 554 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 522, + 287, + 554 + ], + "spans": [ + { + "bbox": [ + 48, + 522, + 287, + 554 + ], + "type": "text", + "content": "[35] Yan Yan, Yuxing Mao, and Bo Li. Second: Sparsely embedded convolutional detection. Sensors, 18(10):3337, 2018. 1, 2, 8" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 555, + 287, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 555, + 287, + 600 + ], + "spans": [ + { + "bbox": [ + 48, + 555, + 287, + 600 + ], + "type": "text", + "content": "[36] Bin Yang, Wenjie Luo, and Raquel Urtasun. Pixor: Realtime 3d object detection from point clouds. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pages 7652-7660, 2018. 1, 2, 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 601, + 287, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 601, + 287, + 644 + ], + "spans": [ + { + "bbox": [ + 48, + 601, + 287, + 644 + ], + "type": "text", + "content": "[37] Jing Yang, Brais Martinez, Adrian Bulat, and Georgios Tzimiropoulos. Knowledge distillation via softmax regression representation learning. In International Conference on Learning Representations, 2020. 
3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 647, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 287, + 689 + ], + "type": "text", + "content": "[38] Jihan Yang, Shaoshuai Shi, Runyu Ding, Zhe Wang, and Xiaojuan Qi. Towards efficient 3d object detection with knowledge distillation. arXiv preprint arXiv:2205.15156, 2022. 3, 4, 6, 7, 8" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "type": "text", + "content": "[39] Zetong Yang, Yanan Sun, Shu Liu, and Jiaya Jia. 3dssd: Point-based 3d single stage object detector. In Proceedings" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 609 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "text", + "content": "of the IEEE/CVF conference on computer vision and pattern recognition, pages 11040-11048, 2020. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 96, + 545, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 96, + 545, + 140 + ], + "spans": [ + { + "bbox": [ + 307, + 96, + 545, + 140 + ], + "type": "text", + "content": "[40] Zetong Yang, Yanan Sun, Shu Liu, Xiaoyong Shen, and Jiaya Jia. Std: Sparse-to-dense 3d object detector for point cloud. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1951-1960, 2019. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 141, + 545, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 141, + 545, + 185 + ], + "spans": [ + { + "bbox": [ + 307, + 141, + 545, + 185 + ], + "type": "text", + "content": "[41] Maosheng Ye, Shuangjie Xu, and Tongyi Cao. Hvnet: Hybrid voxel network for lidar based 3d object detection. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1631-1640, 2020. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 186, + 545, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 186, + 545, + 239 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 545, + 239 + ], + "type": "text", + "content": "[42] Zeng Yihan, Chunwei Wang, Yunbo Wang, Hang Xu, Chaoqiang Ye, Zhen Yang, and Chao Ma. Learning transferable features for point cloud detection via 3d contrastive cotraining. Advances in Neural Information Processing Systems, 34, 2021. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 241, + 545, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 241, + 545, + 285 + ], + "spans": [ + { + "bbox": [ + 307, + 241, + 545, + 285 + ], + "type": "text", + "content": "[43] Tianwei Yin, Xingyi Zhou, and Philipp Krahenbuhl. Center-based 3d object detection and tracking. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11784-11793, 2021. 
1, 2, 3, 4, 6, 7, 8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 286, + 545, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 286, + 545, + 341 + ], + "spans": [ + { + "bbox": [ + 308, + 286, + 545, + 341 + ], + "type": "text", + "content": "[44] Sergey Zagoruyko and Nikos Komodakis. Paying more attention to attention: Improving the performance of convolutional neural networks via attention transfer. 5th international conference on Learning Representations, Apr. 2017. 1, 8" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 342, + 545, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 342, + 545, + 385 + ], + "spans": [ + { + "bbox": [ + 308, + 342, + 545, + 385 + ], + "type": "text", + "content": "[45] Linfeng Zhang, Yukang Shi, Zuoqiang Shi, Kaisheng Ma, and Chenglong Bao. Task-oriented feature distillation. Advances in Neural Information Processing Systems, 33:14759-14771, 2020. 6, 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 387, + 545, + 452 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 387, + 545, + 452 + ], + "spans": [ + { + "bbox": [ + 308, + 387, + 545, + 452 + ], + "type": "text", + "content": "[46] Yifan Zhang, Qingyong Hu, Guoquan Xu, Yanxin Ma, Jianwei Wan, and Yulan Guo. Not all points are equal: Learning highly efficient point-based detectors for 3d lidar point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18953-18962, 2022. 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 453, + 545, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 453, + 545, + 508 + ], + "spans": [ + { + "bbox": [ + 308, + 453, + 545, + 508 + ], + "type": "text", + "content": "[47] Wu Zheng, Weiliang Tang, Li Jiang, and Chi-Wing Fu. Sessd: Self-ensembling single-stage object detector from point cloud. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14494–14503, 2021. 3, 6, 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 510, + 545, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 510, + 545, + 564 + ], + "spans": [ + { + "bbox": [ + 308, + 510, + 545, + 564 + ], + "type": "text", + "content": "[48] Yin Zhou, Pei Sun, Yu Zhang, Dragomir Anguelov, Jiyang Gao, Tom Ouyang, James Guo, Jiquan Ngiam, and Vijay Vasudevan. End-to-end multi-view fusion for 3d object detection in lidar point clouds. In Conference on Robot Learning, pages 923-932. PMLR, 2020. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 565, + 547, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 565, + 547, + 609 + ], + "spans": [ + { + "bbox": [ + 308, + 565, + 547, + 609 + ], + "type": "text", + "content": "[49] Yin Zhou and Oncel Tuzel. Voxelnet: End-to-end learning for point cloud based 3d object detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4490-4499, 2018. 
1, 2, 3" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "13549" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/pCON_ Polarimetric Coordinate Networks for Neural Scene Representations/762a8e4d-373c-4bab-83f9-f1ad8a1ae928_content_list.json b/2023/pCON_ Polarimetric Coordinate Networks for Neural Scene Representations/762a8e4d-373c-4bab-83f9-f1ad8a1ae928_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..6424298b0aa085def02a7a5a8741bfc43a3d469d --- /dev/null +++ b/2023/pCON_ Polarimetric Coordinate Networks for Neural Scene Representations/762a8e4d-373c-4bab-83f9-f1ad8a1ae928_content_list.json @@ -0,0 +1,2240 @@ +[ + { + "type": "text", + "text": "pCON: Polarimetric Coordinate Networks for Neural Scene Representations", + "text_level": 1, + "bbox": [ + 98, + 130, + 870, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Henry Peters\\*,1, Yunhao Ba\\*,2, Achuta Kadambi\\*1,2", + "bbox": [ + 282, + 180, + 683, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1Computer Science Department, University of California, Los Angeles (UCLA)", + "bbox": [ + 169, + 198, + 797, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ Electrical and Computer Engineering Department, UCLA", + "bbox": [ + 254, + 215, + 715, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "hpeters@ucla.edu, yhba@ucla.edu, achuta@ee.ucla.edu", + "bbox": [ + 254, + 236, + 707, + 250 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 286, + 313, + 301 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Neural scene representations have achieved great success in parameterizing and reconstructing images, but current state of the art models are not optimized with the preservation of physical quantities in mind. While current architectures can reconstruct color images correctly, they create artifacts when trying to fit maps of polar quantities. We propose polarimetric coordinate networks (pCON), a new model architecture for neural scene representations aimed at preserving polarimetric information while accurately parameterizing the scene. Our model removes artifacts created by current coordinate network architectures when reconstructing three polarimetric quantities of interest. All code and data can be found at this link: https://visual.ee.ucla.edu/pcon.htm.", + "bbox": [ + 75, + 318, + 472, + 532 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 561, + 209, + 575 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Neural scene representations are a popular and useful tool in many computer vision tasks, but these models are optimized to preserve visual content, not physical information. Current state-of-the-art models create artifacts due to the presence of a large range of spatial frequencies when reconstructing polarimetric data. 
Many tasks in polarimetric imaging rely on precise measurements, and thus even small artifacts are a hindrance for downstream tasks that would like to leverage neural reconstructions of polarization images. In this work we present pCON, a new architecture for neural scene representations. pCON leverages images' singular value decompositions to effectively allocate network capacity to learning the more difficult spatial frequencies at each pixel. Our model reconstructs polarimetric images without the artifacts introduced by state-of-the-art models.", + "bbox": [ + 75, + 587, + 468, + 813 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The polarization of light passing through a scene contains a wealth of information, and while current neural representations can represent single images accurately, but they produce noticeable visual artifacts when trying to represent", + "bbox": [ + 75, + 814, + 468, + 875 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "multiple polarimetric quantities concurrently.", + "bbox": [ + 500, + 287, + 799, + 303 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We propose a new architecture for neural scene representations that can effectively reconstruct polarimetric images without artifacts. Our model reconstructs color images accurately while also ensuring the quality of three important polarimetric quantities, the degree $(\\rho)$ and angle $(\\phi)$ of linear polarization (DoLP and AoLP), and the unpolarized intensity $I_{un}$ . This information is generally captured using images of a scene taken through linear polarizing filters at four different angles. Instead of learning a representation of these images, our model operates directly on the DoLP, AoLP and unpolarized intensity maps. When learning to fit these images, current coordinate network architectures produce artifacts in the predicted DoLP and unpolarized intensity maps. To alleviate this issue, we take inspiration from traditional image compression techniques and fit images using their singular value decompositions. Images can be compressed by reconstructing them using only a subset of their singular values [28]. By utilizing different, non-overlapping sets of singular values to reconstruct an image, the original image can be recovered by summing the individual reconstructions together. Our model is supervised in a coarse-to-fine manner, which helps the model to represent both the low and high frequency details present in maps of polarimetric quantities without introducint noise or tiling artifacts. A demonstration of the efficacy our model can be seen in Fig. 1 and Table 1. Furthermore, our model is capable of representing images at varying levels of detail, creating a tradeoff between performance and model size without retraining.", + "bbox": [ + 496, + 304, + 893, + 742 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1.1. 
Contributions", + "text_level": 1, + "bbox": [ + 500, + 755, + 643, + 770 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To summarize, the contributions of our work include:", + "bbox": [ + 519, + 780, + 870, + 794 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- a coordinate network architecture for neural scene representations of polarimetric images;", + "- a training strategy for our network which learns a series of representations using different sets of singular values, allowing for a trade-off between performance and model size without retraining;" + ], + "bbox": [ + 517, + 810, + 890, + 898 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal contribution.", + "bbox": [ + 94, + 886, + 205, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "16579", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/02728a7bd97bf70d83e908f9c7d4024761e0668f0685986a3e4b5e84b089a572.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 107, + 87, + 256, + 203 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/a723e30dcd4a5344d5b0f93b1f6f63fd91913e15137a08d35fa2997b2ec60923.jpg", + "image_caption": [ + "GT" + ], + "image_footnote": [], + "bbox": [ + 107, + 204, + 254, + 349 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/6ec418072ba275e19e442ec3ff2af11448aa68989cdba9ddf7748a1d2ef590e8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 259, + 88, + 408, + 203 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/698fde1817b754765f8fdf902b1ca60cafd29d67dd04f421813d2bf59ccb134b.jpg", + "image_caption": [ + "SIREN [52]" + ], + "image_footnote": [], + "bbox": [ + 261, + 203, + 406, + 349 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/dbb35906ed7f177af3fc44779e4ae46970f959ec5c25a271294599f1fb91f5b3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 410, + 88, + 558, + 203 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/7184b56b20ddc195b635ac1241896d3248b20ea336cfe88437f2cff2a537304e.jpg", + "image_caption": [ + "ACORN [34]" + ], + "image_footnote": [], + "bbox": [ + 411, + 204, + 557, + 349 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/c9e9aa7da74cec2789601569d4debbc3190a5d1f93f3ddb1fadad321e05326dd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 560, + 88, + 710, + 203 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/d01da4bf678750ed3a2f92a7850c772288c407bb76790b460574d26e49c6fb4a.jpg", + "image_caption": [ + "ReLU P.E", + "Figure 1. Our model reconstructs the training scene more accurately than other architectures. Our model does not have the noise pattern present in reconstructions from SIREN [52] or a ReLU MLP with positional encoding [38], nor does it show tiling artifacts as in ACORN's [34] prediction." 
+ ], + "image_footnote": [], + "bbox": [ + 562, + 204, + 709, + 349 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/2e6ce86ef45c7a0f29ff4a6718fc698b4411a77fb7ec4a74bec36760446ac21c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 714, + 88, + 862, + 203 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/736438e7bbd7e9e917f46f446a2213d208cd203a763e3145224b1ba67865d43a.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 714, + 204, + 859, + 349 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- results demonstrating that our model reconstructs maps of polarimetric quantities without the artifacts created by current state-of-the-art approaches.", + "bbox": [ + 96, + 449, + 468, + 494 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related work", + "text_level": 1, + "bbox": [ + 76, + 513, + 215, + 530 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Neural scene representations", + "text_level": 1, + "bbox": [ + 76, + 541, + 334, + 558 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The aim of neural scene representations is to parameterize a two or three dimensional scene in the weights of a neural network in order to accomplish some other task related to the scene. Most papers fall into one of three categories. Explicit representations model the scene directly, which allows them to quickly accomplish tasks such as scene reconstruction [9, 31], novel view synthesis [4, 17, 22, 36, 37, 41, 50, 53, 56] or relighting [60]. However, since the scene is modelled explicitly, these representations require more memory than the alternatives.", + "bbox": [ + 75, + 566, + 468, + 717 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Implicit representations do not model the scene directly, but instead use an MLP to map from a coordinate in either 2D or 3D space to some desired output value. This value could be the observed radiance or pixel intensity [11, 16, 38, 44, 46, 54], occupancy of a pixel or voxel [35, 45], a quantity related to shape [5, 10, 13, 18, 19, 21, 23, 26, 30, 44, 47, 54, 58], or any other quantity of interest. The final category of neural scene representations is a hybrid of the first two. The only work that fits directly into this category is ACORN [34], which accomplishes state-of-the-art performance on image and volume fitting by combining a coordinate network with an explicit grid or voxel represent-", + "bbox": [ + 75, + 719, + 470, + 901 + ], + "page_idx": 1 + }, + { + "type": "table", + "img_path": "images/f1566f989fce43094b50570770020803e1534e5f266cca02bf288e74717dc1cf.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model | Noise Pattern | Tiling Artifacts | Resizing Artifacts
ACORN [34] | Medium | High | Not Supported
ReLU w/P.E. [38] | Medium | None | Yes
SIREN [52] | High | None | Yes
Proposed | Minimal | None | Minimal
", + "bbox": [ + 501, + 446, + 877, + 534 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Table 1. Our model shows fewer artifacts than current state-of-the-art architectures. Since ACORN divides an image into a discrete grid, in order to query an image at a different resolution it is necessary to also reform the grid. The grid is created online during training, so it is not feasible to query a model at a different resolution without retraining.", + "bbox": [ + 496, + 544, + 893, + 628 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "tation. Similar to ACORN, other works divide the scene into local regions and learn each of these regions implicitly [10,26,49].", + "bbox": [ + 496, + 654, + 890, + 699 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To our knowledge, this work is the first to highlight the artifacts caused by existing neural scene representation architectures when fitting polarimetric data. While we are one of the first works to examine polarization and neural scene representations in the same context, we would like to acknowledge that PANDORA [14], a concurrent work, also utilizes polarization and neural scene representations. However, they focus on radiance decomposition rather than 2D reconstruction.", + "bbox": [ + 496, + 700, + 892, + 835 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Polarization vision", + "text_level": 1, + "bbox": [ + 500, + 845, + 679, + 861 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Polarization is useful in a variety of computer vision tasks. It can be used to estimate surface normals [1, 2,", + "bbox": [ + 500, + 869, + 893, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "16580", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "7, 15, 24, 25, 33, 39, 43] or refine depth maps to represent incredibly fine details [27]. It can be used in radiometric calibration [55], dynamic interferometry [32], facial reconstruction [6] and separation of diffuse and specular reflection [39, 42]. It also can be used to remove the effects of scattering media like haze [51, 57, 61] and water [57], to augment the performance of computer vision tasks in the presence of transparent objects [12, 29, 40], or even to assist in imaging objects in space [20]. Traditionally, polarimetric data is captured by rotating a linear polarizing filter in front of a camera [3, 59], but recent advances in machine vision have produced cameras that can capture multiple polar images in a single shot.", + "bbox": [ + 75, + 90, + 472, + 287 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our work uses a neural network to accurately parameterize polarimetric information captured from a scene. This allows for easier storage and transport of polarimetric data and facilitates its use in other deep learning based tasks.", + "bbox": [ + 75, + 291, + 470, + 354 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 76, + 377, + 169, + 392 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Polarization physics", + "text_level": 1, + "bbox": [ + 76, + 405, + 267, + 422 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Polarized light can be modelled as a sine wave, and can thus be parameterized by three quantities. The degree of linear polarization (DoLP) is a quantity between 0 and 1 that represents how much of the total intensity of the wave is polarized and unpolarized. 
Completely polarized light will have a DoLP of 1, and completely unpolarized light will have a DoLP of 0. The angle of linear polarization (AoLP) corresponds to the orientation of the plane in which the wave is oscillating. The AoLP takes values from 0 to $\\pi$ radians. The final quantity of interest is the unpolarized intensity, $I_{un}$ , of the wave, which corresponds to its amplitude. With these three quantities, it is possible to render a scene as viewed through a linear polarization filter at any angle using the following equation:", + "bbox": [ + 75, + 433, + 472, + 643 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nI \\left(\\phi_ {p o l}\\right) = I _ {u n} \\left(1 + \\rho c o s \\left(2 \\left(\\phi - \\phi_ {p o l}\\right)\\right)\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 138, + 664, + 468, + 681 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $I_{un}$ denotes unpolarized intensity, $\\rho$ denotes DoLP, $\\phi$ denotes AoLP and $\\phi_{pol}$ denotes the desired filter angle at each pixel. This equation allows us to render images under any number of filter angles by saving only three quantities per pixel. In this paper we leverage the above equation to learn a representation for just these quantities, rather than the four original images.", + "bbox": [ + 75, + 700, + 468, + 806 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The DoLP $(\\rho)$ and AoLP $(\\phi)$ have uses beyond just rendering images. In the shape from polarization problem, these quantities are used to calculate the zenith and azimuth angles, respectively, of per-pixel surface normals. This relationship has been studied in previous work [1, 7]. Specifically, the azimuth angle, $\\theta_{a}$ , of a surface normal can be", + "bbox": [ + 75, + 809, + 470, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "calculated from the following relationship:", + "bbox": [ + 498, + 90, + 784, + 107 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\phi = \\left\\{ \\begin{array}{l} \\theta_ {a}, \\text {w h e n d i f f u s e r e f l e c t i o n d o m i n a t e s} \\\\ \\theta_ {a} - \\frac {\\pi}{2}, \\text {w h e n s p e c u l a r r e f l e c t i o n d o m i n a t e s} \\end{array} \\right. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 514, + 119, + 890, + 170 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "DoLP, $\\rho$ , is related to the zenith angle, $\\theta_z$ , in terms of the refractive index, $n$ , of a surface. When diffuse reflection is dominant, the relationship can be written as:", + "bbox": [ + 498, + 172, + 890, + 217 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\rho = \\frac {\\left(n - \\frac {1}{n}\\right) ^ {2} \\sin^ {2} \\left(\\theta_ {z}\\right)}{2 + 2 n ^ {2} - \\left(n - \\frac {1}{n}\\right) ^ {2} \\sin^ {2} \\left(\\theta_ {z}\\right) + 4 \\cos \\left(\\theta_ {z}\\right) \\sqrt {n ^ {2} - \\sin^ {2} \\left(\\theta_ {z}\\right)}}. 
\\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 519, + 229, + 890, + 258 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "When specular reflection dominates, the relationship is different:", + "bbox": [ + 498, + 268, + 890, + 299 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\rho = \\frac {2 \\sin^ {2} (\\theta_ {z}) \\cos (\\theta_ {z}) \\sqrt {n ^ {2} - \\sin^ {2} (\\theta_ {z})}}{n ^ {2} - \\sin^ {2} (\\theta_ {z}) - n ^ {2} \\sin^ {2} (\\theta_ {z}) + 2 \\sin^ {4} (\\theta_ {z})}. \\qquad (4)\n$$\n", + "text_format": "latex", + "bbox": [ + 526, + 311, + 890, + 354 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "$\\rho, \\phi$ and $I_{un}$ can be calculated directly from a vector known as the Stokes vector at each pixel. This vector has four elements. The first three elements deal with the linear polarization of light, and the final one represents the circular polarization of the wave. In this paper we will focus on linear polarization. To measure the Stokes vector of a scene, at least three images are needed, taken through linear polarizing filters at 0, 45 and 90 degrees. Since the camera used in our setup also captures an image with a filter at 135 degrees, we use four images in our calculations of the Stokes vectors for robustness to noise.", + "bbox": [ + 498, + 366, + 890, + 532 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Learning from coarse to fine", + "text_level": 1, + "bbox": [ + 500, + 544, + 754, + 559 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Current coordinate network architectures produce artifacts when fitting polarimetric images. SIREN [52] and similar architectures treat every coordinate equally when training, and they produce noise patterns in the resulting images when the spatial frequencies present in the training data differ widely (eg. the maximum magnitude frequency differs by an order of magnitude). In the polarimetric images we obtained, we found the maximum frequency magnitude of some AoLP maps was around $10^{7}$ , while the maximum magnitude for the intensity image was only around $10^{6}$ . ACORN [34] does not treat each coordinate in the same way, but its dynamic tiling strategy looks for regions of low variance in order to create larger blocks. This is difficult to do when attempting to fit multiple images containing varying frequencies. The resulting reconstructions end up looking blocky, and fine detail is lost in the process. Our method removes these artifacts by learning image representations using their singular value decompositions. One idea to help in reconstructing high frequency details could be to use an image's Fourier decomposition. We found that in practice the SVD works better for our use case. This is due to the propagation of errors during the forward and inverse", + "bbox": [ + 496, + 568, + 890, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "16581", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/61c05bfe9d55edf34492a8775d5f5cae962d9566528b3196c889b8f7ef74ed6b.jpg", + "image_caption": [ + "Figure 2. pCON learns to fit an image by learning a series of reconstructions with different singular values. The model is organized into a series of $n_b$ parallel MLPs (denoted here as $g_i$ ) with sine activations. A 2D coordinate vector representing a point on an image is passed through all bands separately ( $g_0$ to $g_n$ ). 
To supervise the training of each band, we reconstruct the full image maps of each quantity, and then calculate the MSE between the model prediction, $\\hat{y}_i$ and their respective ground truth values, $y_i$ , at the input coordinate. The final output is the sum of all the intermediate reconstructions, which yields a set of images similar to the training data." + ], + "image_footnote": [], + "bbox": [ + 83, + 95, + 890, + 344 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Fourier transforms. The SVD does not require shifting between the spatial and frequency domains, which allows errors to propagate less than if we were supervising on Fourier frequencies. The singular value decomposition of an $m \\times n$ matrix $\\mathbf{A}$ is a set of matrices $\\mathbf{U} \\in \\mathbb{R}^{m \\times m}$ , $\\boldsymbol{\\Sigma} \\in \\mathbb{R}^{m \\times n}$ and $\\mathbf{V}^{\\top} \\in \\mathbb{R}^{n \\times n}$ such that $\\mathbf{A} = \\mathbf{U} \\boldsymbol{\\Sigma} \\mathbf{V}^{\\top}$ . This matrix product can be further decomposed:", + "bbox": [ + 75, + 452, + 472, + 559 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbf {U} \\boldsymbol {\\Sigma} \\mathbf {V} ^ {\\top} = \\sum_ {i} ^ {r} \\sigma_ {i} \\mathbf {u} _ {i} \\mathbf {v} _ {i} ^ {\\top} \\tag {5} \\\\ = \\sum_ {i = 0} ^ {a _ {1}} \\sigma_ {i} \\mathbf {u} _ {i} \\mathbf {v} _ {i} ^ {\\top} + \\sum_ {i = a _ {1}} ^ {a _ {2}} \\sigma_ {i} \\mathbf {u} _ {i} \\mathbf {v} _ {i} ^ {\\top} + \\dots + \\sum_ {i = a _ {n}} ^ {r} \\sigma_ {i} \\mathbf {u} _ {i} \\mathbf {v} _ {i} ^ {\\top} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 101, + 566, + 468, + 650 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $r$ is the rank of $\\mathbf{A}$ , $\\mathbf{u}_i$ is the $i$ -th column of $\\mathbf{U}$ , $\\mathbf{v}_i$ is the $i$ -th column of $\\mathbf{V}$ , and $\\sigma_i$ is the $i$ -th singular value. In the case of an image, this means that it is possible to calculate different pieces of the decomposition individually, and then sum them to obtain the original image. We leverage this property of the SVD in our model architecture. Using just the largest singular values to reconstruct an image yields a result containing only the low frequency details of the original [28]. As more singular values are used in the reconstruction, higher frequency details are captured. A single coordinate may have features in many reconstructions, and others may have features in only a few. Our network learns a series of reconstructions in parallel, which effectively allocates more model capacity to coordinates which have details at numerous frequencies. Since we are not dividing the image into a grid like ACORN, our reconstruc", + "bbox": [ + 75, + 657, + 472, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "tions do not suffer from tiling artifacts, and they also do not exhibit the obvious noise pattern present in reconstructions from SIREN or ReLU MLPs.", + "bbox": [ + 496, + 452, + 893, + 498 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Network design", + "text_level": 1, + "bbox": [ + 498, + 507, + 658, + 523 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our network design takes inspiration from SIREN [52]. The original SIREN architecture was similar to an ordinary MLP, except that it used the sine activation function. 
Our network is divided into a series of $n_b$ fully-connected blocks which map from a 2D input image coordinate to the AoLP $(\\phi)$ , DoLP $(\\rho)$ and unpolarized intensity $I_{un}$ at that pixel. We call each of these MLPs a band of the network, and we will notate them as $g_i$ for $i \\in 0,1,\\dots,n_b - 1$ . To fit an image, we first take the singular value decomposition of the map of each polar quantity:", + "bbox": [ + 496, + 530, + 893, + 681 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\Phi} = \\mathbf {U} _ {\\phi} \\boldsymbol {\\Sigma} _ {\\phi} \\mathbf {V} _ {\\phi} ^ {\\top},\n$$\n", + "text_format": "latex", + "bbox": [ + 635, + 691, + 751, + 710 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\rho} = \\mathbf {U} _ {\\rho} \\boldsymbol {\\Sigma} _ {\\rho} \\mathbf {V} _ {\\rho} ^ {\\top}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 640, + 713, + 890, + 733 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {I} _ {u n} = \\mathbf {U} _ {u n} \\boldsymbol {\\Sigma} _ {u n} \\mathbf {V} _ {u n} ^ {\\top}.\n$$\n", + "text_format": "latex", + "bbox": [ + 629, + 734, + 761, + 753 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$\\Phi, \\rho$ and $\\mathbf{I}_{un}$ represent the full image maps of AoLP $(\\phi)$ , DoLP $(\\rho)$ and $I_{un}$ , respectively. The above equations are obtained by interpreting these maps as matrices and then using Eq. (5). We now define a series of $n_b$ thresholds for $\\Phi, \\rho$ and $\\mathbf{I}_{un}$ as $t_{\\phi,i}, t_{\\rho,i}$ and $t_{\\mathrm{un},i}$ , respectively. These thresholds dictate which singular values will be used to supervise each band of the network. We also define the ground truth intermediate reconstructions of each quantity using a subset of singular values as $y_{\\phi,i}, y_{\\rho,i}$ and $y_{\\mathrm{un},i}$ . We denote their", + "bbox": [ + 496, + 763, + 893, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "16582", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "corresponding predictions as $\\hat{y}_{\\phi,i}$ , $\\hat{y}_{\\rho,i}$ and $\\hat{y}_{\\mathrm{un},i}$ . We can use Eq. (5) to decompose each of the SVDs from Eq. (6) into a set of sums. For example, we can write $\\Phi$ as follows:", + "bbox": [ + 75, + 90, + 468, + 137 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\ny _ {\\phi , i} = \\sum_ {j = t _ {\\phi , i - 1}} ^ {t _ {\\phi , i}} \\sigma_ {\\phi , j} \\mathbf {u} _ {\\phi , j} \\mathbf {v} _ {\\phi , j} ^ {\\top}. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 173, + 143, + 468, + 186 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The reconstructions for the other quantities can be written with their respective SVDs and thresholds similar to Eq. (7).", + "bbox": [ + 76, + 193, + 468, + 223 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Each band learns a single reconstruction for these quantities at each pixel.", + "bbox": [ + 76, + 223, + 468, + 253 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\ng _ {i} (x, y) = \\hat {y} _ {i} = (\\hat {y} _ {\\phi , i}, \\hat {y} _ {\\rho , i}, \\hat {y} _ {\\mathrm {u n}, i}). 
\\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 161, + 261, + 468, + 277 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Here, $x$ and $y$ constitute the 2D pixel coordinate vector that serves as the input to the network. This coordinate is passed through each band of the network to compute all $\\hat{y}_i$ , and then the fully reconstructed image is calculated as $\\sum_{i}\\hat{y}_{i}$ . See Fig. 2 for a visualization of this entire process.", + "bbox": [ + 75, + 284, + 468, + 359 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. Loss functions", + "text_level": 1, + "bbox": [ + 76, + 367, + 223, + 382 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our network outputs a set of $n_b$ images. For each band, we compute the MSE between the cumulative sum of all outputs up to, and including, the current band. We define multiplicative factors for the three polar quantities as $\\lambda_{\\phi}$ , $\\lambda_{\\rho}$ and $\\lambda_{\\mathrm{un}}$ . We also define factors for each band as $\\lambda_{b,i}$ . The loss of the network can be calculated as follows, where $L$ is the loss function and $x$ is the data point for which the loss is being calculated:", + "bbox": [ + 75, + 388, + 468, + 510 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} L (x) = \\sum_ {i} \\lambda_ {b, i} \\sum_ {j = 0} ^ {i} \\lambda_ {\\phi} \\left(\\hat {y} _ {\\phi , j} - y _ {\\phi , j}\\right) ^ {2} \\tag {9} \\\\ + \\lambda_ {\\rho} (\\hat {y} _ {\\rho , j} - y _ {\\rho , j}) ^ {2} + \\lambda_ {\\mathrm {u n}} (\\hat {y} _ {\\mathrm {u n}, j} - y _ {\\mathrm {u n}, j}) ^ {2}. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 116, + 517, + 468, + 580 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.5. Implementation details", + "text_level": 1, + "bbox": [ + 76, + 585, + 290, + 601 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.5.1 Data", + "text_level": 1, + "bbox": [ + 76, + 609, + 163, + 622 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We collected all of our own data using a Flir Blackfly S RGB polarization camera. From this camera's images, it is possible to calculate the desired polarimetric quantities using the physics discussed in Sec. 3.1. We release two datasets with this paper. The first contains the six scenes used to create figures in this paper. The second set contains twenty four additional scenes for use in validating our approach. The captured scenes represent a diverse set of polarization effects. The DoLP and AoLP values span the entire ranges (zero to one for DoLP and zero to pi for AoLP) of possible values. We capture interesting polarization phenomena such as transparent and reflective surfaces. All released images have a resolution of $1024 \\times 1024$ .", + "bbox": [ + 75, + 632, + 468, + 828 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.5.2 Hyperparameters", + "text_level": 1, + "bbox": [ + 76, + 845, + 254, + 861 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We built all models in PyTorch [48]. We began all experiments with a learning rate of $1 \\times 10^{-5}$ , and then multiplied", + "bbox": [ + 75, + 869, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "it by 0.1 at 5000 epochs. Models were trained for a total of 10000 epochs. We also set the unitless frequency parameter $\\omega_0$ of our sine activations to 90. 
For our best model, we used a total of 10 bands, each with 2 hidden layers and a hidden dimension of 256.", + "bbox": [ + 496, + 90, + 890, + 165 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We chose the singular value thresholds of each band based on the sum of the magnitudes of singular values. Band one was given roughly $90\\%$ of the sum, then the others $99\\%$ , $99.9\\%$ , and so on. Exact values for $\\lambda_{b_i}$ used in all presented experiments can be found in the supplement.", + "bbox": [ + 496, + 167, + 890, + 241 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For our experiments, we set $\\lambda_{\\phi} = 1.0$ , $\\lambda_{\\rho} = 5.0$ and $\\lambda_{\\mathrm{un}} = 5.0$ .", + "bbox": [ + 500, + 242, + 890, + 271 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 500, + 284, + 630, + 300 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we present comparisons between our model, SIREN [52], ACORN [34] and an MLP using ReLU activations and positional encoding, as used in NeRF [38]. We changed the number of parameters and output values of the baseline architectures, since originally these models were designed to fit only a single image at a time. We also changed the frequency parameter $\\omega_0$ of the SIREN sine activations to 90 to match the parameter used in our own model. All our models were trained using the training strategy discussed in Sec. 3.5.", + "bbox": [ + 496, + 309, + 890, + 459 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Validation of proposed failure case", + "text_level": 1, + "bbox": [ + 500, + 468, + 800, + 484 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We hypothesized the reason for the poor performance of baseline models when fitting polarimetric images was due to the presence of details at high spatial frequencies in the captured AoLP maps. To validate this hypothesis, we performed low-pass filtering on AoLP maps of a scene and then fit a model on the resulting AoLP, DoLP and $\\mathbf{I}_{un}$ maps. We found a clear trend in the reconstruction quality as we filtered out higher percentages of high spatial frequencies. All models performed better when fewer high frequency details were present in the target images. This aligns with our idea that these details create difficult scenes for networks to reconstruct. For the scene in Fig. 3, the AoLP reconstruction SSIMs with different amounts of frequencies removed from the GT AoLP maps can be seen in Table 2.", + "bbox": [ + 496, + 491, + 890, + 702 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/de75df019bacd942c24557e9b56e723774a6dafc574818fa4c51857709e0bf47.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
% Highest Frequencies Removed | SIREN [52] | ACORN [34] | ReLU P.E. [38]
0% | 0.60 | 0.51 | 0.63
75% | 0.54 | 0.80 | 0.93
80.5% | 0.89 | 0.97 | 0.98
93.75% | 0.95 | 0.99 | 0.99
", + "bbox": [ + 500, + 712, + 890, + 777 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 2. All baseline models reconstruct AoLP maps better when details at higher spatial frequencies are filtered out. This trend validates our hypothesis that images with high frequency details are more difficult for a network to reconstruct.", + "bbox": [ + 496, + 787, + 890, + 842 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. Comparison with others", + "text_level": 1, + "bbox": [ + 500, + 847, + 720, + 863 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We trained both our model and the baselines to predict AoLP $(\\Phi)$ , DoLP $(\\rho)$ and $\\mathbf{I}_{un}$ maps directly. Quali", + "bbox": [ + 500, + 869, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "16583", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/894ac63c8e6f5837dbd9828496ed69bf8abcee697de7b7d44226dcf7419db036.jpg", + "image_caption": [ + "GT" + ], + "image_footnote": [], + "bbox": [ + 88, + 104, + 243, + 224 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/219672bd146029b50af6bef2d4f2582e90313335177502790d4a8c0ec26fc225.jpg", + "image_caption": [ + "SIREN [52]", + "SSIM/PSNR: 0.60/14.32" + ], + "image_footnote": [], + "bbox": [ + 246, + 104, + 403, + 226 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/4b0ba5d5299bc6bb52b5e971debba80e0322daa05ac40dd28118f14ee7ffe6fb.jpg", + "image_caption": [ + "ACORN [34]", + "SSIM/PSNR: 0.51/15.99" + ], + "image_footnote": [], + "bbox": [ + 406, + 104, + 563, + 226 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/d7ec3e4ed80f1182d343c1558e0a86b338935748308aa4f646857122dca29c5c.jpg", + "image_caption": [ + "ReLU P.E. 
[38]", + "SSIM/PSNR: 0.63/17.18" + ], + "image_footnote": [], + "bbox": [ + 566, + 104, + 722, + 226 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/78e5251bffc2a2a040676706242845dae107efe0588ed652a9842e431f03c553.jpg", + "image_caption": [ + "Ours", + "SSIM/PSNR: 0.77/16.57" + ], + "image_footnote": [], + "bbox": [ + 723, + 104, + 883, + 226 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/206b75d1ec28eba3527f38730b6db8ec9d66c652c4aa1ba6348f618db675bb11.jpg", + "image_caption": [ + "AoLP", + "DoLP" + ], + "image_footnote": [], + "bbox": [ + 86, + 241, + 243, + 362 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/8da038295be72acd62631f8f71f1341c349f38cb63016f8be6e9a1707236e427.jpg", + "image_caption": [ + "SSIM/PSNR: 0.73/29.83" + ], + "image_footnote": [], + "bbox": [ + 246, + 241, + 403, + 362 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/9f0017eefa0a779cf8a6cd643d76b9401bc6e8e734907c866007124c1863ac2e.jpg", + "image_caption": [ + "SSIM/PSNR: 0.80/31.78" + ], + "image_footnote": [], + "bbox": [ + 406, + 241, + 563, + 362 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/c53b8f4cb5579fdbebc74c94e747e74788f917aca4fc3b41b2d54fa45e445e94.jpg", + "image_caption": [ + "SSIM/PSNR: 0.79/32.06" + ], + "image_footnote": [], + "bbox": [ + 566, + 241, + 722, + 362 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/4758abb3f2fc53c31d87da3a469712556e0aa964ce1cf461476be42389ff52d6.jpg", + "image_caption": [ + "SSIM: 0.82/34.56" + ], + "image_footnote": [], + "bbox": [ + 723, + 241, + 883, + 362 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/a371f3204275bd6c0aaa77cee788ca9027b40fdd877be2d94b15253e4de5382b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 88, + 377, + 243, + 522 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/26056e5fb16626ad87bea0c504957cf7d1f2c8f77583efc5c843907d85af0968.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 246, + 377, + 403, + 522 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/fb5aa89caaa1b15d862b2f0e9dd53831566d0dd71cbf26be800f07b6a036f5ff.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 406, + 377, + 562, + 522 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/cfdb3ca828d32759100f4f68dc7ec63365cdce609436299de164a0a4be84ad20.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 566, + 377, + 722, + 522 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/54c3d7daa3605fe026e756029b47cebd0af1fdfb5af5301a2e340b3df09c10de.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 723, + 377, + 880, + 522 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/d864f0d4325f7617845db59c764cc4a668493db20c6d71fe8e8e68fb461f1388.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 86, + 523, + 243, + 645 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/8ac2ef750c0bd8d7d9a189a174af23d9ce98f8e2c538b45959dd3b63f0fcf84a.jpg", + "image_caption": [ + "SSIM/PSNR: 0.59/26.42" + ], + "image_footnote": [], + "bbox": [ + 246, + 523, + 403, + 645 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/83870ba8fe998c9046d9ba6feff9bc288e91e9c58ecf5d1946a9989ca00d7288.jpg", + "image_caption": [ + "SSIM/PSNR: 0.77/28.43" + ], + "image_footnote": [], + "bbox": [ + 406, + 523, + 563, + 645 + ], + "page_idx": 5 + }, + { + 
"type": "image", + "img_path": "images/9b07c75b56190125d26fc54b6df16a8b12f5557b37a655c0abb1afaae6dc9978.jpg", + "image_caption": [ + "SSIM/PSNR: 0.71/29.58" + ], + "image_footnote": [], + "bbox": [ + 566, + 523, + 722, + 645 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/8106318c099bf3b71b3b5de39926488f31d3886d0233125d74418fc7ab2d7de5.jpg", + "image_caption": [ + "SSIM/PSNR: 0.89/34.82" + ], + "image_footnote": [], + "bbox": [ + 723, + 523, + 883, + 645 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/8a40f8d92246985a22b1bbd5416e3c6b04bd7c1f879a9c5c96bd49ea8d92b904.jpg", + "image_caption": [ + "Un", + "Figure 3. Our model shows higher SSIM and fewer artifacts on predicted $\\Phi$ , $\\rho$ and $\\mathbf{I}_{un}$ maps. Baseline models cause noise or tiling which is clearly visible on the checkerboard pattern on the floor, where all three quantities take large values. The artifacts are present on objects exhibiting both specular reflections, like the floor, and diffuse reflections, like the wall and doors in the background." + ], + "image_footnote": [], + "bbox": [ + 88, + 660, + 243, + 804 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/98d88428449630d099696dea35ad62ee492fd2d7db84c6cd0b76212e63e369e6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 246, + 660, + 403, + 804 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/341859f58aaab3e72eb3cd4651d7fa10cf3bfee85d205741c9ba500ac7aee56b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 406, + 660, + 562, + 804 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/f6a518f6b76776f65c51a7106d12d995e4d0ed70541d258e2b0b3198091be6c1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 566, + 660, + 722, + 804 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/be7421ee9c4e38f2f692300c794cf956350e3925399473df3b793a58c41c22b4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 723, + 660, + 880, + 804 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "16584", + "bbox": [ + 480, + 945, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e67bcc94689dddb038c7f79dcb9e3bb084b487f258ce89580cb074e29b0004d6.jpg", + "image_caption": [ + "Figure 4. Our model can more accurately reconstruct RGB images taken through different polarizing filter angles when compared to SIREN [52], ACORN [34] and a ReLU MLP [38] with positional encoding. 
The images reconstructed here are the scene as viewed through a linear polarizer oriented at $0^{\\circ}$" + ], + "image_footnote": [], + "bbox": [ + 107, + 89, + 864, + 347 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/66d00254f3228d5be010764d4712f8d1062b608523cc0f37c91b6b3e0ac3eaa8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 81, + 425, + 176, + 496 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e6a40800a18645bf213449b0c15d773fb744e8b6d83c4b2f53a38cb261f74753.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 83, + 498, + 174, + 566 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/31487e577aaf55b395f5f8d380eb899fad8d8d5345f20e470895f563cdf1ea7d.jpg", + "image_caption": [ + "GT" + ], + "image_footnote": [], + "bbox": [ + 83, + 566, + 174, + 637 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c84cf0120ade6da08702c7a070e1ef93891f5e4b3a5bd60618f42cb6112b85de.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 179, + 425, + 272, + 496 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/649f17527a60f65c786f89ea05a02ce34d05256fb198b59a516718bbad72dbe0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 179, + 497, + 272, + 566 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/12614e630b9e65dc6eac9610fd8e867c0203de53f61a18a75de8bacf03bfcd98.jpg", + "image_caption": [ + "1 band" + ], + "image_footnote": [], + "bbox": [ + 179, + 566, + 272, + 637 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/abb8ce532ae5b737c544cde659a2e027d0a872c6355b790033fb5f1970b688cf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 274, + 425, + 367, + 496 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/622473d89340986fb169b15080195530e6295f6500efa7b0641b28132a4cb8bf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 274, + 497, + 367, + 566 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e70480a6a1d77b181db6326f428bf2a1cd6d827291f234e6cf7bdbfb43593f54.jpg", + "image_caption": [ + "4 bands" + ], + "image_footnote": [], + "bbox": [ + 274, + 566, + 367, + 637 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/29308e05b1197201051ad12ef8bf555dfbdce11afebf6672d48d1da33d66ba2a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 370, + 425, + 464, + 496 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/5f28cc4526343d84b6c91deaa7dfef579598ba1cc0ad0605bc9c4c5bba9ff6ab.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 370, + 497, + 462, + 566 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b4ed5028ce6eda169e1a9dd073a0472ee7405d464aeb6c3d83552c279be89412.jpg", + "image_caption": [ + "Full model", + "Figure 5. As the number of bands used in the reconstruction increases, so does the quality of the image. Even with a single band the reconstruction is visually close to the original." + ], + "image_footnote": [], + "bbox": [ + 370, + 566, + 462, + 637 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "tative and quantitative results can be found in Fig. 3. 
Our model performs yeilds better PSNR and SSIM than all baselines and it also does not produce the tiling artifacts or the noise patterns present in the reconstructions created by other models.", + "bbox": [ + 75, + 731, + 468, + 806 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Accuracy and model size trade-off", + "text_level": 1, + "bbox": [ + 76, + 816, + 377, + 832 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In order to fit an image with a smaller or larger model, current architectures require a full retraining with a different number of parameters. The structure of our model allows us to provide a tradeoff between model size and reconstruction", + "bbox": [ + 75, + 839, + 470, + 900 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/3006662a51133d83bb14d245e9c9684af74131cf2a0fd84faf3b32fbd6294cac.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model | Φ(↑) | ρ(↑) | Iun(↑) | # Params. (↓)
Ours (1 band) | 0.12/10.83 | 0.50/22.87 | 0.74/26.58 | 130K
Ours (2 bands) | 0.32/14.66 | 0.64/28.40 | 0.91/34.74 | 270K
Ours (3 bands) | 0.42/14.42 | 0.65/28.59 | 0.92/34.43 | 400K
Ours (4 bands) | 0.51/16.32 | 0.65/28.71 | 0.92/34.62 | 530K
Ours (5 bands) | 0.64/17.68 | 0.67/28.87 | 0.92/36.74 | 670K
Ours (Full model) | 0.79/18.08 | 0.76/31.75 | 0.92/36.00 | 1.3M
SIREN [52] | 0.59/15.96 | 0.67/28.20 | 0.70/28.23 | 660K
ACORN [34] | 0.48/17.01 | 0.73/29.96 | 0.82/29.85 | 530K
ReLU [38] w/P.E. | 0.64/18.30 | 0.76/30.99 | 0.81/32.13 | 660K
", + "bbox": [ + 503, + 426, + 888, + 545 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3. As more bands are used, the number of parameters grows along with the resulting performance (SSIM/PSNR). The metrics shown here are averages across our whole dataset.", + "bbox": [ + 498, + 555, + 890, + 597 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "accuracy without retraining. Each band of the model learns a representation of the image when reconstructed with a different set of singular values. If the downstream task doesn't require incredibly high accuracy, and the user would rather save and transport a smaller set of model weights, they can just save the weights from the first band of the network and reconstruct the image with only the singular values from that band, or vice versa if more accuracy is required. A visualization of reconstruction quality using different numbers of bands can be seen in Fig. 5. See Table 3 for quantitative results using different bands of our network. With a similar number of parameters to the baseline models, it achieves comparable performance to all baseline architectures. Our full model outperforms all baselines on predicting AoLP $(\\Phi)$ and $\\mathbf{I}_{un}$ maps. It is also worth noting that our full model achieves significant compression over storing raw data. The combined memory size of the AoLP, DoLP and $\\mathbf{I}_{un}$ maps", + "bbox": [ + 496, + 643, + 893, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "16585", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e8a748f34c8407db99d3fc22351abb39391121ed1ba0deb0ce1f34254235d847.jpg", + "image_caption": [ + "GT" + ], + "image_footnote": [], + "bbox": [ + 140, + 104, + 305, + 231 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/3a799a3959456abb28f5b11eb6a9db3843f0f60ec9effd7382718067d9c9c661.jpg", + "image_caption": [ + "SIREN [52]" + ], + "image_footnote": [], + "bbox": [ + 316, + 104, + 480, + 231 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/bdcc38bafd91773a855aea6aa7264f9315c9c900f420c0c31f3cb7679a963096.jpg", + "image_caption": [ + "ReLU P.E. [38]" + ], + "image_footnote": [], + "bbox": [ + 488, + 104, + 656, + 231 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6dd43389def5f008bbc1518ccb1cff2e547fc5173a837f82dd4890c785393944.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 663, + 104, + 828, + 231 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1f9978b468cadc6b997f8cb2b6c6a6f96e89c6781160056f51ce6ac3cbb0a60b.jpg", + "image_caption": [ + "Figure 6. Both SIREN [52] and the ReLU MLP [38] with positional encoding show artifacts when queried at a different resolution than they were trained on. Our model does not. 
We trained models at a resolution of $1024 \\times 1024$ and queried them at a resolution of $512 \\times 512$" + ], + "image_footnote": [], + "bbox": [ + 142, + 232, + 223, + 308 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6cfc053ac7741db96885e2d107be96e4557b6775b50c8efb2609888200cb0976.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 223, + 232, + 303, + 308 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b34f5f4be91ed9ad1dd6457b904dfb8e61a4a817068735796680244200acd835.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 316, + 232, + 395, + 308 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8aae601fef50ca2cf4eaff39fc9c4b8eff02bc0cd424f74e3d6cae508c579435.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 398, + 232, + 478, + 309 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5ea0d19bad2190dfe707fcb8e515330ea3c3a214e5282e448e15ed67036ea3df.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 491, + 232, + 570, + 308 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e25c1b92080f6fd6a635fbb603067a3022b048a5516790978d8761a9809b3850.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 573, + 232, + 651, + 308 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/3cbda8f9cc88ca0bb2d0da2e5cb4c2f418c68dd635ea70e075017793ff02a7b2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 665, + 232, + 743, + 309 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/70d6f0bb7083b1dc106f5d20ecbe104c7c65044cf2b3d2543b917f8de3f9032a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 746, + 232, + 825, + 309 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "is 36 megabytes (MB), while the size of our full model is only 5 MB. Representing images with our model allows us to scale image size without scaling memory footprint as quickly. In this work we use small images, but the memory saved when reconstructing images at the mega or gigapixel scale would be significant.", + "bbox": [ + 75, + 392, + 468, + 482 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4. RGB reconstruction", + "text_level": 1, + "bbox": [ + 76, + 484, + 267, + 498 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In addition to reconstructing the DoLP $(\\rho)$ , AoLP $(\\Phi)$ and $\\mathbf{I}_{un}$ maps with our model, we also present results for reconstructing the original RGB images captured by the camera. For a specific polarizing filter angle, we can reconstruct the value of a pixel captured by the camera through that filter using Eq. (1). Our model removes the artifacts present in the reconstructions from all baseline comparisons and retains more detail comparatively. See Fig. 4 for a visualization of reconstructions of images taken through a linear polarizer oriented at $0^{\\circ}$ .", + "bbox": [ + 75, + 507, + 468, + 657 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5. Multiple resolution interpolation", + "text_level": 1, + "bbox": [ + 76, + 659, + 364, + 675 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We present results for fitting an image at one resolution and querying it at a second resolution. In this section we only compare to SIREN [52] and a ReLU MLP [38], as the dynamic tiling strategy of ACORN [34] does not allow us to simply query the representation at a different resolution. 
We train both models on the original scene at a resolution of $1024 \\times 1024$ and then query them at a resolution of $512 \\times 512$ . Both baselines show artifacts when queried at this new resolution, while our model does not have this issue. In Fig. 6 we visualize these results on $\\mathbf{I}_{un}$ maps.", + "bbox": [ + 75, + 681, + 468, + 834 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 76, + 844, + 194, + 859 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In summary, we have presented an attempt at creating neural representations of polarimetric information without", + "bbox": [ + 76, + 869, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "the artifacts introduced by current models. Compared to existing methods, our model shows an increase in image reconstruction quality on AoLP, DoLP and $\\mathbf{I}_{un}$ maps, in addition to effectively removing the artifacts we were targeting. Having a compact representation of polarimetric images will facilitate future research in areas where this data is required.", + "bbox": [ + 496, + 392, + 892, + 498 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "While our work provides noticeable improvement over current methods, it is not perfect. To achieve state of the art performance on reconstructing AoLP maps, we need quite a few bands in our network, which makes the number of parameters quite large compared to other architectures. A valuable next step could be creating a model that could achieve the same performance as ours while cutting down on the memory footprint. Furthermore, we only demonstrated the effectiveness of this approach on 2D data, since polarization is not well studied in three dimensions. Validating our approach on 3D data would be a useful next step, once the field has developed a greater understanding of the underlying physics. We motivated our method using polarimetric data, but there are many types of data in computational imaging [8]. Our method will be valuable in representing multiple physical quantities of a scene at once whenever at least one measurement contains high frequency details or noise, and future research could extending this work by demonstrating its effectiveness on other types of data encountered in computational imaging.", + "bbox": [ + 496, + 507, + 892, + 811 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements We thank members of the Visual Machines Group (VMG) at UCLA for feedback and support. A.K. was supported by an NSF CAREER award IIS-2046737 and Army Young Investigator Program (YIP) Award.", + "bbox": [ + 496, + 824, + 892, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "16586", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] G.A. Atkinson and E.R. Hancock. Recovery of surface orientation from diffuse polarization. IEEE Transactions on Image Processing, 15(6):1653-1664, 2006. 2, 3", + "[2] Gary A Atkinson. Polarisation photometric stereo. Computer Vision and Image Understanding, 160:158-167, 2017. 2", + "[3] Gary A Atkinson and Jürgen D Ernst. High-sensitivity analysis of polarization by surface reflection. Machine Vision and Applications, 29(7):1171-1189, 2018. 
3", + "[4] Benjamin Attal, Selena Ling, Aaron Gokaslan, Christian Richardt, and James Tompkin. Matryodshka: Real-time 6dof video view synthesis using multi-sphere images. In European Conference on Computer Vision, pages 441-459. Springer, 2020. 2", + "[5] Matan Atzmon and Yaron Lipman. Sal: Sign agnostic learning of shapes from raw data. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2565-2574, 2020. 2", + "[6] Dejan Azinović, Olivier Maury, Christophe Hery, Matthias Nießner, and Justus Thies. High-res facial appearance capture from polarized smartphone images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2023. 3", + "[7] Yunhao Ba, Alex Gilbert, Franklin Wang, Jina Yang, Rui Chen, Yiqin Wang, Lei Yan, Boxin Shi, and Achuta Kadambi. Deep shape from polarization. In European Conference on Computer Vision, pages 554-571. Springer, 2020. 2, 3", + "[8] Ayush Bhandari, Achuta Kadambi, and Ramesh Raskar. Computational Imaging. The MIT Press, 2022. 8", + "[9] Michael Broxton, John Flynn, Ryan Overbeck, Daniel Erickson, Peter Hedman, Matthew Duvall, Jason Dourgarian, Jay Busch, Matt Whalen, and Paul Debevec. Immersive light field video with a layered mesh representation. ACM Transactions on Graphics (TOG), 39(4):86-1, 2020. 2", + "[10] Rohan Chabra, Jan E Lenssen, Eddy Ilg, Tanner Schmidt, Julian Straub, Steven Lovegrove, and Richard Newcombe. Deep local shapes: Learning local sdf priors for detailed 3d reconstruction. In European Conference on Computer Vision, pages 608-625. Springer, 2020. 2", + "[11] Eric R. Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. Pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5799-5809, June 2021. 2", + "[12] Tongbo Chen, Hendrik P. A. Lensch, Christian Fuchs, and Hans-Peter Seidel. Polarization and phase-shifting for 3d scanning of translucent objects. In 2007 IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8, 2007. 3", + "[13] Zhiqin Chen and Hao Zhang. Learning implicit fields for generative shape modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5939-5948, 2019. 2", + "[14] Akshit Dave, Yongyi Zhao, and Ashok Veeraraghavan. Pandora: Polarization-aided neural decomposition of radiance." + ], + "bbox": [ + 76, + 114, + 470, + 902 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part VII, pages 538-556. Springer, 2022. 2", + "[15] O. Drbohlav and R. Sara. Unambiguous determination of shape from photometric stereo with unknown light sources. In Proceedings Eighth IEEE International Conference on Computer Vision. ICCV 2001, volume 1, pages 581-586 vol.1, 2001. 2", + "[16] SM Ali Eslami, Danilo Jimenez Rezende, Frederic Besse, Fabio Viola, Ari S Morcos, Marta Garnelo, Avraham Ruderman, Andrei A Rusu, Ivo Danihelka, Karol Gregor, et al. Neural scene representation and rendering. Science, 360(6394):1204-1210, 2018. 2", + "[17] John Flynn, Michael Broxton, Paul Debevec, Matthew DuVall, Graham Fyffe, Ryan Overbeck, Noah Snavely, and Richard Tucker. Deepview: View synthesis with learned gradient descent. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2367-2376, 2019. 2", + "[18] Kyle Genova, Forrester Cole, Avneesh Sud, Aaron Sarna, and Thomas Funkhouser. Local deep implicit functions for 3d shape. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4857-4866, 2020. 2", + "[19] Kyle Genova, Forrester Cole, Daniel Vlasic, Aaron Sarna, William T Freeman, and Thomas Funkhouser. Learning shape templates with structured implicit functions. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7154-7164, 2019. 2", + "[20] Ciriaco Goddi, Ivan Martí-Vidal, Hugo Messias, Geoffrey C Bower, Avery E Broderick, Jason Dexter, Daniel P Marrone, Monika Moscibrodzka, Hiroshi Nagai, Juan Carlos Algaba, et al. Polarimetric properties of event horizon telescope targets from alma. The Astrophysical Journal Letters, 910(1):L14, 2021. 3", + "[21] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit geometric regularization for learning shapes. arXiv preprint arXiv:2002.10099, 2020. 2", + "[22] Peter Hedman, Julien Philip, True Price, Jan-Michael Frahm, George Drettakis, and Gabriel Brostow. Deep blending for free-viewpoint image-based rendering. ACM Transactions on Graphics (TOG), 37(6):1-15, 2018. 2", + "[23] Philipp Henzler, Niloy J Mitra, and Tobias Ritschel. Escaping Plato's cave: 3d shape from adversarial rendering. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9984-9993, 2019. 2", + "[24] Cong Phuoc Huynh, Antonio Robles-Kelly, and Edwin Hancock. Shape and refractive index recovery from single-view polarisation images. In 2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 1229-1236, 2010. 2", + "[25] Cong Phuoc Huynh, Antonio Robles-Kelly, and Edwin R Hancock. Shape and refractive index from single-view spectro-polarimetric images. International journal of computer vision, 101(1):64-94, 2013. 2", + "[26] Chiyu Jiang, Avneesh Sud, Ameesh Makadia, Jingwei Huang, Matthias Nießner, Thomas Funkhouser, et al. Local" + ], + "bbox": [ + 501, + 92, + 893, + 902 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "16587", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "implicit grid representations for 3d scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6001-6010, 2020. 2", + "[27] Achuta Kadambi, Vage Taamazyan, Boxin Shi, and Ramesh Raskar. Polarized 3d: High-quality depth sensing with polarization cues. In Proceedings of the IEEE International Conference on Computer Vision, pages 3370-3378, 2015. 3", + "[28] Samruddhi Kahu and Reena Rahate. Image compression using singular value decomposition. International Journal of Advancements in Research & Technology, 2(8):244-248, 2013. 1, 4", + "[29] Agastya Kalra, Vage Taamazyan, Supreeth Krishna Rao, Kartik Venkataraman, Ramesh Raskar, and Achuta Kadambi. Deep polarization cues for transparent object segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 3", + "[30] Shaohui Liu, Yinda Zhang, Songyou Peng, Boxin Shi, Marc Pollefeys, and Zhaopeng Cui. Dist: Rendering deep implicit signed distance function with differentiable sphere tracing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2019-2028, 2020. 
2", + "[31] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. arXiv preprint arXiv:1906.07751, 2019. 2", + "[32] Tomohiro Maeda, Achuta Kadambi, Yoav Y Schechner, and Ramesh Raskar. Dynamic heterodyne interferometry. In 2018 IEEE International Conference on Computational Photography (ICCP), pages 1-11. IEEE, 2018. 3", + "[33] Ali H. Mahmoud, Moumen T. El-Melegy, and Aly A. Farag. Direct method for shape recovery from polarization and shading. In 2012 19th IEEE International Conference on Image Processing, pages 1769-1772, 2012. 2", + "[34] Julien N. P. Martel, David B. Lindell, Connor Z. Lin, Eric R. Chan, Marco Monteiro, and Gordon Wetzstein. Acorn: Adaptive coordinate networks for neural scene representation. ACM Trans. Graph. (SIGGRAPH), 40(4), 2021. 2, 3, 5, 6, 7, 8", + "[35] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4460-4470, 2019. 2", + "[36] Ben Mildenhall, Peter Hedman, Ricardo Martin-Brualla, Pratul P Srinivasan, and Jonathan T Barron. Nerf in the dark: High dynamic range view synthesis from noisy raw images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16190-16199, 2022. 2", + "[37] Ben Mildenhall, Pratul P Srinivasan, Rodrigo Ortiz-Cayon, Nima Khademi Kalantari, Ravi Ramamoorthi, Ren Ng, and Abhishek Kar. Local light field fusion: Practical view synthesis with prescriptive sampling guidelines. ACM Transactions on Graphics (TOG), 38(4):1-14, 2019. 2", + "[38] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf:" + ], + "bbox": [ + 78, + 90, + 470, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Representing scenes as neural radiance fields for view synthesis. In European conference on computer vision, pages 405-421. Springer, 2020. 2, 5, 6, 7, 8", + "[39] Miyazaki, Tan, Hara, and Ikeuchi. Polarization-based inverse rendering from a single view. In Proceedings Ninth IEEE International Conference on Computer Vision, pages 982-987 vol.2, 2003. 2, 3", + "[40] D. Miyazaki, M. Kagesawa, and K. Ikeuchi. Transparent surface modeling from a pair of polarization images. IEEE Transactions on Pattern Analysis and Machine Intelligence, 26(1):73-82, 2004. 3", + "[41] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (ToG), 41(4):1-15, 2022. 2", + "[42] Shree K Nayar, Xi-Sheng Fang, and Terrance Boult. Separation of reflection components using color and polarization. International Journal of Computer Vision, 21(3):163-186, 1997. 3", + "[43] Trung Ngo Thanh, Hajime Nagahara, and Rin-ichiro Taniguchi. Shape and light directions from shading and polarization. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2015. 2", + "[44] Thu H Nguyen-Phuoc, Christian Richardt, Long Mai, Yongliang Yang, and Niloy Mitra. Blockgan: Learning 3d object-aware scene representations from unlabelled images. Advances in Neural Information Processing Systems, 33:6767-6778, 2020. 2", + "[45] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. 
Occupancy flow: 4d reconstruction by learning particle dynamics. In Proceedings of the IEEE/CVF international conference on computer vision, pages 5379-5389, 2019. 2", + "[46] Michael Oechsle, Lars Mescheder, Michael Niemeyer, Thilo Strauss, and Andreas Geiger. Texture fields: Learning texture representations in function space. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4531-4540, 2019. 2", + "[47] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 165-174, 2019. 2", + "[48] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems 32, pages 8024-8035. Curran Associates, Inc., 2019. 5", + "[49] Songyou Peng, Michael Niemeyer, Lars Mescheder, Marc Pollefeys, and Andreas Geiger. Convolutional occupancy networks. In European Conference on Computer Vision, pages 523-540. Springer, 2020. 2" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "16588", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[50] Gernot Riegler and Vladlen Koltun. Free view synthesis. In European Conference on Computer Vision, pages 623-640. Springer, 2020. 2", + "[51] Y.Y. Schechner, S.G. Narasimhan, and S.K. Nayar. Instant dehazing of images using polarization. In Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. CVPR 2001, volume 1, pages I-I, 2001. 3", + "[52] Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. Implicit neural representations with periodic activation functions. Advances in Neural Information Processing Systems, 33:7462-7473, 2020. 2, 3, 4, 5, 6, 7, 8", + "[53] Vincent Sitzmann, Justus Thies, Felix Heide, Matthias Nießner, Gordon Wetzstein, and Michael Zollhofer. Deepvoxels: Learning persistent 3d feature embeddings. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2437-2446, 2019. 2", + "[54] Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3d-structure-aware neural scene representations. Advances in Neural Information Processing Systems, 32, 2019. 2", + "[55] Daniel Teo, Boxin Shi, Yinqiang Zheng, and Sai-Kit Yeung. Self-calibrating polarising radiometric calibration. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2018. 3", + "[56] Justus Thies, Michael Zollhöfer, and Matthias Nießner. Deferred neural rendering: Image synthesis using neural textures. ACM Transactions on Graphics (TOG), 38(4):1-12, 2019. 2", + "[57] Tali Treibitz and Yoav Y. Schechner. Active polarization rescattering. IEEE Transactions on Pattern Analysis and Machine Intelligence, 31(3):385-399, 2009. 
3", + "[58] Zhen Wang, Shijie Zhou, Jeong Joon Park, Despoina Paschalidou, Suya You, Gordon Wetzstein, Leonidas Guibas, and Achuta Kadambi. Alto: Alternating latent topologies for implicit 3d reconstruction. arXiv preprint arXiv:2212.04096, 2022. 2", + "[59] Lawrence B Wolff. Polarization vision: a new sensory approach to image understanding. Image and Vision computing, 15(2):81-93, 1997. 3", + "[60] Xiuming Zhang, Sean Fanello, Yun-Ta Tsai, Tiancheng Sun, Tianfan Xue, Rohit Pandey, Sergio Orts-Escalano, Philip Davidson, Christoph Rhemann, Paul Debevec, et al. Neural light transport for relighting and view synthesis. ACM Transactions on Graphics (TOG), 40(1):1-17, 2021. 2", + "[61] Chu Zhou, Minggui Teng, Yufei Han, Chao Xu, and Boxin Shi. Learning to dehaze with polarization. Advances in Neural Information Processing Systems, 34, 2021. 3" + ], + "bbox": [ + 78, + 90, + 467, + 782 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "16589", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2023/pCON_ Polarimetric Coordinate Networks for Neural Scene Representations/762a8e4d-373c-4bab-83f9-f1ad8a1ae928_model.json b/2023/pCON_ Polarimetric Coordinate Networks for Neural Scene Representations/762a8e4d-373c-4bab-83f9-f1ad8a1ae928_model.json new file mode 100644 index 0000000000000000000000000000000000000000..a5dd10f890a1dc27833b2f706b2b3da9f1c61837 --- /dev/null +++ b/2023/pCON_ Polarimetric Coordinate Networks for Neural Scene Representations/762a8e4d-373c-4bab-83f9-f1ad8a1ae928_model.json @@ -0,0 +1,3104 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.099, + 0.131, + 0.872, + 0.154 + ], + "angle": 0, + "content": "pCON: Polarimetric Coordinate Networks for Neural Scene Representations" + }, + { + "type": "text", + "bbox": [ + 0.283, + 0.181, + 0.684, + 0.199 + ], + "angle": 0, + "content": "Henry Peters\\*,1, Yunhao Ba\\*,2, Achuta Kadambi\\*1,2" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.199, + 0.799, + 0.217 + ], + "angle": 0, + "content": "1Computer Science Department, University of California, Los Angeles (UCLA)" + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.217, + 0.716, + 0.235 + ], + "angle": 0, + "content": "\\(^{2}\\)Electrical and Computer Engineering Department, UCLA" + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.237, + 0.708, + 0.251 + ], + "angle": 0, + "content": "hpeters@ucla.edu, yhba@ucla.edu, achuta@ee.ucla.edu" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.287, + 0.314, + 0.303 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.319, + 0.473, + 0.533 + ], + "angle": 0, + "content": "Neural scene representations have achieved great success in parameterizing and reconstructing images, but current state of the art models are not optimized with the preservation of physical quantities in mind. While current architectures can reconstruct color images correctly, they create artifacts when trying to fit maps of polar quantities. 
We propose polarimetric coordinate networks (pCON), a new model architecture for neural scene representations aimed at preserving polarimetric information while accurately parameterizing the scene. Our model removes artifacts created by current coordinate network architectures when reconstructing three polarimetric quantities of interest. All code and data can be found at this link: https://visual.ee.ucla.edu/pcon.htm." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.562, + 0.21, + 0.577 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.588, + 0.47, + 0.814 + ], + "angle": 0, + "content": "Neural scene representations are a popular and useful tool in many computer vision tasks, but these models are optimized to preserve visual content, not physical information. Current state-of-the-art models create artifacts due to the presence of a large range of spatial frequencies when reconstructing polarimetric data. Many tasks in polarimetric imaging rely on precise measurements, and thus even small artifacts are a hindrance for downstream tasks that would like to leverage neural reconstructions of polarization images. In this work we present pCON, a new architecture for neural scene representations. pCON leverages images' singular value decompositions to effectively allocate network capacity to learning the more difficult spatial frequencies at each pixel. Our model reconstructs polarimetric images without the artifacts introduced by state-of-the-art models." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.815, + 0.47, + 0.876 + ], + "angle": 0, + "content": "The polarization of light passing through a scene contains a wealth of information, and while current neural representations can represent single images accurately, but they produce noticeable visual artifacts when trying to represent" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.289, + 0.8, + 0.304 + ], + "angle": 0, + "content": "multiple polarimetric quantities concurrently." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.305, + 0.895, + 0.743 + ], + "angle": 0, + "content": "We propose a new architecture for neural scene representations that can effectively reconstruct polarimetric images without artifacts. Our model reconstructs color images accurately while also ensuring the quality of three important polarimetric quantities, the degree \\((\\rho)\\) and angle \\((\\phi)\\) of linear polarization (DoLP and AoLP), and the unpolarized intensity \\(I_{un}\\). This information is generally captured using images of a scene taken through linear polarizing filters at four different angles. Instead of learning a representation of these images, our model operates directly on the DoLP, AoLP and unpolarized intensity maps. When learning to fit these images, current coordinate network architectures produce artifacts in the predicted DoLP and unpolarized intensity maps. To alleviate this issue, we take inspiration from traditional image compression techniques and fit images using their singular value decompositions. Images can be compressed by reconstructing them using only a subset of their singular values [28]. By utilizing different, non-overlapping sets of singular values to reconstruct an image, the original image can be recovered by summing the individual reconstructions together. 
Our model is supervised in a coarse-to-fine manner, which helps the model to represent both the low and high frequency details present in maps of polarimetric quantities without introducint noise or tiling artifacts. A demonstration of the efficacy our model can be seen in Fig. 1 and Table 1. Furthermore, our model is capable of representing images at varying levels of detail, creating a tradeoff between performance and model size without retraining." + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.756, + 0.644, + 0.771 + ], + "angle": 0, + "content": "1.1. Contributions" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.781, + 0.872, + 0.795 + ], + "angle": 0, + "content": "To summarize, the contributions of our work include:" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.811, + 0.891, + 0.84 + ], + "angle": 0, + "content": "- a coordinate network architecture for neural scene representations of polarimetric images;" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.841, + 0.892, + 0.9 + ], + "angle": 0, + "content": "- a training strategy for our network which learns a series of representations using different sets of singular values, allowing for a trade-off between performance and model size without retraining;" + }, + { + "type": "list", + "bbox": [ + 0.519, + 0.811, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.206, + 0.9 + ], + "angle": 0, + "content": "*Equal contribution." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "16579" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.108, + 0.088, + 0.257, + 0.204 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.109, + 0.205, + 0.256, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.355, + 0.194, + 0.366 + ], + "angle": 0, + "content": "GT" + }, + { + "type": "image", + "bbox": [ + 0.26, + 0.089, + 0.409, + 0.204 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.262, + 0.204, + 0.407, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.297, + 0.354, + 0.371, + 0.368 + ], + "angle": 0, + "content": "SIREN [52]" + }, + { + "type": "image", + "bbox": [ + 0.411, + 0.089, + 0.56, + 0.204 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.412, + 0.205, + 0.558, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.445, + 0.355, + 0.526, + 0.368 + ], + "angle": 0, + "content": "ACORN [34]" + }, + { + "type": "image", + "bbox": [ + 0.562, + 0.089, + 0.712, + 0.204 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.563, + 0.205, + 0.71, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.606, + 0.355, + 0.668, + 0.367 + ], + "angle": 0, + "content": "ReLU P.E" + }, + { + "type": "image", + "bbox": [ + 0.715, + 0.089, + 0.863, + 0.204 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.715, + 0.205, + 0.861, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.773, + 0.355, + 0.804, + 0.367 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.381, + 0.895, + 0.424 + ], + "angle": 0, + "content": "Figure 1. 
Our model reconstructs the training scene more accurately than other architectures. Our model does not have the noise pattern present in reconstructions from SIREN [52] or a ReLU MLP with positional encoding [38], nor does it show tiling artifacts as in ACORN's [34] prediction." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.45, + 0.47, + 0.496 + ], + "angle": 0, + "content": "- results demonstrating that our model reconstructs maps of polarimetric quantities without the artifacts created by current state-of-the-art approaches." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.515, + 0.216, + 0.531 + ], + "angle": 0, + "content": "2. Related work" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.542, + 0.336, + 0.559 + ], + "angle": 0, + "content": "2.1. Neural scene representations" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.567, + 0.47, + 0.718 + ], + "angle": 0, + "content": "The aim of neural scene representations is to parameterize a two or three dimensional scene in the weights of a neural network in order to accomplish some other task related to the scene. Most papers fall into one of three categories. Explicit representations model the scene directly, which allows them to quickly accomplish tasks such as scene reconstruction [9, 31], novel view synthesis [4, 17, 22, 36, 37, 41, 50, 53, 56] or relighting [60]. However, since the scene is modelled explicitly, these representations require more memory than the alternatives." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Implicit representations do not model the scene directly, but instead use an MLP to map from a coordinate in either 2D or 3D space to some desired output value. This value could be the observed radiance or pixel intensity [11, 16, 38, 44, 46, 54], occupancy of a pixel or voxel [35, 45], a quantity related to shape [5, 10, 13, 18, 19, 21, 23, 26, 30, 44, 47, 54, 58], or any other quantity of interest. The final category of neural scene representations is a hybrid of the first two. The only work that fits directly into this category is ACORN [34], which accomplishes state-of-the-art performance on image and volume fitting by combining a coordinate network with an explicit grid or voxel represent-" + }, + { + "type": "table", + "bbox": [ + 0.503, + 0.447, + 0.878, + 0.535 + ], + "angle": 0, + "content": "
Model | Noise Pattern | Tiling Artifacts | Resizing Artifacts
ACORN [34] | Medium | High | Not Supported
ReLU w/P.E. [38] | Medium | None | Yes
SIREN [52] | High | None | Yes
Proposed | Minimal | None | Minimal
" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.545, + 0.894, + 0.63 + ], + "angle": 0, + "content": "Table 1. Our model shows fewer artifacts than current state-of-the-art architectures. Since ACORN divides an image into a discrete grid, in order to query an image at a different resolution it is necessary to also reform the grid. The grid is created online during training, so it is not feasible to query a model at a different resolution without retraining." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.655, + 0.892, + 0.7 + ], + "angle": 0, + "content": "tation. Similar to ACORN, other works divide the scene into local regions and learn each of these regions implicitly [10,26,49]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.701, + 0.893, + 0.837 + ], + "angle": 0, + "content": "To our knowledge, this work is the first to highlight the artifacts caused by existing neural scene representation architectures when fitting polarimetric data. While we are one of the first works to examine polarization and neural scene representations in the same context, we would like to acknowledge that PANDORA [14], a concurrent work, also utilizes polarization and neural scene representations. However, they focus on radiance decomposition rather than 2D reconstruction." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.847, + 0.681, + 0.862 + ], + "angle": 0, + "content": "2.2. Polarization vision" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.894, + 0.901 + ], + "angle": 0, + "content": "Polarization is useful in a variety of computer vision tasks. It can be used to estimate surface normals [1, 2," + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "16580" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.289 + ], + "angle": 0, + "content": "7, 15, 24, 25, 33, 39, 43] or refine depth maps to represent incredibly fine details [27]. It can be used in radiometric calibration [55], dynamic interferometry [32], facial reconstruction [6] and separation of diffuse and specular reflection [39, 42]. It also can be used to remove the effects of scattering media like haze [51, 57, 61] and water [57], to augment the performance of computer vision tasks in the presence of transparent objects [12, 29, 40], or even to assist in imaging objects in space [20]. Traditionally, polarimetric data is captured by rotating a linear polarizing filter in front of a camera [3, 59], but recent advances in machine vision have produced cameras that can capture multiple polar images in a single shot." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.292, + 0.472, + 0.355 + ], + "angle": 0, + "content": "Our work uses a neural network to accurately parameterize polarimetric information captured from a scene. This allows for easier storage and transport of polarimetric data and facilitates its use in other deep learning based tasks." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.378, + 0.17, + 0.393 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.406, + 0.268, + 0.423 + ], + "angle": 0, + "content": "3.1. Polarization physics" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.434, + 0.473, + 0.645 + ], + "angle": 0, + "content": "Polarized light can be modelled as a sine wave, and can thus be parameterized by three quantities. 
The degree of linear polarization (DoLP) is a quantity between 0 and 1 that represents how much of the total intensity of the wave is polarized and unpolarized. Completely polarized light will have a DoLP of 1, and completely unpolarized light will have a DoLP of 0. The angle of linear polarization (AoLP) corresponds to the orientation of the plane in which the wave is oscillating. The AoLP takes values from 0 to \\(\\pi\\) radians. The final quantity of interest is the unpolarized intensity, \\(I_{un}\\), of the wave, which corresponds to its amplitude. With these three quantities, it is possible to render a scene as viewed through a linear polarization filter at any angle using the following equation:" + }, + { + "type": "equation", + "bbox": [ + 0.14, + 0.665, + 0.47, + 0.683 + ], + "angle": 0, + "content": "\\[\nI \\left(\\phi_ {p o l}\\right) = I _ {u n} \\left(1 + \\rho c o s \\left(2 \\left(\\phi - \\phi_ {p o l}\\right)\\right)\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.701, + 0.47, + 0.807 + ], + "angle": 0, + "content": "where \\( I_{un} \\) denotes unpolarized intensity, \\( \\rho \\) denotes DoLP, \\( \\phi \\) denotes AoLP and \\( \\phi_{pol} \\) denotes the desired filter angle at each pixel. This equation allows us to render images under any number of filter angles by saving only three quantities per pixel. In this paper we leverage the above equation to learn a representation for just these quantities, rather than the four original images." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.471, + 0.902 + ], + "angle": 0, + "content": "The DoLP \\((\\rho)\\) and AoLP \\((\\phi)\\) have uses beyond just rendering images. In the shape from polarization problem, these quantities are used to calculate the zenith and azimuth angles, respectively, of per-pixel surface normals. This relationship has been studied in previous work [1, 7]. Specifically, the azimuth angle, \\(\\theta_{a}\\), of a surface normal can be" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.785, + 0.108 + ], + "angle": 0, + "content": "calculated from the following relationship:" + }, + { + "type": "equation", + "bbox": [ + 0.515, + 0.12, + 0.892, + 0.171 + ], + "angle": 0, + "content": "\\[\n\\phi = \\left\\{ \\begin{array}{l} \\theta_ {a}, \\text {w h e n d i f f u s e r e f l e c t i o n d o m i n a t e s} \\\\ \\theta_ {a} - \\frac {\\pi}{2}, \\text {w h e n s p e c u l a r r e f l e c t i o n d o m i n a t e s} \\end{array} \\right. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.173, + 0.892, + 0.218 + ], + "angle": 0, + "content": "DoLP, \\(\\rho\\), is related to the zenith angle, \\(\\theta_z\\), in terms of the refractive index, \\(n\\), of a surface. When diffuse reflection is dominant, the relationship can be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.52, + 0.23, + 0.892, + 0.26 + ], + "angle": 0, + "content": "\\[\n\\rho = \\frac {\\left(n - \\frac {1}{n}\\right) ^ {2} \\sin^ {2} \\left(\\theta_ {z}\\right)}{2 + 2 n ^ {2} - \\left(n - \\frac {1}{n}\\right) ^ {2} \\sin^ {2} \\left(\\theta_ {z}\\right) + 4 \\cos \\left(\\theta_ {z}\\right) \\sqrt {n ^ {2} - \\sin^ {2} \\left(\\theta_ {z}\\right)}}. 
\\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.27, + 0.892, + 0.3 + ], + "angle": 0, + "content": "When specular reflection dominates, the relationship is different:" + }, + { + "type": "equation", + "bbox": [ + 0.527, + 0.312, + 0.892, + 0.356 + ], + "angle": 0, + "content": "\\[\n\\rho = \\frac {2 \\sin^ {2} (\\theta_ {z}) \\cos (\\theta_ {z}) \\sqrt {n ^ {2} - \\sin^ {2} (\\theta_ {z})}}{n ^ {2} - \\sin^ {2} (\\theta_ {z}) - n ^ {2} \\sin^ {2} (\\theta_ {z}) + 2 \\sin^ {4} (\\theta_ {z})}. \\qquad (4)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.367, + 0.892, + 0.533 + ], + "angle": 0, + "content": "\\(\\rho, \\phi\\) and \\(I_{un}\\) can be calculated directly from a vector known as the Stokes vector at each pixel. This vector has four elements. The first three elements deal with the linear polarization of light, and the final one represents the circular polarization of the wave. In this paper we will focus on linear polarization. To measure the Stokes vector of a scene, at least three images are needed, taken through linear polarizing filters at 0, 45 and 90 degrees. Since the camera used in our setup also captures an image with a filter at 135 degrees, we use four images in our calculations of the Stokes vectors for robustness to noise." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.545, + 0.755, + 0.56 + ], + "angle": 0, + "content": "3.2. Learning from coarse to fine" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.569, + 0.892, + 0.903 + ], + "angle": 0, + "content": "Current coordinate network architectures produce artifacts when fitting polarimetric images. SIREN [52] and similar architectures treat every coordinate equally when training, and they produce noise patterns in the resulting images when the spatial frequencies present in the training data differ widely (eg. the maximum magnitude frequency differs by an order of magnitude). In the polarimetric images we obtained, we found the maximum frequency magnitude of some AoLP maps was around \\(10^{7}\\), while the maximum magnitude for the intensity image was only around \\(10^{6}\\). ACORN [34] does not treat each coordinate in the same way, but its dynamic tiling strategy looks for regions of low variance in order to create larger blocks. This is difficult to do when attempting to fit multiple images containing varying frequencies. The resulting reconstructions end up looking blocky, and fine detail is lost in the process. Our method removes these artifacts by learning image representations using their singular value decompositions. One idea to help in reconstructing high frequency details could be to use an image's Fourier decomposition. We found that in practice the SVD works better for our use case. This is due to the propagation of errors during the forward and inverse" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "16581" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.084, + 0.096, + 0.891, + 0.345 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.357, + 0.895, + 0.429 + ], + "angle": 0, + "content": "Figure 2. pCON learns to fit an image by learning a series of reconstructions with different singular values. The model is organized into a series of \\( n_b \\) parallel MLPs (denoted here as \\( g_i \\)) with sine activations. A 2D coordinate vector representing a point on an image is passed through all bands separately (\\( g_0 \\) to \\( g_n \\)). 
To supervise the training of each band, we reconstruct the full image maps of each quantity, and then calculate the MSE between the model prediction, \\( \\hat{y}_i \\) and their respective ground truth values, \\( y_i \\), at the input coordinate. The final output is the sum of all the intermediate reconstructions, which yields a set of images similar to the training data." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.453, + 0.473, + 0.56 + ], + "angle": 0, + "content": "Fourier transforms. The SVD does not require shifting between the spatial and frequency domains, which allows errors to propagate less than if we were supervising on Fourier frequencies. The singular value decomposition of an \\(m \\times n\\) matrix \\(\\mathbf{A}\\) is a set of matrices \\(\\mathbf{U} \\in \\mathbb{R}^{m \\times m}\\), \\(\\boldsymbol{\\Sigma} \\in \\mathbb{R}^{m \\times n}\\) and \\(\\mathbf{V}^{\\top} \\in \\mathbb{R}^{n \\times n}\\) such that \\(\\mathbf{A} = \\mathbf{U} \\boldsymbol{\\Sigma} \\mathbf{V}^{\\top}\\). This matrix product can be further decomposed:" + }, + { + "type": "equation", + "bbox": [ + 0.102, + 0.568, + 0.47, + 0.651 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbf {U} \\boldsymbol {\\Sigma} \\mathbf {V} ^ {\\top} = \\sum_ {i} ^ {r} \\sigma_ {i} \\mathbf {u} _ {i} \\mathbf {v} _ {i} ^ {\\top} \\tag {5} \\\\ = \\sum_ {i = 0} ^ {a _ {1}} \\sigma_ {i} \\mathbf {u} _ {i} \\mathbf {v} _ {i} ^ {\\top} + \\sum_ {i = a _ {1}} ^ {a _ {2}} \\sigma_ {i} \\mathbf {u} _ {i} \\mathbf {v} _ {i} ^ {\\top} + \\dots + \\sum_ {i = a _ {n}} ^ {r} \\sigma_ {i} \\mathbf {u} _ {i} \\mathbf {v} _ {i} ^ {\\top} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.659, + 0.473, + 0.902 + ], + "angle": 0, + "content": "where \\( r \\) is the rank of \\( \\mathbf{A} \\), \\( \\mathbf{u}_i \\) is the \\( i \\)-th column of \\( \\mathbf{U} \\), \\( \\mathbf{v}_i \\) is the \\( i \\)-th column of \\( \\mathbf{V} \\), and \\( \\sigma_i \\) is the \\( i \\)-th singular value. In the case of an image, this means that it is possible to calculate different pieces of the decomposition individually, and then sum them to obtain the original image. We leverage this property of the SVD in our model architecture. Using just the largest singular values to reconstruct an image yields a result containing only the low frequency details of the original [28]. As more singular values are used in the reconstruction, higher frequency details are captured. A single coordinate may have features in many reconstructions, and others may have features in only a few. Our network learns a series of reconstructions in parallel, which effectively allocates more model capacity to coordinates which have details at numerous frequencies. Since we are not dividing the image into a grid like ACORN, our reconstruc" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.453, + 0.895, + 0.499 + ], + "angle": 0, + "content": "tions do not suffer from tiling artifacts, and they also do not exhibit the obvious noise pattern present in reconstructions from SIREN or ReLU MLPs." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.508, + 0.659, + 0.525 + ], + "angle": 0, + "content": "3.3. Network design" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.531, + 0.895, + 0.683 + ], + "angle": 0, + "content": "Our network design takes inspiration from SIREN [52]. The original SIREN architecture was similar to an ordinary MLP, except that it used the sine activation function. 
Our network is divided into a series of \\( n_b \\) fully-connected blocks which map from a 2D input image coordinate to the AoLP \\( (\\phi) \\), DoLP \\( (\\rho) \\) and unpolarized intensity \\( I_{un} \\) at that pixel. We call each of these MLPs a band of the network, and we will notate them as \\( g_i \\) for \\( i \\in 0,1,\\dots,n_b - 1 \\). To fit an image, we first take the singular value decomposition of the map of each polar quantity:" + }, + { + "type": "equation", + "bbox": [ + 0.636, + 0.693, + 0.753, + 0.712 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\Phi} = \\mathbf {U} _ {\\phi} \\boldsymbol {\\Sigma} _ {\\phi} \\mathbf {V} _ {\\phi} ^ {\\top},\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.641, + 0.714, + 0.892, + 0.734 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\rho} = \\mathbf {U} _ {\\rho} \\boldsymbol {\\Sigma} _ {\\rho} \\mathbf {V} _ {\\rho} ^ {\\top}, \\tag {6}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.63, + 0.736, + 0.763, + 0.755 + ], + "angle": 0, + "content": "\\[\n\\mathbf {I} _ {u n} = \\mathbf {U} _ {u n} \\boldsymbol {\\Sigma} _ {u n} \\mathbf {V} _ {u n} ^ {\\top}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.895, + 0.903 + ], + "angle": 0, + "content": "\\(\\Phi, \\rho\\) and \\(\\mathbf{I}_{un}\\) represent the full image maps of AoLP \\((\\phi)\\), DoLP \\((\\rho)\\) and \\(I_{un}\\), respectively. The above equations are obtained by interpreting these maps as matrices and then using Eq. (5). We now define a series of \\(n_b\\) thresholds for \\(\\Phi, \\rho\\) and \\(\\mathbf{I}_{un}\\) as \\(t_{\\phi,i}, t_{\\rho,i}\\) and \\(t_{\\mathrm{un},i}\\), respectively. These thresholds dictate which singular values will be used to supervise each band of the network. We also define the ground truth intermediate reconstructions of each quantity using a subset of singular values as \\(y_{\\phi,i}, y_{\\rho,i}\\) and \\(y_{\\mathrm{un},i}\\). We denote their" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "16582" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.138 + ], + "angle": 0, + "content": "corresponding predictions as \\(\\hat{y}_{\\phi,i}\\), \\(\\hat{y}_{\\rho,i}\\) and \\(\\hat{y}_{\\mathrm{un},i}\\). We can use Eq. (5) to decompose each of the SVDs from Eq. (6) into a set of sums. For example, we can write \\(\\Phi\\) as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.174, + 0.144, + 0.47, + 0.188 + ], + "angle": 0, + "content": "\\[\ny _ {\\phi , i} = \\sum_ {j = t _ {\\phi , i - 1}} ^ {t _ {\\phi , i}} \\sigma_ {\\phi , j} \\mathbf {u} _ {\\phi , j} \\mathbf {v} _ {\\phi , j} ^ {\\top}. \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.194, + 0.469, + 0.224 + ], + "angle": 0, + "content": "The reconstructions for the other quantities can be written with their respective SVDs and thresholds similar to Eq. (7)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.224, + 0.469, + 0.254 + ], + "angle": 0, + "content": "Each band learns a single reconstruction for these quantities at each pixel." + }, + { + "type": "equation", + "bbox": [ + 0.162, + 0.262, + 0.469, + 0.278 + ], + "angle": 0, + "content": "\\[\ng _ {i} (x, y) = \\hat {y} _ {i} = (\\hat {y} _ {\\phi , i}, \\hat {y} _ {\\rho , i}, \\hat {y} _ {\\mathrm {u n}, i}). 
\\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.285, + 0.469, + 0.36 + ], + "angle": 0, + "content": "Here, \\(x\\) and \\(y\\) constitute the 2D pixel coordinate vector that serves as the input to the network. This coordinate is passed through each band of the network to compute all \\(\\hat{y}_i\\), and then the fully reconstructed image is calculated as \\(\\sum_{i}\\hat{y}_{i}\\). See Fig. 2 for a visualization of this entire process." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.368, + 0.224, + 0.383 + ], + "angle": 0, + "content": "3.4. Loss functions" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.39, + 0.469, + 0.511 + ], + "angle": 0, + "content": "Our network outputs a set of \\( n_b \\) images. For each band, we compute the MSE between the cumulative sum of all outputs up to, and including, the current band. We define multiplicative factors for the three polar quantities as \\( \\lambda_{\\phi} \\), \\( \\lambda_{\\rho} \\) and \\( \\lambda_{\\mathrm{un}} \\). We also define factors for each band as \\( \\lambda_{b,i} \\). The loss of the network can be calculated as follows, where \\( L \\) is the loss function and \\( x \\) is the data point for which the loss is being calculated:" + }, + { + "type": "equation", + "bbox": [ + 0.117, + 0.518, + 0.469, + 0.581 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} L (x) = \\sum_ {i} \\lambda_ {b, i} \\sum_ {j = 0} ^ {i} \\lambda_ {\\phi} \\left(\\hat {y} _ {\\phi , j} - y _ {\\phi , j}\\right) ^ {2} \\tag {9} \\\\ + \\lambda_ {\\rho} (\\hat {y} _ {\\rho , j} - y _ {\\rho , j}) ^ {2} + \\lambda_ {\\mathrm {u n}} (\\hat {y} _ {\\mathrm {u n}, j} - y _ {\\mathrm {u n}, j}) ^ {2}. \\\\ \\end{array}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.587, + 0.291, + 0.602 + ], + "angle": 0, + "content": "3.5. Implementation details" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.61, + 0.165, + 0.623 + ], + "angle": 0, + "content": "3.5.1 Data" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.633, + 0.469, + 0.829 + ], + "angle": 0, + "content": "We collected all of our own data using a Flir Blackfly S RGB polarization camera. From this camera's images, it is possible to calculate the desired polarimetric quantities using the physics discussed in Sec. 3.1. We release two datasets with this paper. The first contains the six scenes used to create figures in this paper. The second set contains twenty four additional scenes for use in validating our approach. The captured scenes represent a diverse set of polarization effects. The DoLP and AoLP values span the entire ranges (zero to one for DoLP and zero to pi for AoLP) of possible values. We capture interesting polarization phenomena such as transparent and reflective surfaces. All released images have a resolution of \\(1024 \\times 1024\\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.847, + 0.255, + 0.862 + ], + "angle": 0, + "content": "3.5.2 Hyperparameters" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.87, + 0.469, + 0.901 + ], + "angle": 0, + "content": "We built all models in PyTorch [48]. We began all experiments with a learning rate of \\(1 \\times 10^{-5}\\), and then multiplied" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.166 + ], + "angle": 0, + "content": "it by 0.1 at 5000 epochs. Models were trained for a total of 10000 epochs. We also set the unitless frequency parameter \\(\\omega_0\\) of our sine activations to 90. 
For our best model, we used a total of 10 bands, each with 2 hidden layers and a hidden dimension of 256." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.168, + 0.892, + 0.242 + ], + "angle": 0, + "content": "We chose the singular value thresholds of each band based on the sum of the magnitudes of singular values. Band one was given roughly \\(90\\%\\) of the sum, then the others \\(99\\%\\), \\(99.9\\%\\), and so on. Exact values for \\(\\lambda_{b_i}\\) used in all presented experiments can be found in the supplement." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.243, + 0.892, + 0.272 + ], + "angle": 0, + "content": "For our experiments, we set \\(\\lambda_{\\phi} = 1.0\\), \\(\\lambda_{\\rho} = 5.0\\) and \\(\\lambda_{\\mathrm{un}} = 5.0\\)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.285, + 0.632, + 0.301 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.31, + 0.892, + 0.46 + ], + "angle": 0, + "content": "In this section, we present comparisons between our model, SIREN [52], ACORN [34] and an MLP using ReLU activations and positional encoding, as used in NeRF [38]. We changed the number of parameters and output values of the baseline architectures, since originally these models were designed to fit only a single image at a time. We also changed the frequency parameter \\(\\omega_0\\) of the SIREN sine activations to 90 to match the parameter used in our own model. All our models were trained using the training strategy discussed in Sec. 3.5." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.469, + 0.802, + 0.485 + ], + "angle": 0, + "content": "4.1. Validation of proposed failure case" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.492, + 0.892, + 0.703 + ], + "angle": 0, + "content": "We hypothesized the reason for the poor performance of baseline models when fitting polarimetric images was due to the presence of details at high spatial frequencies in the captured AoLP maps. To validate this hypothesis, we performed low-pass filtering on AoLP maps of a scene and then fit a model on the resulting AoLP, DoLP and \\(\\mathbf{I}_{un}\\) maps. We found a clear trend in the reconstruction quality as we filtered out higher percentages of high spatial frequencies. All models performed better when fewer high frequency details were present in the target images. This aligns with our idea that these details create difficult scenes for networks to reconstruct. For the scene in Fig. 3, the AoLP reconstruction SSIMs with different amounts of frequencies removed from the GT AoLP maps can be seen in Table 2." + }, + { + "type": "table", + "bbox": [ + 0.501, + 0.713, + 0.892, + 0.778 + ], + "angle": 0, + "content": "
% Highest Frequencies Removed | SIREN [52] | ACORN [34] | ReLU P.E. [38]
0% | 0.60 | 0.51 | 0.63
75% | 0.54 | 0.80 | 0.93
80.5% | 0.89 | 0.97 | 0.98
93.75% | 0.95 | 0.99 | 0.99
" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.789, + 0.892, + 0.843 + ], + "angle": 0, + "content": "Table 2. All baseline models reconstruct AoLP maps better when details at higher spatial frequencies are filtered out. This trend validates our hypothesis that images with high frequency details are more difficult for a network to reconstruct." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.848, + 0.722, + 0.864 + ], + "angle": 0, + "content": "4.2. Comparison with others" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We trained both our model and the baselines to predict AoLP \\((\\Phi)\\), DoLP \\((\\rho)\\) and \\(\\mathbf{I}_{un}\\) maps directly. Quali" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "16583" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.157, + 0.094, + 0.177, + 0.104 + ], + "angle": 0, + "content": "GT" + }, + { + "type": "image", + "bbox": [ + 0.089, + 0.106, + 0.245, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.151, + 0.229, + 0.184, + 0.239 + ], + "angle": 0, + "content": "AoLP" + }, + { + "type": "image_caption", + "bbox": [ + 0.294, + 0.094, + 0.358, + 0.105 + ], + "angle": 0, + "content": "SIREN [52]" + }, + { + "type": "image", + "bbox": [ + 0.248, + 0.106, + 0.405, + 0.227 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.261, + 0.229, + 0.392, + 0.239 + ], + "angle": 0, + "content": "SSIM/PSNR: 0.60/14.32" + }, + { + "type": "image_caption", + "bbox": [ + 0.45, + 0.094, + 0.521, + 0.105 + ], + "angle": 0, + "content": "ACORN [34]" + }, + { + "type": "image", + "bbox": [ + 0.408, + 0.106, + 0.564, + 0.227 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.42, + 0.229, + 0.551, + 0.239 + ], + "angle": 0, + "content": "SSIM/PSNR: 0.51/15.99" + }, + { + "type": "image_caption", + "bbox": [ + 0.604, + 0.094, + 0.686, + 0.105 + ], + "angle": 0, + "content": "ReLU P.E. 
[38]" + }, + { + "type": "image", + "bbox": [ + 0.567, + 0.106, + 0.723, + 0.227 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.579, + 0.229, + 0.711, + 0.239 + ], + "angle": 0, + "content": "SSIM/PSNR: 0.63/17.18" + }, + { + "type": "image_caption", + "bbox": [ + 0.79, + 0.095, + 0.818, + 0.104 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image", + "bbox": [ + 0.725, + 0.106, + 0.884, + 0.227 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.739, + 0.229, + 0.869, + 0.239 + ], + "angle": 0, + "content": "SSIM/PSNR: 0.77/16.57" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.242, + 0.245, + 0.363 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.15, + 0.365, + 0.184, + 0.376 + ], + "angle": 0, + "content": "DoLP" + }, + { + "type": "image", + "bbox": [ + 0.248, + 0.242, + 0.405, + 0.363 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.261, + 0.365, + 0.392, + 0.376 + ], + "angle": 0, + "content": "SSIM/PSNR: 0.73/29.83" + }, + { + "type": "image", + "bbox": [ + 0.408, + 0.242, + 0.564, + 0.363 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.42, + 0.365, + 0.551, + 0.376 + ], + "angle": 0, + "content": "SSIM/PSNR: 0.80/31.78" + }, + { + "type": "image", + "bbox": [ + 0.567, + 0.242, + 0.723, + 0.363 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.579, + 0.365, + 0.711, + 0.376 + ], + "angle": 0, + "content": "SSIM/PSNR: 0.79/32.06" + }, + { + "type": "image", + "bbox": [ + 0.725, + 0.242, + 0.884, + 0.363 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.757, + 0.365, + 0.852, + 0.375 + ], + "angle": 0, + "content": "SSIM: 0.82/34.56" + }, + { + "type": "image", + "bbox": [ + 0.089, + 0.378, + 0.245, + 0.523 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.248, + 0.378, + 0.404, + 0.523 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.408, + 0.378, + 0.563, + 0.523 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.567, + 0.378, + 0.723, + 0.523 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.725, + 0.378, + 0.882, + 0.523 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.525, + 0.245, + 0.646 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.155, + 0.649, + 0.178, + 0.659 + ], + "angle": 0, + "content": "Un" + }, + { + "type": "image", + "bbox": [ + 0.248, + 0.525, + 0.405, + 0.646 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.261, + 0.648, + 0.392, + 0.658 + ], + "angle": 0, + "content": "SSIM/PSNR: 0.59/26.42" + }, + { + "type": "image", + "bbox": [ + 0.408, + 0.525, + 0.564, + 0.646 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.42, + 0.648, + 0.551, + 0.658 + ], + "angle": 0, + "content": "SSIM/PSNR: 0.77/28.43" + }, + { + "type": "image", + "bbox": [ + 0.567, + 0.525, + 0.723, + 0.646 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.579, + 0.648, + 0.711, + 0.658 + ], + "angle": 0, + "content": "SSIM/PSNR: 0.71/29.58" + }, + { + "type": "image", + "bbox": [ + 0.725, + 0.525, + 0.884, + 0.646 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + 
"bbox": [ + 0.739, + 0.648, + 0.87, + 0.658 + ], + "angle": 0, + "content": "SSIM/PSNR: 0.89/34.82" + }, + { + "type": "image", + "bbox": [ + 0.089, + 0.661, + 0.245, + 0.805 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.248, + 0.661, + 0.404, + 0.805 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.408, + 0.661, + 0.563, + 0.805 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.567, + 0.661, + 0.723, + 0.805 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.725, + 0.661, + 0.882, + 0.805 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.819, + 0.895, + 0.862 + ], + "angle": 0, + "content": "Figure 3. Our model shows higher SSIM and fewer artifacts on predicted \\(\\Phi\\), \\(\\rho\\) and \\(\\mathbf{I}_{un}\\) maps. Baseline models cause noise or tiling which is clearly visible on the checkerboard pattern on the floor, where all three quantities take large values. The artifacts are present on objects exhibiting both specular reflections, like the floor, and diffuse reflections, like the wall and doors in the background." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "16584" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.108, + 0.09, + 0.865, + 0.348 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.361, + 0.895, + 0.404 + ], + "angle": 0, + "content": "Figure 4. Our model can more accurately reconstruct RGB images taken through different polarizing filter angles when compared to SIREN [52], ACORN [34] and a ReLU MLP [38] with positional encoding. The images reconstructed here are the scene as viewed through a linear polarizer oriented at \\(0^{\\circ}\\)" + }, + { + "type": "image", + "bbox": [ + 0.083, + 0.426, + 0.177, + 0.497 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.499, + 0.175, + 0.568 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.568, + 0.175, + 0.638 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.119, + 0.64, + 0.14, + 0.651 + ], + "angle": 0, + "content": "GT" + }, + { + "type": "image", + "bbox": [ + 0.181, + 0.426, + 0.273, + 0.497 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.181, + 0.498, + 0.273, + 0.568 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.181, + 0.568, + 0.273, + 0.638 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.208, + 0.64, + 0.245, + 0.651 + ], + "angle": 0, + "content": "1 band" + }, + { + "type": "image", + "bbox": [ + 0.276, + 0.426, + 0.369, + 0.497 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.276, + 0.498, + 0.369, + 0.568 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.276, + 0.568, + 0.369, + 0.638 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.3, + 0.64, + 0.344, + 0.651 + ], + "angle": 0, + "content": "4 bands" + }, + { + "type": "image", + "bbox": [ + 0.372, + 0.426, + 0.465, + 0.497 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.372, + 0.498, + 0.464, + 0.568 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.372, + 0.568, + 0.464, + 0.638 + 
], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.388, + 0.64, + 0.448, + 0.651 + ], + "angle": 0, + "content": "Full model" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.664, + 0.47, + 0.707 + ], + "angle": 0, + "content": "Figure 5. As the number of bands used in the reconstruction increases, so does the quality of the image. Even with a single band the reconstruction is visually close to the original." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.732, + 0.47, + 0.807 + ], + "angle": 0, + "content": "tative and quantitative results can be found in Fig. 3. Our model performs yeilds better PSNR and SSIM than all baselines and it also does not produce the tiling artifacts or the noise patterns present in the reconstructions created by other models." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.818, + 0.379, + 0.833 + ], + "angle": 0, + "content": "4.3. Accuracy and model size trade-off" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.471, + 0.901 + ], + "angle": 0, + "content": "In order to fit an image with a smaller or larger model, current architectures require a full retraining with a different number of parameters. The structure of our model allows us to provide a tradeoff between model size and reconstruction" + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.428, + 0.89, + 0.546 + ], + "angle": 0, + "content": "
Model | Φ (↑) | ρ (↑) | I_un (↑) | # Params. (↓)
Ours (1 band) | 0.12/10.83 | 0.50/22.87 | 0.74/26.58 | 130K
Ours (2 bands) | 0.32/14.66 | 0.64/28.40 | 0.91/34.74 | 270K
Ours (3 bands) | 0.42/14.42 | 0.65/28.59 | 0.92/34.43 | 400K
Ours (4 bands) | 0.51/16.32 | 0.65/28.71 | 0.92/34.62 | 530K
Ours (5 bands) | 0.64/17.68 | 0.67/28.87 | 0.92/36.74 | 670K
Ours (Full model) | 0.79/18.08 | 0.76/31.75 | 0.92/36.00 | 1.3M
SIREN [52] | 0.59/15.96 | 0.67/28.20 | 0.70/28.23 | 660K
ACORN [34] | 0.48/17.01 | 0.73/29.96 | 0.82/29.85 | 530K
ReLU [38] w/P.E. | 0.64/18.30 | 0.76/30.99 | 0.81/32.13 | 660K
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.556, + 0.892, + 0.598 + ], + "angle": 0, + "content": "Table 3. As more bands are used, the number of parameters grows along with the resulting performance (SSIM/PSNR). The metrics shown here are averages across our whole dataset." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.644, + 0.895, + 0.903 + ], + "angle": 0, + "content": "accuracy without retraining. Each band of the model learns a representation of the image when reconstructed with a different set of singular values. If the downstream task doesn't require incredibly high accuracy, and the user would rather save and transport a smaller set of model weights, they can just save the weights from the first band of the network and reconstruct the image with only the singular values from that band, or vice versa if more accuracy is required. A visualization of reconstruction quality using different numbers of bands can be seen in Fig. 5. See Table 3 for quantitative results using different bands of our network. With a similar number of parameters to the baseline models, it achieves comparable performance to all baseline architectures. Our full model outperforms all baselines on predicting AoLP \\((\\Phi)\\) and \\(\\mathbf{I}_{un}\\) maps. It is also worth noting that our full model achieves significant compression over storing raw data. The combined memory size of the AoLP, DoLP and \\(\\mathbf{I}_{un}\\) maps" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "16585" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.089, + 0.235, + 0.1 + ], + "angle": 0, + "content": "GT" + }, + { + "type": "image", + "bbox": [ + 0.142, + 0.105, + 0.307, + 0.232 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.365, + 0.089, + 0.432, + 0.102 + ], + "angle": 0, + "content": "SIREN [52]" + }, + { + "type": "image", + "bbox": [ + 0.317, + 0.105, + 0.482, + 0.232 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.53, + 0.089, + 0.614, + 0.102 + ], + "angle": 0, + "content": "ReLU P.E. [38]" + }, + { + "type": "image", + "bbox": [ + 0.49, + 0.105, + 0.657, + 0.232 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.732, + 0.09, + 0.761, + 0.102 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image", + "bbox": [ + 0.664, + 0.105, + 0.83, + 0.232 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.143, + 0.233, + 0.224, + 0.309 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.225, + 0.233, + 0.305, + 0.309 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.318, + 0.233, + 0.397, + 0.309 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.399, + 0.233, + 0.48, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.492, + 0.233, + 0.571, + 0.309 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.574, + 0.233, + 0.653, + 0.309 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.666, + 0.233, + 0.745, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.748, + 0.233, + 0.826, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.324, + 0.895, + 0.366 + ], + "angle": 0, + "content": "Figure 6. 
Both SIREN [52] and the ReLU MLP [38] with positional encoding show artifacts when queried at a different resolution than they were trained on. Our model does not. We trained models at a resolution of \\(1024 \\times 1024\\) and queried them at a resolution of \\(512 \\times 512\\)" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.393, + 0.47, + 0.483 + ], + "angle": 0, + "content": "is 36 megabytes (MB), while the size of our full model is only 5 MB. Representing images with our model allows us to scale image size without scaling memory footprint as quickly. In this work we use small images, but the memory saved when reconstructing images at the mega or gigapixel scale would be significant." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.485, + 0.269, + 0.499 + ], + "angle": 0, + "content": "4.4. RGB reconstruction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.508, + 0.47, + 0.658 + ], + "angle": 0, + "content": "In addition to reconstructing the DoLP \\((\\rho)\\), AoLP \\((\\Phi)\\) and \\(\\mathbf{I}_{un}\\) maps with our model, we also present results for reconstructing the original RGB images captured by the camera. For a specific polarizing filter angle, we can reconstruct the value of a pixel captured by the camera through that filter using Eq. (1). Our model removes the artifacts present in the reconstructions from all baseline comparisons and retains more detail comparatively. See Fig. 4 for a visualization of reconstructions of images taken through a linear polarizer oriented at \\(0^{\\circ}\\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.66, + 0.365, + 0.676 + ], + "angle": 0, + "content": "4.5. Multiple resolution interpolation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.683, + 0.47, + 0.835 + ], + "angle": 0, + "content": "We present results for fitting an image at one resolution and querying it at a second resolution. In this section we only compare to SIREN [52] and a ReLU MLP [38], as the dynamic tiling strategy of ACORN [34] does not allow us to simply query the representation at a different resolution. We train both models on the original scene at a resolution of \\(1024 \\times 1024\\) and then query them at a resolution of \\(512 \\times 512\\). Both baselines show artifacts when queried at this new resolution, while our model does not have this issue. In Fig. 6 we visualize these results on \\(\\mathbf{I}_{un}\\) maps." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.845, + 0.196, + 0.861 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.47, + 0.901 + ], + "angle": 0, + "content": "In summary, we have presented an attempt at creating neural representations of polarimetric information without" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.393, + 0.893, + 0.499 + ], + "angle": 0, + "content": "the artifacts introduced by current models. Compared to existing methods, our model shows an increase in image reconstruction quality on AoLP, DoLP and \\(\\mathbf{I}_{un}\\) maps, in addition to effectively removing the artifacts we were targeting. Having a compact representation of polarimetric images will facilitate future research in areas where this data is required." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.508, + 0.893, + 0.812 + ], + "angle": 0, + "content": "While our work provides noticeable improvement over current methods, it is not perfect. 
To achieve state of the art performance on reconstructing AoLP maps, we need quite a few bands in our network, which makes the number of parameters quite large compared to other architectures. A valuable next step could be creating a model that could achieve the same performance as ours while cutting down on the memory footprint. Furthermore, we only demonstrated the effectiveness of this approach on 2D data, since polarization is not well studied in three dimensions. Validating our approach on 3D data would be a useful next step, once the field has developed a greater understanding of the underlying physics. We motivated our method using polarimetric data, but there are many types of data in computational imaging [8]. Our method will be valuable in representing multiple physical quantities of a scene at once whenever at least one measurement contains high frequency details or noise, and future research could extending this work by demonstrating its effectiveness on other types of data encountered in computational imaging." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.825, + 0.893, + 0.9 + ], + "angle": 0, + "content": "Acknowledgements We thank members of the Visual Machines Group (VMG) at UCLA for feedback and support. A.K. was supported by an NSF CAREER award IIS-2046737 and Army Young Investigator Program (YIP) Award." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "16586" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.157 + ], + "angle": 0, + "content": "[1] G.A. Atkinson and E.R. Hancock. Recovery of surface orientation from diffuse polarization. IEEE Transactions on Image Processing, 15(6):1653-1664, 2006. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.157, + 0.471, + 0.186 + ], + "angle": 0, + "content": "[2] Gary A Atkinson. Polarisation photometric stereo. Computer Vision and Image Understanding, 160:158-167, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.186, + 0.471, + 0.227 + ], + "angle": 0, + "content": "[3] Gary A Atkinson and Jürgen D Ernst. High-sensitivity analysis of polarization by surface reflection. Machine Vision and Applications, 29(7):1171-1189, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.227, + 0.471, + 0.298 + ], + "angle": 0, + "content": "[4] Benjamin Attal, Selena Ling, Aaron Gokaslan, Christian Richardt, and James Tompkin. Matryodshka: Real-time 6dof video view synthesis using multi-sphere images. In European Conference on Computer Vision, pages 441-459. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.298, + 0.471, + 0.355 + ], + "angle": 0, + "content": "[5] Matan Atzmon and Yaron Lipman. Sal: Sign agnostic learning of shapes from raw data. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2565-2574, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.355, + 0.471, + 0.424 + ], + "angle": 0, + "content": "[6] Dejan Azinović, Olivier Maury, Christophe Hery, Matthias Nießner, and Justus Thies. High-res facial appearance capture from polarized smartphone images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2023. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.424, + 0.471, + 0.493 + ], + "angle": 0, + "content": "[7] Yunhao Ba, Alex Gilbert, Franklin Wang, Jina Yang, Rui Chen, Yiqin Wang, Lei Yan, Boxin Shi, and Achuta Kadambi. Deep shape from polarization. In European Conference on Computer Vision, pages 554-571. Springer, 2020. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.493, + 0.471, + 0.523 + ], + "angle": 0, + "content": "[8] Ayush Bhandari, Achuta Kadambi, and Ramesh Raskar. Computational Imaging. The MIT Press, 2022. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.523, + 0.471, + 0.592 + ], + "angle": 0, + "content": "[9] Michael Broxton, John Flynn, Ryan Overbeck, Daniel Erickson, Peter Hedman, Matthew Duvall, Jason Dourgarian, Jay Busch, Matt Whalen, and Paul Debevec. Immersive light field video with a layered mesh representation. ACM Transactions on Graphics (TOG), 39(4):86-1, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.592, + 0.471, + 0.663 + ], + "angle": 0, + "content": "[10] Rohan Chabra, Jan E Lenssen, Eddy Ilg, Tanner Schmidt, Julian Straub, Steven Lovegrove, and Richard Newcombe. Deep local shapes: Learning local sdf priors for detailed 3d reconstruction. In European Conference on Computer Vision, pages 608-625. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.663, + 0.471, + 0.746 + ], + "angle": 0, + "content": "[11] Eric R. Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. Pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5799-5809, June 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.746, + 0.471, + 0.815 + ], + "angle": 0, + "content": "[12] Tongbo Chen, Hendrik P. A. Lensch, Christian Fuchs, and Hans-Peter Seidel. Polarization and phase-shifting for 3d scanning of translucent objects. In 2007 IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8, 2007. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.816, + 0.471, + 0.873 + ], + "angle": 0, + "content": "[13] Zhiqin Chen and Hao Zhang. Learning implicit fields for generative shape modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5939-5948, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.873, + 0.471, + 0.903 + ], + "angle": 0, + "content": "[14] Akshit Dave, Yongyi Zhao, and Ashok Veeraraghavan. Pandora: Polarization-aided neural decomposition of radiance." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.115, + 0.471, + 0.903 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.135 + ], + "angle": 0, + "content": "In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part VII, pages 538-556. Springer, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.894, + 0.205 + ], + "angle": 0, + "content": "[15] O. Drbohlav and R. Sara. Unambiguous determination of shape from photometric stereo with unknown light sources. In Proceedings Eighth IEEE International Conference on Computer Vision. ICCV 2001, volume 1, pages 581-586 vol.1, 2001. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.206, + 0.892, + 0.275 + ], + "angle": 0, + "content": "[16] SM Ali Eslami, Danilo Jimenez Rezende, Frederic Besse, Fabio Viola, Ari S Morcos, Marta Garnelo, Avraham Ruderman, Andrei A Rusu, Ivo Danihelka, Karol Gregor, et al. Neural scene representation and rendering. Science, 360(6394):1204-1210, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.276, + 0.892, + 0.359 + ], + "angle": 0, + "content": "[17] John Flynn, Michael Broxton, Paul Debevec, Matthew DuVall, Graham Fyffe, Ryan Overbeck, Noah Snavely, and Richard Tucker. Deepview: View synthesis with learned gradient descent. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2367-2376, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.36, + 0.892, + 0.432 + ], + "angle": 0, + "content": "[18] Kyle Genova, Forrester Cole, Avneesh Sud, Aaron Sarna, and Thomas Funkhouser. Local deep implicit functions for 3d shape. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4857-4866, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.432, + 0.892, + 0.503 + ], + "angle": 0, + "content": "[19] Kyle Genova, Forrester Cole, Daniel Vlasic, Aaron Sarna, William T Freeman, and Thomas Funkhouser. Learning shape templates with structured implicit functions. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7154-7164, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.503, + 0.892, + 0.587 + ], + "angle": 0, + "content": "[20] Ciriaco Goddi, Ivan Martí-Vidal, Hugo Messias, Geoffrey C Bower, Avery E Broderick, Jason Dexter, Daniel P Marrone, Monika Moscibrodzka, Hiroshi Nagai, Juan Carlos Algaba, et al. Polarimetric properties of event horizon telescope targets from alma. The Astrophysical Journal Letters, 910(1):L14, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.587, + 0.892, + 0.63 + ], + "angle": 0, + "content": "[21] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit geometric regularization for learning shapes. arXiv preprint arXiv:2002.10099, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.63, + 0.892, + 0.687 + ], + "angle": 0, + "content": "[22] Peter Hedman, Julien Philip, True Price, Jan-Michael Frahm, George Drettakis, and Gabriel Brostow. Deep blending for free-viewpoint image-based rendering. ACM Transactions on Graphics (TOG), 37(6):1-15, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.687, + 0.892, + 0.745 + ], + "angle": 0, + "content": "[23] Philipp Henzler, Niloy J Mitra, and Tobias Ritschel. Escaping Plato's cave: 3d shape from adversarial rendering. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9984-9993, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.745, + 0.892, + 0.814 + ], + "angle": 0, + "content": "[24] Cong Phuoc Huynh, Antonio Robles-Kelly, and Edwin Hancock. Shape and refractive index recovery from single-view polarisation images. In 2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 1229-1236, 2010. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.814, + 0.892, + 0.873 + ], + "angle": 0, + "content": "[25] Cong Phuoc Huynh, Antonio Robles-Kelly, and Edwin R Hancock. Shape and refractive index from single-view spectro-polarimetric images. International journal of computer vision, 101(1):64-94, 2013. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.873, + 0.892, + 0.903 + ], + "angle": 0, + "content": "[26] Chiyu Jiang, Avneesh Sud, Ameesh Makadia, Jingwei Huang, Matthias Nießner, Thomas Funkhouser, et al. Local" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.903 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "16587" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.472, + 0.134 + ], + "angle": 0, + "content": "implicit grid representations for 3d scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6001-6010, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.136, + 0.47, + 0.191 + ], + "angle": 0, + "content": "[27] Achuta Kadambi, Vage Taamazyan, Boxin Shi, and Ramesh Raskar. Polarized 3d: High-quality depth sensing with polarization cues. In Proceedings of the IEEE International Conference on Computer Vision, pages 3370-3378, 2015. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.193, + 0.471, + 0.247 + ], + "angle": 0, + "content": "[28] Samruddhi Kahu and Reena Rahate. Image compression using singular value decomposition. International Journal of Advancements in Research & Technology, 2(8):244-248, 2013. 1, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.25, + 0.471, + 0.331 + ], + "angle": 0, + "content": "[29] Agastya Kalra, Vage Taamazyan, Supreeth Krishna Rao, Kartik Venkataraman, Ramesh Raskar, and Achuta Kadambi. Deep polarization cues for transparent object segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.334, + 0.471, + 0.404 + ], + "angle": 0, + "content": "[30] Shaohui Liu, Yinda Zhang, Songyou Peng, Boxin Shi, Marc Pollefeys, and Zhaopeng Cui. Dist: Rendering deep implicit signed distance function with differentiable sphere tracing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2019-2028, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.405, + 0.469, + 0.46 + ], + "angle": 0, + "content": "[31] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. arXiv preprint arXiv:1906.07751, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.462, + 0.469, + 0.517 + ], + "angle": 0, + "content": "[32] Tomohiro Maeda, Achuta Kadambi, Yoav Y Schechner, and Ramesh Raskar. Dynamic heterodyne interferometry. In 2018 IEEE International Conference on Computational Photography (ICCP), pages 1-11. IEEE, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.519, + 0.469, + 0.575 + ], + "angle": 0, + "content": "[33] Ali H. Mahmoud, Moumen T. El-Melegy, and Aly A. Farag. Direct method for shape recovery from polarization and shading. In 2012 19th IEEE International Conference on Image Processing, pages 1769-1772, 2012. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.577, + 0.469, + 0.645 + ], + "angle": 0, + "content": "[34] Julien N. P. Martel, David B. Lindell, Connor Z. Lin, Eric R. Chan, Marco Monteiro, and Gordon Wetzstein. Acorn: Adaptive coordinate networks for neural scene representation. ACM Trans. Graph. (SIGGRAPH), 40(4), 2021. 
2, 3, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.647, + 0.469, + 0.716 + ], + "angle": 0, + "content": "[35] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4460-4470, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.718, + 0.469, + 0.8 + ], + "angle": 0, + "content": "[36] Ben Mildenhall, Peter Hedman, Ricardo Martin-Brualla, Pratul P Srinivasan, and Jonathan T Barron. Nerf in the dark: High dynamic range view synthesis from noisy raw images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16190-16199, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.803, + 0.469, + 0.871 + ], + "angle": 0, + "content": "[37] Ben Mildenhall, Pratul P Srinivasan, Rodrigo Ortiz-Cayon, Nima Khademi Kalantari, Ravi Ramamoorthi, Ren Ng, and Abhishek Kar. Local light field fusion: Practical view synthesis with prescriptive sampling guidelines. ACM Transactions on Graphics (TOG), 38(4):1-14, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[38] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf:" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "Representing scenes as neural radiance fields for view synthesis. In European conference on computer vision, pages 405-421. Springer, 2020. 2, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.189 + ], + "angle": 0, + "content": "[39] Miyazaki, Tan, Hara, and Ikeuchi. Polarization-based inverse rendering from a single view. In Proceedings Ninth IEEE International Conference on Computer Vision, pages 982-987 vol.2, 2003. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.191, + 0.892, + 0.245 + ], + "angle": 0, + "content": "[40] D. Miyazaki, M. Kagesawa, and K. Ikeuchi. Transparent surface modeling from a pair of polarization images. IEEE Transactions on Pattern Analysis and Machine Intelligence, 26(1):73-82, 2004. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.247, + 0.892, + 0.301 + ], + "angle": 0, + "content": "[41] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (ToG), 41(4):1-15, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.303, + 0.892, + 0.356 + ], + "angle": 0, + "content": "[42] Shree K Nayar, Xi-Sheng Fang, and Terrance Boult. Separation of reflection components using color and polarization. International Journal of Computer Vision, 21(3):163-186, 1997. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.359, + 0.892, + 0.413 + ], + "angle": 0, + "content": "[43] Trung Ngo Thanh, Hajime Nagahara, and Rin-ichiro Taniguchi. Shape and light directions from shading and polarization. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2015. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.415, + 0.892, + 0.482 + ], + "angle": 0, + "content": "[44] Thu H Nguyen-Phuoc, Christian Richardt, Long Mai, Yongliang Yang, and Niloy Mitra. Blockgan: Learning 3d object-aware scene representations from unlabelled images. Advances in Neural Information Processing Systems, 33:6767-6778, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.484, + 0.892, + 0.552 + ], + "angle": 0, + "content": "[45] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Occupancy flow: 4d reconstruction by learning particle dynamics. In Proceedings of the IEEE/CVF international conference on computer vision, pages 5379-5389, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.554, + 0.892, + 0.622 + ], + "angle": 0, + "content": "[46] Michael Oechsle, Lars Mescheder, Michael Niemeyer, Thilo Strauss, and Andreas Geiger. Texture fields: Learning texture representations in function space. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4531-4540, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.624, + 0.892, + 0.692 + ], + "angle": 0, + "content": "[47] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 165-174, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.693, + 0.892, + 0.844 + ], + "angle": 0, + "content": "[48] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems 32, pages 8024-8035. Curran Associates, Inc., 2019. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.846, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[49] Songyou Peng, Michael Niemeyer, Lars Mescheder, Marc Pollefeys, and Andreas Geiger. Convolutional occupancy networks. In European Conference on Computer Vision, pages 523-540. Springer, 2020. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "16588" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.468, + 0.134 + ], + "angle": 0, + "content": "[50] Gernot Riegler and Vladlen Koltun. Free view synthesis. In European Conference on Computer Vision, pages 623-640. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.136, + 0.468, + 0.205 + ], + "angle": 0, + "content": "[51] Y.Y. Schechner, S.G. Narasimhan, and S.K. Nayar. Instant dehazing of images using polarization. In Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. CVPR 2001, volume 1, pages I-I, 2001. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.207, + 0.468, + 0.274 + ], + "angle": 0, + "content": "[52] Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. 
Implicit neural representations with periodic activation functions. Advances in Neural Information Processing Systems, 33:7462-7473, 2020. 2, 3, 4, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.277, + 0.468, + 0.346 + ], + "angle": 0, + "content": "[53] Vincent Sitzmann, Justus Thies, Felix Heide, Matthias Nießner, Gordon Wetzstein, and Michael Zollhofer. Deepvoxels: Learning persistent 3d feature embeddings. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2437-2446, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.348, + 0.468, + 0.403 + ], + "angle": 0, + "content": "[54] Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3d-structure-aware neural scene representations. Advances in Neural Information Processing Systems, 32, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.405, + 0.468, + 0.459 + ], + "angle": 0, + "content": "[55] Daniel Teo, Boxin Shi, Yinqiang Zheng, and Sai-Kit Yeung. Self-calibrating polarising radiometric calibration. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.461, + 0.468, + 0.514 + ], + "angle": 0, + "content": "[56] Justus Thies, Michael Zollhöfer, and Matthias Nießner. Deferred neural rendering: Image synthesis using neural textures. ACM Transactions on Graphics (TOG), 38(4):1-12, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.517, + 0.468, + 0.559 + ], + "angle": 0, + "content": "[57] Tali Treibitz and Yoav Y. Schechner. Active polarization rescattering. IEEE Transactions on Pattern Analysis and Machine Intelligence, 31(3):385-399, 2009. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.561, + 0.468, + 0.627 + ], + "angle": 0, + "content": "[58] Zhen Wang, Shijie Zhou, Jeong Joon Park, Despoina Paschalidou, Suya You, Gordon Wetzstein, Leonidas Guibas, and Achuta Kadambi. Alto: Alternating latent topologies for implicit 3d reconstruction. arXiv preprint arXiv:2212.04096, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.63, + 0.468, + 0.671 + ], + "angle": 0, + "content": "[59] Lawrence B Wolff. Polarization vision: a new sensory approach to image understanding. Image and Vision computing, 15(2):81-93, 1997. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.673, + 0.468, + 0.741 + ], + "angle": 0, + "content": "[60] Xiuming Zhang, Sean Fanello, Yun-Ta Tsai, Tiancheng Sun, Tianfan Xue, Rohit Pandey, Sergio Orts-Escalano, Philip Davidson, Christoph Rhemann, Paul Debevec, et al. Neural light transport for relighting and view synthesis. ACM Transactions on Graphics (TOG), 40(1):1-17, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.743, + 0.468, + 0.784 + ], + "angle": 0, + "content": "[61] Chu Zhou, Minggui Teng, Yufei Han, Chao Xu, and Boxin Shi. Learning to dehaze with polarization. Advances in Neural Information Processing Systems, 34, 2021. 
3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.468, + 0.784 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.956 + ], + "angle": 0, + "content": "16589" + } + ] +] \ No newline at end of file diff --git a/2023/pCON_ Polarimetric Coordinate Networks for Neural Scene Representations/762a8e4d-373c-4bab-83f9-f1ad8a1ae928_origin.pdf b/2023/pCON_ Polarimetric Coordinate Networks for Neural Scene Representations/762a8e4d-373c-4bab-83f9-f1ad8a1ae928_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..1304ad253bf3794b1f2b0b2efcd5c4b8c0d529dc --- /dev/null +++ b/2023/pCON_ Polarimetric Coordinate Networks for Neural Scene Representations/762a8e4d-373c-4bab-83f9-f1ad8a1ae928_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e695a529cab30176a183abb0ab8f467fd64a1a7876e1bef301f28f94440bd53 +size 8843902 diff --git a/2023/pCON_ Polarimetric Coordinate Networks for Neural Scene Representations/full.md b/2023/pCON_ Polarimetric Coordinate Networks for Neural Scene Representations/full.md new file mode 100644 index 0000000000000000000000000000000000000000..0bd429c5db70c399496b95509c32e84d11aaf04a --- /dev/null +++ b/2023/pCON_ Polarimetric Coordinate Networks for Neural Scene Representations/full.md @@ -0,0 +1,442 @@ +# pCON: Polarimetric Coordinate Networks for Neural Scene Representations + +Henry Peters\*,1, Yunhao Ba\*,2, Achuta Kadambi\*1,2 + +1Computer Science Department, University of California, Los Angeles (UCLA) + +$^{2}$ Electrical and Computer Engineering Department, UCLA + +hpeters@ucla.edu, yhba@ucla.edu, achuta@ee.ucla.edu + +# Abstract + +Neural scene representations have achieved great success in parameterizing and reconstructing images, but current state of the art models are not optimized with the preservation of physical quantities in mind. While current architectures can reconstruct color images correctly, they create artifacts when trying to fit maps of polar quantities. We propose polarimetric coordinate networks (pCON), a new model architecture for neural scene representations aimed at preserving polarimetric information while accurately parameterizing the scene. Our model removes artifacts created by current coordinate network architectures when reconstructing three polarimetric quantities of interest. All code and data can be found at this link: https://visual.ee.ucla.edu/pcon.htm. + +# 1. Introduction + +Neural scene representations are a popular and useful tool in many computer vision tasks, but these models are optimized to preserve visual content, not physical information. Current state-of-the-art models create artifacts due to the presence of a large range of spatial frequencies when reconstructing polarimetric data. Many tasks in polarimetric imaging rely on precise measurements, and thus even small artifacts are a hindrance for downstream tasks that would like to leverage neural reconstructions of polarization images. In this work we present pCON, a new architecture for neural scene representations. pCON leverages images' singular value decompositions to effectively allocate network capacity to learning the more difficult spatial frequencies at each pixel. Our model reconstructs polarimetric images without the artifacts introduced by state-of-the-art models. 
+ +The polarization of light passing through a scene contains a wealth of information, and while current neural representations can represent single images accurately, but they produce noticeable visual artifacts when trying to represent + +multiple polarimetric quantities concurrently. + +We propose a new architecture for neural scene representations that can effectively reconstruct polarimetric images without artifacts. Our model reconstructs color images accurately while also ensuring the quality of three important polarimetric quantities, the degree $(\rho)$ and angle $(\phi)$ of linear polarization (DoLP and AoLP), and the unpolarized intensity $I_{un}$ . This information is generally captured using images of a scene taken through linear polarizing filters at four different angles. Instead of learning a representation of these images, our model operates directly on the DoLP, AoLP and unpolarized intensity maps. When learning to fit these images, current coordinate network architectures produce artifacts in the predicted DoLP and unpolarized intensity maps. To alleviate this issue, we take inspiration from traditional image compression techniques and fit images using their singular value decompositions. Images can be compressed by reconstructing them using only a subset of their singular values [28]. By utilizing different, non-overlapping sets of singular values to reconstruct an image, the original image can be recovered by summing the individual reconstructions together. Our model is supervised in a coarse-to-fine manner, which helps the model to represent both the low and high frequency details present in maps of polarimetric quantities without introducint noise or tiling artifacts. A demonstration of the efficacy our model can be seen in Fig. 1 and Table 1. Furthermore, our model is capable of representing images at varying levels of detail, creating a tradeoff between performance and model size without retraining. + +# 1.1. Contributions + +To summarize, the contributions of our work include: + +- a coordinate network architecture for neural scene representations of polarimetric images; +- a training strategy for our network which learns a series of representations using different sets of singular values, allowing for a trade-off between performance and model size without retraining; + +![](images/02728a7bd97bf70d83e908f9c7d4024761e0668f0685986a3e4b5e84b089a572.jpg) + +![](images/a723e30dcd4a5344d5b0f93b1f6f63fd91913e15137a08d35fa2997b2ec60923.jpg) +GT + +![](images/6ec418072ba275e19e442ec3ff2af11448aa68989cdba9ddf7748a1d2ef590e8.jpg) + +![](images/698fde1817b754765f8fdf902b1ca60cafd29d67dd04f421813d2bf59ccb134b.jpg) +SIREN [52] + +![](images/dbb35906ed7f177af3fc44779e4ae46970f959ec5c25a271294599f1fb91f5b3.jpg) + +![](images/7184b56b20ddc195b635ac1241896d3248b20ea336cfe88437f2cff2a537304e.jpg) +ACORN [34] + +![](images/c9e9aa7da74cec2789601569d4debbc3190a5d1f93f3ddb1fadad321e05326dd.jpg) + +![](images/d01da4bf678750ed3a2f92a7850c772288c407bb76790b460574d26e49c6fb4a.jpg) +ReLU P.E +Figure 1. Our model reconstructs the training scene more accurately than other architectures. Our model does not have the noise pattern present in reconstructions from SIREN [52] or a ReLU MLP with positional encoding [38], nor does it show tiling artifacts as in ACORN's [34] prediction. 
+ +![](images/2e6ce86ef45c7a0f29ff4a6718fc698b4411a77fb7ec4a74bec36760446ac21c.jpg) + +![](images/736438e7bbd7e9e917f46f446a2213d208cd203a763e3145224b1ba67865d43a.jpg) +Ours + +- results demonstrating that our model reconstructs maps of polarimetric quantities without the artifacts created by current state-of-the-art approaches. + +# 2. Related work + +# 2.1. Neural scene representations + +The aim of neural scene representations is to parameterize a two or three dimensional scene in the weights of a neural network in order to accomplish some other task related to the scene. Most papers fall into one of three categories. Explicit representations model the scene directly, which allows them to quickly accomplish tasks such as scene reconstruction [9, 31], novel view synthesis [4, 17, 22, 36, 37, 41, 50, 53, 56] or relighting [60]. However, since the scene is modelled explicitly, these representations require more memory than the alternatives. + +Implicit representations do not model the scene directly, but instead use an MLP to map from a coordinate in either 2D or 3D space to some desired output value. This value could be the observed radiance or pixel intensity [11, 16, 38, 44, 46, 54], occupancy of a pixel or voxel [35, 45], a quantity related to shape [5, 10, 13, 18, 19, 21, 23, 26, 30, 44, 47, 54, 58], or any other quantity of interest. The final category of neural scene representations is a hybrid of the first two. The only work that fits directly into this category is ACORN [34], which accomplishes state-of-the-art performance on image and volume fitting by combining a coordinate network with an explicit grid or voxel represent- + +
| Model | Noise Pattern | Tiling Artifacts | Resizing Artifacts |
| --- | --- | --- | --- |
| ACORN [34] | Medium | High | Not Supported |
| ReLU w/P.E. [38] | Medium | None | Yes |
| SIREN [52] | High | None | Yes |
| Proposed | Minimal | None | Minimal |
+ +Table 1. Our model shows fewer artifacts than current state-of-the-art architectures. Since ACORN divides an image into a discrete grid, in order to query an image at a different resolution it is necessary to also reform the grid. The grid is created online during training, so it is not feasible to query a model at a different resolution without retraining. + +tation. Similar to ACORN, other works divide the scene into local regions and learn each of these regions implicitly [10,26,49]. + +To our knowledge, this work is the first to highlight the artifacts caused by existing neural scene representation architectures when fitting polarimetric data. While we are one of the first works to examine polarization and neural scene representations in the same context, we would like to acknowledge that PANDORA [14], a concurrent work, also utilizes polarization and neural scene representations. However, they focus on radiance decomposition rather than 2D reconstruction. + +# 2.2. Polarization vision + +Polarization is useful in a variety of computer vision tasks. It can be used to estimate surface normals [1, 2, + +7, 15, 24, 25, 33, 39, 43] or refine depth maps to represent incredibly fine details [27]. It can be used in radiometric calibration [55], dynamic interferometry [32], facial reconstruction [6] and separation of diffuse and specular reflection [39, 42]. It also can be used to remove the effects of scattering media like haze [51, 57, 61] and water [57], to augment the performance of computer vision tasks in the presence of transparent objects [12, 29, 40], or even to assist in imaging objects in space [20]. Traditionally, polarimetric data is captured by rotating a linear polarizing filter in front of a camera [3, 59], but recent advances in machine vision have produced cameras that can capture multiple polar images in a single shot. + +Our work uses a neural network to accurately parameterize polarimetric information captured from a scene. This allows for easier storage and transport of polarimetric data and facilitates its use in other deep learning based tasks. + +# 3. Method + +# 3.1. Polarization physics + +Polarized light can be modelled as a sine wave, and can thus be parameterized by three quantities. The degree of linear polarization (DoLP) is a quantity between 0 and 1 that represents how much of the total intensity of the wave is polarized and unpolarized. Completely polarized light will have a DoLP of 1, and completely unpolarized light will have a DoLP of 0. The angle of linear polarization (AoLP) corresponds to the orientation of the plane in which the wave is oscillating. The AoLP takes values from 0 to $\pi$ radians. The final quantity of interest is the unpolarized intensity, $I_{un}$ , of the wave, which corresponds to its amplitude. With these three quantities, it is possible to render a scene as viewed through a linear polarization filter at any angle using the following equation: + +$$ +I \left(\phi_ {p o l}\right) = I _ {u n} \left(1 + \rho c o s \left(2 \left(\phi - \phi_ {p o l}\right)\right)\right), \tag {1} +$$ + +where $I_{un}$ denotes unpolarized intensity, $\rho$ denotes DoLP, $\phi$ denotes AoLP and $\phi_{pol}$ denotes the desired filter angle at each pixel. This equation allows us to render images under any number of filter angles by saving only three quantities per pixel. In this paper we leverage the above equation to learn a representation for just these quantities, rather than the four original images. 
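For reference, Eq. (1) can be evaluated per pixel directly from the three stored maps. Below is a minimal NumPy sketch; the function name and array conventions are ours and purely illustrative.

```python
import numpy as np

def render_through_polarizer(i_un, dolp, aolp, phi_pol):
    """Per-pixel intensity behind a linear polarizer at angle phi_pol (Eq. 1).

    i_un   : unpolarized intensity map I_un
    dolp   : degree of linear polarization rho, in [0, 1]
    aolp   : angle of linear polarization phi, in [0, pi)
    phi_pol: polarizer angle in radians (scalar or per-pixel map)
    """
    return i_un * (1.0 + dolp * np.cos(2.0 * (aolp - phi_pol)))

# Example: simulate views through filters at 0, 45, 90 and 135 degrees.
# angles = [0.0, np.pi / 4, np.pi / 2, 3 * np.pi / 4]
# views = [render_through_polarizer(i_un, dolp, aolp, a) for a in angles]
```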
+ +The DoLP $(\rho)$ and AoLP $(\phi)$ have uses beyond just rendering images. In the shape from polarization problem, these quantities are used to calculate the zenith and azimuth angles, respectively, of per-pixel surface normals. This relationship has been studied in previous work [1, 7]. Specifically, the azimuth angle, $\theta_{a}$ , of a surface normal can be + +calculated from the following relationship: + +$$ +\phi = \left\{ \begin{array}{l} \theta_ {a}, \text {w h e n d i f f u s e r e f l e c t i o n d o m i n a t e s} \\ \theta_ {a} - \frac {\pi}{2}, \text {w h e n s p e c u l a r r e f l e c t i o n d o m i n a t e s} \end{array} \right. \tag {2} +$$ + +DoLP, $\rho$ , is related to the zenith angle, $\theta_z$ , in terms of the refractive index, $n$ , of a surface. When diffuse reflection is dominant, the relationship can be written as: + +$$ +\rho = \frac {\left(n - \frac {1}{n}\right) ^ {2} \sin^ {2} \left(\theta_ {z}\right)}{2 + 2 n ^ {2} - \left(n - \frac {1}{n}\right) ^ {2} \sin^ {2} \left(\theta_ {z}\right) + 4 \cos \left(\theta_ {z}\right) \sqrt {n ^ {2} - \sin^ {2} \left(\theta_ {z}\right)}}. \tag {3} +$$ + +When specular reflection dominates, the relationship is different: + +$$ +\rho = \frac {2 \sin^ {2} (\theta_ {z}) \cos (\theta_ {z}) \sqrt {n ^ {2} - \sin^ {2} (\theta_ {z})}}{n ^ {2} - \sin^ {2} (\theta_ {z}) - n ^ {2} \sin^ {2} (\theta_ {z}) + 2 \sin^ {4} (\theta_ {z})}. \qquad (4) +$$ + +$\rho, \phi$ and $I_{un}$ can be calculated directly from a vector known as the Stokes vector at each pixel. This vector has four elements. The first three elements deal with the linear polarization of light, and the final one represents the circular polarization of the wave. In this paper we will focus on linear polarization. To measure the Stokes vector of a scene, at least three images are needed, taken through linear polarizing filters at 0, 45 and 90 degrees. Since the camera used in our setup also captures an image with a filter at 135 degrees, we use four images in our calculations of the Stokes vectors for robustness to noise. + +# 3.2. Learning from coarse to fine + +Current coordinate network architectures produce artifacts when fitting polarimetric images. SIREN [52] and similar architectures treat every coordinate equally when training, and they produce noise patterns in the resulting images when the spatial frequencies present in the training data differ widely (eg. the maximum magnitude frequency differs by an order of magnitude). In the polarimetric images we obtained, we found the maximum frequency magnitude of some AoLP maps was around $10^{7}$ , while the maximum magnitude for the intensity image was only around $10^{6}$ . ACORN [34] does not treat each coordinate in the same way, but its dynamic tiling strategy looks for regions of low variance in order to create larger blocks. This is difficult to do when attempting to fit multiple images containing varying frequencies. The resulting reconstructions end up looking blocky, and fine detail is lost in the process. Our method removes these artifacts by learning image representations using their singular value decompositions. One idea to help in reconstructing high frequency details could be to use an image's Fourier decomposition. We found that in practice the SVD works better for our use case. This is due to the propagation of errors during the forward and inverse + +![](images/61c05bfe9d55edf34492a8775d5f5cae962d9566528b3196c889b8f7ef74ed6b.jpg) +Figure 2. 
pCON learns to fit an image by learning a series of reconstructions with different singular values. The model is organized into a series of $n_b$ parallel MLPs (denoted here as $g_i$ ) with sine activations. A 2D coordinate vector representing a point on an image is passed through all bands separately ( $g_0$ to $g_n$ ). To supervise the training of each band, we reconstruct the full image maps of each quantity, and then calculate the MSE between the model prediction, $\hat{y}_i$ and their respective ground truth values, $y_i$ , at the input coordinate. The final output is the sum of all the intermediate reconstructions, which yields a set of images similar to the training data. + +Fourier transforms. The SVD does not require shifting between the spatial and frequency domains, which allows errors to propagate less than if we were supervising on Fourier frequencies. The singular value decomposition of an $m \times n$ matrix $\mathbf{A}$ is a set of matrices $\mathbf{U} \in \mathbb{R}^{m \times m}$ , $\boldsymbol{\Sigma} \in \mathbb{R}^{m \times n}$ and $\mathbf{V}^{\top} \in \mathbb{R}^{n \times n}$ such that $\mathbf{A} = \mathbf{U} \boldsymbol{\Sigma} \mathbf{V}^{\top}$ . This matrix product can be further decomposed: + +$$ +\begin{array}{l} \mathbf {U} \boldsymbol {\Sigma} \mathbf {V} ^ {\top} = \sum_ {i} ^ {r} \sigma_ {i} \mathbf {u} _ {i} \mathbf {v} _ {i} ^ {\top} \tag {5} \\ = \sum_ {i = 0} ^ {a _ {1}} \sigma_ {i} \mathbf {u} _ {i} \mathbf {v} _ {i} ^ {\top} + \sum_ {i = a _ {1}} ^ {a _ {2}} \sigma_ {i} \mathbf {u} _ {i} \mathbf {v} _ {i} ^ {\top} + \dots + \sum_ {i = a _ {n}} ^ {r} \sigma_ {i} \mathbf {u} _ {i} \mathbf {v} _ {i} ^ {\top} \\ \end{array} +$$ + +where $r$ is the rank of $\mathbf{A}$ , $\mathbf{u}_i$ is the $i$ -th column of $\mathbf{U}$ , $\mathbf{v}_i$ is the $i$ -th column of $\mathbf{V}$ , and $\sigma_i$ is the $i$ -th singular value. In the case of an image, this means that it is possible to calculate different pieces of the decomposition individually, and then sum them to obtain the original image. We leverage this property of the SVD in our model architecture. Using just the largest singular values to reconstruct an image yields a result containing only the low frequency details of the original [28]. As more singular values are used in the reconstruction, higher frequency details are captured. A single coordinate may have features in many reconstructions, and others may have features in only a few. Our network learns a series of reconstructions in parallel, which effectively allocates more model capacity to coordinates which have details at numerous frequencies. Since we are not dividing the image into a grid like ACORN, our reconstruc + +tions do not suffer from tiling artifacts, and they also do not exhibit the obvious noise pattern present in reconstructions from SIREN or ReLU MLPs. + +# 3.3. Network design + +Our network design takes inspiration from SIREN [52]. The original SIREN architecture was similar to an ordinary MLP, except that it used the sine activation function. Our network is divided into a series of $n_b$ fully-connected blocks which map from a 2D input image coordinate to the AoLP $(\phi)$ , DoLP $(\rho)$ and unpolarized intensity $I_{un}$ at that pixel. We call each of these MLPs a band of the network, and we will notate them as $g_i$ for $i \in 0,1,\dots,n_b - 1$ . 
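A minimal PyTorch sketch of this banded layout is given below. The module names are ours, and SIREN-style weight initialization as well as the band-wise SVD supervision described next are omitted for brevity.

```python
import torch
import torch.nn as nn

class Sine(nn.Module):
    """Sine activation with frequency parameter omega_0, as in SIREN-style layers."""
    def __init__(self, omega_0=90.0):
        super().__init__()
        self.omega_0 = omega_0

    def forward(self, x):
        return torch.sin(self.omega_0 * x)

class Band(nn.Module):
    """One band g_i: a small sine-activated MLP from (x, y) to (phi, rho, I_un)."""
    def __init__(self, hidden=256, layers=2, omega_0=90.0):
        super().__init__()
        dims = [2] + [hidden] * layers
        blocks = []
        for d_in, d_out in zip(dims[:-1], dims[1:]):
            blocks += [nn.Linear(d_in, d_out), Sine(omega_0)]
        blocks.append(nn.Linear(hidden, 3))  # AoLP, DoLP, unpolarized intensity
        self.net = nn.Sequential(*blocks)

    def forward(self, coords):
        # coords: (N, 2) normalized pixel coordinates
        return self.net(coords)

class PCON(nn.Module):
    """n_b parallel bands; the full prediction is the sum of all band outputs."""
    def __init__(self, n_bands=10):
        super().__init__()
        self.bands = nn.ModuleList(Band() for _ in range(n_bands))

    def forward(self, coords):
        per_band = [band(coords) for band in self.bands]   # each of shape (N, 3)
        return torch.stack(per_band).sum(dim=0), per_band  # summed output + intermediates
```

Summing the band outputs mirrors the summation of partial SVD reconstructions in Eq. (5).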
To fit an image, we first take the singular value decomposition of the map of each polar quantity: + +$$ +\boldsymbol {\Phi} = \mathbf {U} _ {\phi} \boldsymbol {\Sigma} _ {\phi} \mathbf {V} _ {\phi} ^ {\top}, +$$ + +$$ +\boldsymbol {\rho} = \mathbf {U} _ {\rho} \boldsymbol {\Sigma} _ {\rho} \mathbf {V} _ {\rho} ^ {\top}, \tag {6} +$$ + +$$ +\mathbf {I} _ {u n} = \mathbf {U} _ {u n} \boldsymbol {\Sigma} _ {u n} \mathbf {V} _ {u n} ^ {\top}. +$$ + +$\Phi, \rho$ and $\mathbf{I}_{un}$ represent the full image maps of AoLP $(\phi)$ , DoLP $(\rho)$ and $I_{un}$ , respectively. The above equations are obtained by interpreting these maps as matrices and then using Eq. (5). We now define a series of $n_b$ thresholds for $\Phi, \rho$ and $\mathbf{I}_{un}$ as $t_{\phi,i}, t_{\rho,i}$ and $t_{\mathrm{un},i}$ , respectively. These thresholds dictate which singular values will be used to supervise each band of the network. We also define the ground truth intermediate reconstructions of each quantity using a subset of singular values as $y_{\phi,i}, y_{\rho,i}$ and $y_{\mathrm{un},i}$ . We denote their + +corresponding predictions as $\hat{y}_{\phi,i}$ , $\hat{y}_{\rho,i}$ and $\hat{y}_{\mathrm{un},i}$ . We can use Eq. (5) to decompose each of the SVDs from Eq. (6) into a set of sums. For example, we can write $\Phi$ as follows: + +$$ +y _ {\phi , i} = \sum_ {j = t _ {\phi , i - 1}} ^ {t _ {\phi , i}} \sigma_ {\phi , j} \mathbf {u} _ {\phi , j} \mathbf {v} _ {\phi , j} ^ {\top}. \tag {7} +$$ + +The reconstructions for the other quantities can be written with their respective SVDs and thresholds similar to Eq. (7). + +Each band learns a single reconstruction for these quantities at each pixel. + +$$ +g _ {i} (x, y) = \hat {y} _ {i} = (\hat {y} _ {\phi , i}, \hat {y} _ {\rho , i}, \hat {y} _ {\mathrm {u n}, i}). \tag {8} +$$ + +Here, $x$ and $y$ constitute the 2D pixel coordinate vector that serves as the input to the network. This coordinate is passed through each band of the network to compute all $\hat{y}_i$ , and then the fully reconstructed image is calculated as $\sum_{i}\hat{y}_{i}$ . See Fig. 2 for a visualization of this entire process. + +# 3.4. Loss functions + +Our network outputs a set of $n_b$ images. For each band, we compute the MSE between the cumulative sum of all outputs up to, and including, the current band. We define multiplicative factors for the three polar quantities as $\lambda_{\phi}$ , $\lambda_{\rho}$ and $\lambda_{\mathrm{un}}$ . We also define factors for each band as $\lambda_{b,i}$ . The loss of the network can be calculated as follows, where $L$ is the loss function and $x$ is the data point for which the loss is being calculated: + +$$ +\begin{array}{l} L (x) = \sum_ {i} \lambda_ {b, i} \sum_ {j = 0} ^ {i} \lambda_ {\phi} \left(\hat {y} _ {\phi , j} - y _ {\phi , j}\right) ^ {2} \tag {9} \\ + \lambda_ {\rho} (\hat {y} _ {\rho , j} - y _ {\rho , j}) ^ {2} + \lambda_ {\mathrm {u n}} (\hat {y} _ {\mathrm {u n}, j} - y _ {\mathrm {u n}, j}) ^ {2}. \\ \end{array} +$$ + +# 3.5. Implementation details + +# 3.5.1 Data + +We collected all of our own data using a Flir Blackfly S RGB polarization camera. From this camera's images, it is possible to calculate the desired polarimetric quantities using the physics discussed in Sec. 3.1. We release two datasets with this paper. The first contains the six scenes used to create figures in this paper. The second set contains twenty four additional scenes for use in validating our approach. 
The captured scenes represent a diverse set of polarization effects. The DoLP and AoLP values span the entire ranges (zero to one for DoLP and zero to pi for AoLP) of possible values. We capture interesting polarization phenomena such as transparent and reflective surfaces. All released images have a resolution of $1024 \times 1024$ . + +# 3.5.2 Hyperparameters + +We built all models in PyTorch [48]. We began all experiments with a learning rate of $1 \times 10^{-5}$ , and then multiplied + +it by 0.1 at 5000 epochs. Models were trained for a total of 10000 epochs. We also set the unitless frequency parameter $\omega_0$ of our sine activations to 90. For our best model, we used a total of 10 bands, each with 2 hidden layers and a hidden dimension of 256. + +We chose the singular value thresholds of each band based on the sum of the magnitudes of singular values. Band one was given roughly $90\%$ of the sum, then the others $99\%$ , $99.9\%$ , and so on. Exact values for $\lambda_{b_i}$ used in all presented experiments can be found in the supplement. + +For our experiments, we set $\lambda_{\phi} = 1.0$ , $\lambda_{\rho} = 5.0$ and $\lambda_{\mathrm{un}} = 5.0$ . + +# 4. Experiments + +In this section, we present comparisons between our model, SIREN [52], ACORN [34] and an MLP using ReLU activations and positional encoding, as used in NeRF [38]. We changed the number of parameters and output values of the baseline architectures, since originally these models were designed to fit only a single image at a time. We also changed the frequency parameter $\omega_0$ of the SIREN sine activations to 90 to match the parameter used in our own model. All our models were trained using the training strategy discussed in Sec. 3.5. + +# 4.1. Validation of proposed failure case + +We hypothesized the reason for the poor performance of baseline models when fitting polarimetric images was due to the presence of details at high spatial frequencies in the captured AoLP maps. To validate this hypothesis, we performed low-pass filtering on AoLP maps of a scene and then fit a model on the resulting AoLP, DoLP and $\mathbf{I}_{un}$ maps. We found a clear trend in the reconstruction quality as we filtered out higher percentages of high spatial frequencies. All models performed better when fewer high frequency details were present in the target images. This aligns with our idea that these details create difficult scenes for networks to reconstruct. For the scene in Fig. 3, the AoLP reconstruction SSIMs with different amounts of frequencies removed from the GT AoLP maps can be seen in Table 2. + +
% Highest Frequencies RemovedSIREN [52]ACORN [34]ReLU P.E. [38]
0%0.600.510.63
75%0.540.800.93
80.5%0.890.970.98
93.75%0.950.990.99
+ +Table 2. All baseline models reconstruct AoLP maps better when details at higher spatial frequencies are filtered out. This trend validates our hypothesis that images with high frequency details are more difficult for a network to reconstruct. + +# 4.2. Comparison with others + +We trained both our model and the baselines to predict AoLP $(\Phi)$ , DoLP $(\rho)$ and $\mathbf{I}_{un}$ maps directly. Quali + +![](images/894ac63c8e6f5837dbd9828496ed69bf8abcee697de7b7d44226dcf7419db036.jpg) +GT + +![](images/219672bd146029b50af6bef2d4f2582e90313335177502790d4a8c0ec26fc225.jpg) +SIREN [52] +SSIM/PSNR: 0.60/14.32 + +![](images/4b0ba5d5299bc6bb52b5e971debba80e0322daa05ac40dd28118f14ee7ffe6fb.jpg) +ACORN [34] +SSIM/PSNR: 0.51/15.99 + +![](images/d7ec3e4ed80f1182d343c1558e0a86b338935748308aa4f646857122dca29c5c.jpg) +ReLU P.E. [38] +SSIM/PSNR: 0.63/17.18 + +![](images/78e5251bffc2a2a040676706242845dae107efe0588ed652a9842e431f03c553.jpg) +Ours +SSIM/PSNR: 0.77/16.57 + +![](images/206b75d1ec28eba3527f38730b6db8ec9d66c652c4aa1ba6348f618db675bb11.jpg) +AoLP +DoLP + +![](images/8da038295be72acd62631f8f71f1341c349f38cb63016f8be6e9a1707236e427.jpg) +SSIM/PSNR: 0.73/29.83 + +![](images/9f0017eefa0a779cf8a6cd643d76b9401bc6e8e734907c866007124c1863ac2e.jpg) +SSIM/PSNR: 0.80/31.78 + +![](images/c53b8f4cb5579fdbebc74c94e747e74788f917aca4fc3b41b2d54fa45e445e94.jpg) +SSIM/PSNR: 0.79/32.06 + +![](images/4758abb3f2fc53c31d87da3a469712556e0aa964ce1cf461476be42389ff52d6.jpg) +SSIM: 0.82/34.56 + +![](images/a371f3204275bd6c0aaa77cee788ca9027b40fdd877be2d94b15253e4de5382b.jpg) + +![](images/26056e5fb16626ad87bea0c504957cf7d1f2c8f77583efc5c843907d85af0968.jpg) + +![](images/fb5aa89caaa1b15d862b2f0e9dd53831566d0dd71cbf26be800f07b6a036f5ff.jpg) + +![](images/cfdb3ca828d32759100f4f68dc7ec63365cdce609436299de164a0a4be84ad20.jpg) + +![](images/54c3d7daa3605fe026e756029b47cebd0af1fdfb5af5301a2e340b3df09c10de.jpg) + +![](images/d864f0d4325f7617845db59c764cc4a668493db20c6d71fe8e8e68fb461f1388.jpg) + +![](images/8ac2ef750c0bd8d7d9a189a174af23d9ce98f8e2c538b45959dd3b63f0fcf84a.jpg) +SSIM/PSNR: 0.59/26.42 + +![](images/83870ba8fe998c9046d9ba6feff9bc288e91e9c58ecf5d1946a9989ca00d7288.jpg) +SSIM/PSNR: 0.77/28.43 + +![](images/9b07c75b56190125d26fc54b6df16a8b12f5557b37a655c0abb1afaae6dc9978.jpg) +SSIM/PSNR: 0.71/29.58 + +![](images/8106318c099bf3b71b3b5de39926488f31d3886d0233125d74418fc7ab2d7de5.jpg) +SSIM/PSNR: 0.89/34.82 + +![](images/8a40f8d92246985a22b1bbd5416e3c6b04bd7c1f879a9c5c96bd49ea8d92b904.jpg) +Un +Figure 3. Our model shows higher SSIM and fewer artifacts on predicted $\Phi$ , $\rho$ and $\mathbf{I}_{un}$ maps. Baseline models cause noise or tiling which is clearly visible on the checkerboard pattern on the floor, where all three quantities take large values. The artifacts are present on objects exhibiting both specular reflections, like the floor, and diffuse reflections, like the wall and doors in the background. + +![](images/98d88428449630d099696dea35ad62ee492fd2d7db84c6cd0b76212e63e369e6.jpg) + +![](images/341859f58aaab3e72eb3cd4651d7fa10cf3bfee85d205741c9ba500ac7aee56b.jpg) + +![](images/f6a518f6b76776f65c51a7106d12d995e4d0ed70541d258e2b0b3198091be6c1.jpg) + +![](images/be7421ee9c4e38f2f692300c794cf956350e3925399473df3b793a58c41c22b4.jpg) + +![](images/e67bcc94689dddb038c7f79dcb9e3bb084b487f258ce89580cb074e29b0004d6.jpg) +Figure 4. 
Our model can more accurately reconstruct RGB images taken through different polarizing filter angles when compared to SIREN [52], ACORN [34] and a ReLU MLP [38] with positional encoding. The images reconstructed here are the scene as viewed through a linear polarizer oriented at $0^{\circ}$ + +![](images/66d00254f3228d5be010764d4712f8d1062b608523cc0f37c91b6b3e0ac3eaa8.jpg) + +![](images/e6a40800a18645bf213449b0c15d773fb744e8b6d83c4b2f53a38cb261f74753.jpg) + +![](images/31487e577aaf55b395f5f8d380eb899fad8d8d5345f20e470895f563cdf1ea7d.jpg) +GT + +![](images/c84cf0120ade6da08702c7a070e1ef93891f5e4b3a5bd60618f42cb6112b85de.jpg) + +![](images/649f17527a60f65c786f89ea05a02ce34d05256fb198b59a516718bbad72dbe0.jpg) + +![](images/12614e630b9e65dc6eac9610fd8e867c0203de53f61a18a75de8bacf03bfcd98.jpg) +1 band + +![](images/abb8ce532ae5b737c544cde659a2e027d0a872c6355b790033fb5f1970b688cf.jpg) + +![](images/622473d89340986fb169b15080195530e6295f6500efa7b0641b28132a4cb8bf.jpg) + +![](images/e70480a6a1d77b181db6326f428bf2a1cd6d827291f234e6cf7bdbfb43593f54.jpg) +4 bands + +![](images/29308e05b1197201051ad12ef8bf555dfbdce11afebf6672d48d1da33d66ba2a.jpg) + +![](images/5f28cc4526343d84b6c91deaa7dfef579598ba1cc0ad0605bc9c4c5bba9ff6ab.jpg) + +![](images/b4ed5028ce6eda169e1a9dd073a0472ee7405d464aeb6c3d83552c279be89412.jpg) +Full model +Figure 5. As the number of bands used in the reconstruction increases, so does the quality of the image. Even with a single band the reconstruction is visually close to the original. + +tative and quantitative results can be found in Fig. 3. Our model performs yeilds better PSNR and SSIM than all baselines and it also does not produce the tiling artifacts or the noise patterns present in the reconstructions created by other models. + +# 4.3. Accuracy and model size trade-off + +In order to fit an image with a smaller or larger model, current architectures require a full retraining with a different number of parameters. The structure of our model allows us to provide a tradeoff between model size and reconstruction + +
ModelΦ(↑)ρ(↑)Iun(↑)# Params. (↓)
Ours (1 band)0.12/10.830.50/22.870.74/26.58130K
Ours (2 bands)0.32/14.660.64/28.400.91/34.74270K
Ours (3 bands)0.42/14.420.65/28.590.92/34.43400K
Ours (4 bands)0.51/16.320.65/28.710.92/34.62530K
Ours (5 bands)0.64/17.680.67/28.870.92/36.74670K
Ours (Full model)0.79/18.080.76/31.750.92/36.001.3M
SIREN [52]0.59/15.960.67/28.200.70/28.23660K
ACORN [34]0.48/17.010.73/29.960.82/29.85530K
ReLU [38] w/P.E.0.64/18.300.76/30.990.81/32.13660K
+ +Table 3. As more bands are used, the number of parameters grows along with the resulting performance (SSIM/PSNR). The metrics shown here are averages across our whole dataset. + +accuracy without retraining. Each band of the model learns a representation of the image when reconstructed with a different set of singular values. If the downstream task doesn't require incredibly high accuracy, and the user would rather save and transport a smaller set of model weights, they can just save the weights from the first band of the network and reconstruct the image with only the singular values from that band, or vice versa if more accuracy is required. A visualization of reconstruction quality using different numbers of bands can be seen in Fig. 5. See Table 3 for quantitative results using different bands of our network. With a similar number of parameters to the baseline models, it achieves comparable performance to all baseline architectures. Our full model outperforms all baselines on predicting AoLP $(\Phi)$ and $\mathbf{I}_{un}$ maps. It is also worth noting that our full model achieves significant compression over storing raw data. The combined memory size of the AoLP, DoLP and $\mathbf{I}_{un}$ maps + +![](images/e8a748f34c8407db99d3fc22351abb39391121ed1ba0deb0ce1f34254235d847.jpg) +GT + +![](images/3a799a3959456abb28f5b11eb6a9db3843f0f60ec9effd7382718067d9c9c661.jpg) +SIREN [52] + +![](images/bdcc38bafd91773a855aea6aa7264f9315c9c900f420c0c31f3cb7679a963096.jpg) +ReLU P.E. [38] + +![](images/6dd43389def5f008bbc1518ccb1cff2e547fc5173a837f82dd4890c785393944.jpg) +Ours + +![](images/1f9978b468cadc6b997f8cb2b6c6a6f96e89c6781160056f51ce6ac3cbb0a60b.jpg) +Figure 6. Both SIREN [52] and the ReLU MLP [38] with positional encoding show artifacts when queried at a different resolution than they were trained on. Our model does not. We trained models at a resolution of $1024 \times 1024$ and queried them at a resolution of $512 \times 512$ + +![](images/6cfc053ac7741db96885e2d107be96e4557b6775b50c8efb2609888200cb0976.jpg) + +![](images/b34f5f4be91ed9ad1dd6457b904dfb8e61a4a817068735796680244200acd835.jpg) + +![](images/8aae601fef50ca2cf4eaff39fc9c4b8eff02bc0cd424f74e3d6cae508c579435.jpg) + +![](images/5ea0d19bad2190dfe707fcb8e515330ea3c3a214e5282e448e15ed67036ea3df.jpg) + +![](images/e25c1b92080f6fd6a635fbb603067a3022b048a5516790978d8761a9809b3850.jpg) + +![](images/3cbda8f9cc88ca0bb2d0da2e5cb4c2f418c68dd635ea70e075017793ff02a7b2.jpg) + +![](images/70d6f0bb7083b1dc106f5d20ecbe104c7c65044cf2b3d2543b917f8de3f9032a.jpg) + +is 36 megabytes (MB), while the size of our full model is only 5 MB. Representing images with our model allows us to scale image size without scaling memory footprint as quickly. In this work we use small images, but the memory saved when reconstructing images at the mega or gigapixel scale would be significant. + +# 4.4. RGB reconstruction + +In addition to reconstructing the DoLP $(\rho)$ , AoLP $(\Phi)$ and $\mathbf{I}_{un}$ maps with our model, we also present results for reconstructing the original RGB images captured by the camera. For a specific polarizing filter angle, we can reconstruct the value of a pixel captured by the camera through that filter using Eq. (1). Our model removes the artifacts present in the reconstructions from all baseline comparisons and retains more detail comparatively. See Fig. 4 for a visualization of reconstructions of images taken through a linear polarizer oriented at $0^{\circ}$ . + +# 4.5. 
Multiple resolution interpolation + +We present results for fitting an image at one resolution and querying it at a second resolution. In this section we only compare to SIREN [52] and a ReLU MLP [38], as the dynamic tiling strategy of ACORN [34] does not allow us to simply query the representation at a different resolution. We train all models on the original scene at a resolution of $1024 \times 1024$ and then query them at a resolution of $512 \times 512$ . Both baselines show artifacts when queried at this new resolution, while our model does not have this issue. In Fig. 6 we visualize these results on $\mathbf{I}_{un}$ maps. + +# 5. Conclusion + +In summary, we have presented an attempt at creating neural representations of polarimetric information without the artifacts introduced by current models. Compared to existing methods, our model shows an increase in image reconstruction quality on AoLP, DoLP and $\mathbf{I}_{un}$ maps, in addition to effectively removing the artifacts we were targeting. Having a compact representation of polarimetric images will facilitate future research in areas where this data is required. + +While our work provides noticeable improvement over current methods, it is not perfect. To achieve state-of-the-art performance on reconstructing AoLP maps, we need quite a few bands in our network, which makes the number of parameters quite large compared to other architectures. A valuable next step would be a model that achieves the same performance as ours while cutting down on the memory footprint. Furthermore, we only demonstrated the effectiveness of this approach on 2D data, since polarization is not well studied in three dimensions. Validating our approach on 3D data would be a useful next step, once the field has developed a greater understanding of the underlying physics. We motivated our method using polarimetric data, but there are many types of data in computational imaging [8]. Our method will be valuable in representing multiple physical quantities of a scene at once whenever at least one measurement contains high frequency details or noise, and future research could extend this work by demonstrating its effectiveness on other types of data encountered in computational imaging. + +Acknowledgements We thank members of the Visual Machines Group (VMG) at UCLA for feedback and support. A.K. was supported by an NSF CAREER award IIS-2046737 and Army Young Investigator Program (YIP) Award. + +# References + +[1] G.A. Atkinson and E.R. Hancock. Recovery of surface orientation from diffuse polarization. IEEE Transactions on Image Processing, 15(6):1653-1664, 2006. 2, 3 +[2] Gary A Atkinson. Polarisation photometric stereo. Computer Vision and Image Understanding, 160:158-167, 2017. 2 +[3] Gary A Atkinson and Jürgen D Ernst. High-sensitivity analysis of polarization by surface reflection. Machine Vision and Applications, 29(7):1171-1189, 2018. 3 +[4] Benjamin Attal, Selena Ling, Aaron Gokaslan, Christian Richardt, and James Tompkin. Matryodshka: Real-time 6dof video view synthesis using multi-sphere images. In European Conference on Computer Vision, pages 441-459. Springer, 2020. 2 +[5] Matan Atzmon and Yaron Lipman. Sal: Sign agnostic learning of shapes from raw data. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2565-2574, 2020. 2 +[6] Dejan Azinović, Olivier Maury, Christophe Hery, Matthias Nießner, and Justus Thies. High-res facial appearance capture from polarized smartphone images.
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2023. 3 +[7] Yunhao Ba, Alex Gilbert, Franklin Wang, Jina Yang, Rui Chen, Yiqin Wang, Lei Yan, Boxin Shi, and Achuta Kadambi. Deep shape from polarization. In European Conference on Computer Vision, pages 554-571. Springer, 2020. 2, 3 +[8] Ayush Bhandari, Achuta Kadambi, and Ramesh Raskar. Computational Imaging. The MIT Press, 2022. 8 +[9] Michael Broxton, John Flynn, Ryan Overbeck, Daniel Erickson, Peter Hedman, Matthew Duvall, Jason Dourgarian, Jay Busch, Matt Whalen, and Paul Debevec. Immersive light field video with a layered mesh representation. ACM Transactions on Graphics (TOG), 39(4):86-1, 2020. 2 +[10] Rohan Chabra, Jan E Lenssen, Eddy Ilg, Tanner Schmidt, Julian Straub, Steven Lovegrove, and Richard Newcombe. Deep local shapes: Learning local sdf priors for detailed 3d reconstruction. In European Conference on Computer Vision, pages 608-625. Springer, 2020. 2 +[11] Eric R. Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. Pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5799-5809, June 2021. 2 +[12] Tongbo Chen, Hendrik P. A. Lensch, Christian Fuchs, and Hans-Peter Seidel. Polarization and phase-shifting for 3d scanning of translucent objects. In 2007 IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8, 2007. 3 +[13] Zhiqin Chen and Hao Zhang. Learning implicit fields for generative shape modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5939-5948, 2019. 2 +[14] Akshit Dave, Yongyi Zhao, and Ashok Veeraraghavan. Pandora: Polarization-aided neural decomposition of radiance. + +In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part VII, pages 538-556. Springer, 2022. 2 +[15] O. Drbohlav and R. Sara. Unambiguous determination of shape from photometric stereo with unknown light sources. In Proceedings Eighth IEEE International Conference on Computer Vision. ICCV 2001, volume 1, pages 581-586 vol.1, 2001. 2 +[16] SM Ali Eslami, Danilo Jimenez Rezende, Frederic Besse, Fabio Viola, Ari S Morcos, Marta Garnelo, Avraham Ruderman, Andrei A Rusu, Ivo Danihelka, Karol Gregor, et al. Neural scene representation and rendering. Science, 360(6394):1204-1210, 2018. 2 +[17] John Flynn, Michael Broxton, Paul Debevec, Matthew DuVall, Graham Fyffe, Ryan Overbeck, Noah Snavely, and Richard Tucker. Deepview: View synthesis with learned gradient descent. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2367-2376, 2019. 2 +[18] Kyle Genova, Forrester Cole, Avneesh Sud, Aaron Sarna, and Thomas Funkhouser. Local deep implicit functions for 3d shape. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4857-4866, 2020. 2 +[19] Kyle Genova, Forrester Cole, Daniel Vlasic, Aaron Sarna, William T Freeman, and Thomas Funkhouser. Learning shape templates with structured implicit functions. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7154-7164, 2019. 2 +[20] Ciriaco Goddi, Ivan Martí-Vidal, Hugo Messias, Geoffrey C Bower, Avery E Broderick, Jason Dexter, Daniel P Marrone, Monika Moscibrodzka, Hiroshi Nagai, Juan Carlos Algaba, et al. Polarimetric properties of event horizon telescope targets from alma. 
The Astrophysical Journal Letters, 910(1):L14, 2021. 3 +[21] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit geometric regularization for learning shapes. arXiv preprint arXiv:2002.10099, 2020. 2 +[22] Peter Hedman, Julien Philip, True Price, Jan-Michael Frahm, George Drettakis, and Gabriel Brostow. Deep blending for free-viewpoint image-based rendering. ACM Transactions on Graphics (TOG), 37(6):1-15, 2018. 2 +[23] Philipp Henzler, Niloy J Mitra, and Tobias Ritschel. Escaping Plato's cave: 3d shape from adversarial rendering. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9984-9993, 2019. 2 +[24] Cong Phuoc Huynh, Antonio Robles-Kelly, and Edwin Hancock. Shape and refractive index recovery from single-view polarisation images. In 2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 1229-1236, 2010. 2 +[25] Cong Phuoc Huynh, Antonio Robles-Kelly, and Edwin R Hancock. Shape and refractive index from single-view spectro-polarimetric images. International journal of computer vision, 101(1):64-94, 2013. 2 +[26] Chiyu Jiang, Avneesh Sud, Ameesh Makadia, Jingwei Huang, Matthias Nießner, Thomas Funkhouser, et al. Local + +implicit grid representations for 3d scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6001-6010, 2020. 2 +[27] Achuta Kadambi, Vage Taamazyan, Boxin Shi, and Ramesh Raskar. Polarized 3d: High-quality depth sensing with polarization cues. In Proceedings of the IEEE International Conference on Computer Vision, pages 3370-3378, 2015. 3 +[28] Samruddhi Kahu and Reena Rahate. Image compression using singular value decomposition. International Journal of Advancements in Research & Technology, 2(8):244-248, 2013. 1, 4 +[29] Agastya Kalra, Vage Taamazyan, Supreeth Krishna Rao, Kartik Venkataraman, Ramesh Raskar, and Achuta Kadambi. Deep polarization cues for transparent object segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 3 +[30] Shaohui Liu, Yinda Zhang, Songyou Peng, Boxin Shi, Marc Pollefeys, and Zhaopeng Cui. Dist: Rendering deep implicit signed distance function with differentiable sphere tracing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2019-2028, 2020. 2 +[31] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. arXiv preprint arXiv:1906.07751, 2019. 2 +[32] Tomohiro Maeda, Achuta Kadambi, Yoav Y Schechner, and Ramesh Raskar. Dynamic heterodyne interferometry. In 2018 IEEE International Conference on Computational Photography (ICCP), pages 1-11. IEEE, 2018. 3 +[33] Ali H. Mahmoud, Moumen T. El-Melegy, and Aly A. Farag. Direct method for shape recovery from polarization and shading. In 2012 19th IEEE International Conference on Image Processing, pages 1769-1772, 2012. 2 +[34] Julien N. P. Martel, David B. Lindell, Connor Z. Lin, Eric R. Chan, Marco Monteiro, and Gordon Wetzstein. Acorn: Adaptive coordinate networks for neural scene representation. ACM Trans. Graph. (SIGGRAPH), 40(4), 2021. 2, 3, 5, 6, 7, 8 +[35] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4460-4470, 2019. 
2 +[36] Ben Mildenhall, Peter Hedman, Ricardo Martin-Brualla, Pratul P Srinivasan, and Jonathan T Barron. Nerf in the dark: High dynamic range view synthesis from noisy raw images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16190-16199, 2022. 2 +[37] Ben Mildenhall, Pratul P Srinivasan, Rodrigo Ortiz-Cayon, Nima Khademi Kalantari, Ravi Ramamoorthi, Ren Ng, and Abhishek Kar. Local light field fusion: Practical view synthesis with prescriptive sampling guidelines. ACM Transactions on Graphics (TOG), 38(4):1-14, 2019. 2 +[38] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: + +Representing scenes as neural radiance fields for view synthesis. In European conference on computer vision, pages 405-421. Springer, 2020. 2, 5, 6, 7, 8 +[39] Miyazaki, Tan, Hara, and Ikeuchi. Polarization-based inverse rendering from a single view. In Proceedings Ninth IEEE International Conference on Computer Vision, pages 982-987 vol.2, 2003. 2, 3 +[40] D. Miyazaki, M. Kagesawa, and K. Ikeuchi. Transparent surface modeling from a pair of polarization images. IEEE Transactions on Pattern Analysis and Machine Intelligence, 26(1):73-82, 2004. 3 +[41] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (ToG), 41(4):1-15, 2022. 2 +[42] Shree K Nayar, Xi-Sheng Fang, and Terrance Boult. Separation of reflection components using color and polarization. International Journal of Computer Vision, 21(3):163-186, 1997. 3 +[43] Trung Ngo Thanh, Hajime Nagahara, and Rin-ichiro Taniguchi. Shape and light directions from shading and polarization. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2015. 2 +[44] Thu H Nguyen-Phuoc, Christian Richardt, Long Mai, Yongliang Yang, and Niloy Mitra. Blockgan: Learning 3d object-aware scene representations from unlabelled images. Advances in Neural Information Processing Systems, 33:6767-6778, 2020. 2 +[45] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Occupancy flow: 4d reconstruction by learning particle dynamics. In Proceedings of the IEEE/CVF international conference on computer vision, pages 5379-5389, 2019. 2 +[46] Michael Oechsle, Lars Mescheder, Michael Niemeyer, Thilo Strauss, and Andreas Geiger. Texture fields: Learning texture representations in function space. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4531-4540, 2019. 2 +[47] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 165-174, 2019. 2 +[48] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems 32, pages 8024-8035. Curran Associates, Inc., 2019. 
5 +[49] Songyou Peng, Michael Niemeyer, Lars Mescheder, Marc Pollefeys, and Andreas Geiger. Convolutional occupancy networks. In European Conference on Computer Vision, pages 523-540. Springer, 2020. 2 + +[50] Gernot Riegler and Vladlen Koltun. Free view synthesis. In European Conference on Computer Vision, pages 623-640. Springer, 2020. 2 +[51] Y.Y. Schechner, S.G. Narasimhan, and S.K. Nayar. Instant dehazing of images using polarization. In Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. CVPR 2001, volume 1, pages I-I, 2001. 3 +[52] Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. Implicit neural representations with periodic activation functions. Advances in Neural Information Processing Systems, 33:7462-7473, 2020. 2, 3, 4, 5, 6, 7, 8 +[53] Vincent Sitzmann, Justus Thies, Felix Heide, Matthias Nießner, Gordon Wetzstein, and Michael Zollhofer. Deepvoxels: Learning persistent 3d feature embeddings. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2437-2446, 2019. 2 +[54] Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3d-structure-aware neural scene representations. Advances in Neural Information Processing Systems, 32, 2019. 2 +[55] Daniel Teo, Boxin Shi, Yinqiang Zheng, and Sai-Kit Yeung. Self-calibrating polarising radiometric calibration. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2018. 3 +[56] Justus Thies, Michael Zollhöfer, and Matthias Nießner. Deferred neural rendering: Image synthesis using neural textures. ACM Transactions on Graphics (TOG), 38(4):1-12, 2019. 2 +[57] Tali Treibitz and Yoav Y. Schechner. Active polarization rescattering. IEEE Transactions on Pattern Analysis and Machine Intelligence, 31(3):385-399, 2009. 3 +[58] Zhen Wang, Shijie Zhou, Jeong Joon Park, Despoina Paschalidou, Suya You, Gordon Wetzstein, Leonidas Guibas, and Achuta Kadambi. Alto: Alternating latent topologies for implicit 3d reconstruction. arXiv preprint arXiv:2212.04096, 2022. 2 +[59] Lawrence B Wolff. Polarization vision: a new sensory approach to image understanding. Image and Vision computing, 15(2):81-93, 1997. 3 +[60] Xiuming Zhang, Sean Fanello, Yun-Ta Tsai, Tiancheng Sun, Tianfan Xue, Rohit Pandey, Sergio Orts-Escalano, Philip Davidson, Christoph Rhemann, Paul Debevec, et al. Neural light transport for relighting and view synthesis. ACM Transactions on Graphics (TOG), 40(1):1-17, 2021. 2 +[61] Chu Zhou, Minggui Teng, Yufei Han, Chao Xu, and Boxin Shi. Learning to dehaze with polarization. Advances in Neural Information Processing Systems, 34, 2021. 
3 \ No newline at end of file diff --git a/2023/pCON_ Polarimetric Coordinate Networks for Neural Scene Representations/images.zip b/2023/pCON_ Polarimetric Coordinate Networks for Neural Scene Representations/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..841478e3bcb1925625b7ffd3b1402c5570be993b --- /dev/null +++ b/2023/pCON_ Polarimetric Coordinate Networks for Neural Scene Representations/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dac54024c93e7859d4f445977e3a2d00c719e20183ff812d4cdc4c342e0e2274 +size 852767 diff --git a/2023/pCON_ Polarimetric Coordinate Networks for Neural Scene Representations/layout.json b/2023/pCON_ Polarimetric Coordinate Networks for Neural Scene Representations/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..b0314902a9ebbc02035612fd3675de27d1e9f3d1 --- /dev/null +++ b/2023/pCON_ Polarimetric Coordinate Networks for Neural Scene Representations/layout.json @@ -0,0 +1,12125 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 60, + 103, + 533, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 103, + 533, + 121 + ], + "spans": [ + { + "bbox": [ + 60, + 103, + 533, + 121 + ], + "type": "text", + "content": "pCON: Polarimetric Coordinate Networks for Neural Scene Representations" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 173, + 143, + 418, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 143, + 418, + 157 + ], + "spans": [ + { + "bbox": [ + 173, + 143, + 418, + 157 + ], + "type": "text", + "content": "Henry Peters\\*,1, Yunhao Ba\\*,2, Achuta Kadambi\\*1,2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 157, + 488, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 157, + 488, + 171 + ], + "spans": [ + { + "bbox": [ + 104, + 157, + 488, + 171 + ], + "type": "text", + "content": "1Computer Science Department, University of California, Los Angeles (UCLA)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 156, + 171, + 438, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 171, + 438, + 186 + ], + "spans": [ + { + "bbox": [ + 156, + 171, + 438, + 186 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 156, + 171, + 438, + 186 + ], + "type": "text", + "content": "Electrical and Computer Engineering Department, UCLA" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 156, + 187, + 433, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 187, + 433, + 198 + ], + "spans": [ + { + "bbox": [ + 156, + 187, + 433, + 198 + ], + "type": "text", + "content": "hpeters@ucla.edu, yhba@ucla.edu, achuta@ee.ucla.edu" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 143, + 227, + 192, + 239 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 227, + 192, + 239 + ], + "spans": [ + { + "bbox": [ + 143, + 227, + 192, + 239 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 252, + 289, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 252, + 289, + 422 + ], + "spans": [ + { + "bbox": [ + 46, + 252, + 289, + 422 + ], + "type": "text", + "content": "Neural scene representations have achieved great success in parameterizing and reconstructing images, but current state of the art models are not optimized with the preservation of physical quantities in mind. 
While current architectures can reconstruct color images correctly, they create artifacts when trying to fit maps of polar quantities. We propose polarimetric coordinate networks (pCON), a new model architecture for neural scene representations aimed at preserving polarimetric information while accurately parameterizing the scene. Our model removes artifacts created by current coordinate network architectures when reconstructing three polarimetric quantities of interest. All code and data can be found at this link: https://visual.ee.ucla.edu/pcon.htm." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 445, + 128, + 456 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 445, + 128, + 456 + ], + "spans": [ + { + "bbox": [ + 47, + 445, + 128, + 456 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 465, + 287, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 465, + 287, + 644 + ], + "spans": [ + { + "bbox": [ + 46, + 465, + 287, + 644 + ], + "type": "text", + "content": "Neural scene representations are a popular and useful tool in many computer vision tasks, but these models are optimized to preserve visual content, not physical information. Current state-of-the-art models create artifacts due to the presence of a large range of spatial frequencies when reconstructing polarimetric data. Many tasks in polarimetric imaging rely on precise measurements, and thus even small artifacts are a hindrance for downstream tasks that would like to leverage neural reconstructions of polarization images. In this work we present pCON, a new architecture for neural scene representations. pCON leverages images' singular value decompositions to effectively allocate network capacity to learning the more difficult spatial frequencies at each pixel. Our model reconstructs polarimetric images without the artifacts introduced by state-of-the-art models." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 645, + 287, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 645, + 287, + 693 + ], + "spans": [ + { + "bbox": [ + 46, + 645, + 287, + 693 + ], + "type": "text", + "content": "The polarization of light passing through a scene contains a wealth of information, and while current neural representations can represent single images accurately, but they produce noticeable visual artifacts when trying to represent" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 228, + 489, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 228, + 489, + 240 + ], + "spans": [ + { + "bbox": [ + 306, + 228, + 489, + 240 + ], + "type": "text", + "content": "multiple polarimetric quantities concurrently." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 241, + 547, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 241, + 547, + 588 + ], + "spans": [ + { + "bbox": [ + 304, + 241, + 547, + 588 + ], + "type": "text", + "content": "We propose a new architecture for neural scene representations that can effectively reconstruct polarimetric images without artifacts. 
Our model reconstructs color images accurately while also ensuring the quality of three important polarimetric quantities, the degree " + }, + { + "bbox": [ + 304, + 241, + 547, + 588 + ], + "type": "inline_equation", + "content": "(\\rho)" + }, + { + "bbox": [ + 304, + 241, + 547, + 588 + ], + "type": "text", + "content": " and angle " + }, + { + "bbox": [ + 304, + 241, + 547, + 588 + ], + "type": "inline_equation", + "content": "(\\phi)" + }, + { + "bbox": [ + 304, + 241, + 547, + 588 + ], + "type": "text", + "content": " of linear polarization (DoLP and AoLP), and the unpolarized intensity " + }, + { + "bbox": [ + 304, + 241, + 547, + 588 + ], + "type": "inline_equation", + "content": "I_{un}" + }, + { + "bbox": [ + 304, + 241, + 547, + 588 + ], + "type": "text", + "content": ". This information is generally captured using images of a scene taken through linear polarizing filters at four different angles. Instead of learning a representation of these images, our model operates directly on the DoLP, AoLP and unpolarized intensity maps. When learning to fit these images, current coordinate network architectures produce artifacts in the predicted DoLP and unpolarized intensity maps. To alleviate this issue, we take inspiration from traditional image compression techniques and fit images using their singular value decompositions. Images can be compressed by reconstructing them using only a subset of their singular values [28]. By utilizing different, non-overlapping sets of singular values to reconstruct an image, the original image can be recovered by summing the individual reconstructions together. Our model is supervised in a coarse-to-fine manner, which helps the model to represent both the low and high frequency details present in maps of polarimetric quantities without introducint noise or tiling artifacts. A demonstration of the efficacy our model can be seen in Fig. 1 and Table 1. Furthermore, our model is capable of representing images at varying levels of detail, creating a tradeoff between performance and model size without retraining." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 598, + 394, + 610 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 598, + 394, + 610 + ], + "spans": [ + { + "bbox": [ + 306, + 598, + 394, + 610 + ], + "type": "text", + "content": "1.1. 
Contributions" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 318, + 618, + 533, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 618, + 533, + 629 + ], + "spans": [ + { + "bbox": [ + 318, + 618, + 533, + 629 + ], + "type": "text", + "content": "To summarize, the contributions of our work include:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 317, + 642, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 317, + 642, + 545, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 642, + 545, + 665 + ], + "spans": [ + { + "bbox": [ + 317, + 642, + 545, + 665 + ], + "type": "text", + "content": "- a coordinate network architecture for neural scene representations of polarimetric images;" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 317, + 666, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 666, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 317, + 666, + 545, + 712 + ], + "type": "text", + "content": "- a training strategy for our network which learns a series of representations using different sets of singular values, allowing for a trade-off between performance and model size without retraining;" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 702, + 126, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 126, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 126, + 712 + ], + "type": "text", + "content": "*Equal contribution." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "16579" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 66, + 69, + 157, + 161 + ], + "blocks": [ + { + "bbox": [ + 66, + 69, + 157, + 161 + ], + "lines": [ + { + "bbox": [ + 66, + 69, + 157, + 161 + ], + "spans": [ + { + "bbox": [ + 66, + 69, + 157, + 161 + ], + "type": "image", + "image_path": "02728a7bd97bf70d83e908f9c7d4024761e0668f0685986a3e4b5e84b089a572.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 66, + 162, + 156, + 277 + ], + "blocks": [ + { + "bbox": [ + 66, + 162, + 156, + 277 + ], + "lines": [ + { + "bbox": [ + 66, + 162, + 156, + 277 + ], + "spans": [ + { + "bbox": [ + 66, + 162, + 156, + 277 + ], + "type": "image", + "image_path": "a723e30dcd4a5344d5b0f93b1f6f63fd91913e15137a08d35fa2997b2ec60923.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 281, + 118, + 289 + ], + "lines": [ + { + "bbox": [ + 104, + 281, + 118, + 289 + ], + "spans": [ + { + "bbox": [ + 104, + 281, + 118, + 289 + ], + "type": "text", + "content": "GT" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 159, + 70, + 250, + 161 + ], + "blocks": [ + { + "bbox": [ + 159, + 70, + 250, + 161 + ], + "lines": [ + { + "bbox": [ + 159, + 70, + 250, + 161 + ], + "spans": [ + { + "bbox": [ + 159, + 70, + 250, + 161 + ], + "type": "image", + "image_path": "6ec418072ba275e19e442ec3ff2af11448aa68989cdba9ddf7748a1d2ef590e8.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 160, + 161, + 249, + 277 + ], + "blocks": [ + { + "bbox": [ + 160, + 161, + 249, + 277 + ], + "lines": [ + { + "bbox": [ + 160, + 161, + 249, + 277 + ], + "spans": [ + { + "bbox": [ + 160, + 161, + 249, + 277 + ], + "type": "image", + "image_path": "698fde1817b754765f8fdf902b1ca60cafd29d67dd04f421813d2bf59ccb134b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 181, + 280, + 227, + 291 + ], + "lines": [ + { + "bbox": [ + 181, + 280, + 227, + 291 + ], + "spans": [ + { + "bbox": [ + 181, + 280, + 227, + 291 + ], + "type": "text", + "content": "SIREN [52]" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 251, + 70, + 342, + 161 + ], + "blocks": [ + { + "bbox": [ + 251, + 70, + 342, + 161 + ], + "lines": [ + { + "bbox": [ + 251, + 70, + 342, + 161 + ], + "spans": [ + { + "bbox": [ + 251, + 70, + 342, + 161 + ], + "type": "image", + "image_path": "dbb35906ed7f177af3fc44779e4ae46970f959ec5c25a271294599f1fb91f5b3.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 252, + 162, + 341, + 277 + ], + "blocks": [ + { + "bbox": [ + 252, + 162, + 341, + 277 + ], + "lines": [ + { + "bbox": [ + 252, + 162, + 341, + 277 + ], + "spans": [ + { + "bbox": [ + 252, + 162, + 341, + 277 + ], + "type": "image", + "image_path": 
"7184b56b20ddc195b635ac1241896d3248b20ea336cfe88437f2cff2a537304e.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 272, + 281, + 321, + 291 + ], + "lines": [ + { + "bbox": [ + 272, + 281, + 321, + 291 + ], + "spans": [ + { + "bbox": [ + 272, + 281, + 321, + 291 + ], + "type": "text", + "content": "ACORN [34]" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 343, + 70, + 435, + 161 + ], + "blocks": [ + { + "bbox": [ + 343, + 70, + 435, + 161 + ], + "lines": [ + { + "bbox": [ + 343, + 70, + 435, + 161 + ], + "spans": [ + { + "bbox": [ + 343, + 70, + 435, + 161 + ], + "type": "image", + "image_path": "c9e9aa7da74cec2789601569d4debbc3190a5d1f93f3ddb1fadad321e05326dd.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 344, + 162, + 434, + 277 + ], + "blocks": [ + { + "bbox": [ + 344, + 162, + 434, + 277 + ], + "lines": [ + { + "bbox": [ + 344, + 162, + 434, + 277 + ], + "spans": [ + { + "bbox": [ + 344, + 162, + 434, + 277 + ], + "type": "image", + "image_path": "d01da4bf678750ed3a2f92a7850c772288c407bb76790b460574d26e49c6fb4a.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 370, + 281, + 408, + 290 + ], + "lines": [ + { + "bbox": [ + 370, + 281, + 408, + 290 + ], + "spans": [ + { + "bbox": [ + 370, + 281, + 408, + 290 + ], + "type": "text", + "content": "ReLU P.E" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 301, + 547, + 335 + ], + "lines": [ + { + "bbox": [ + 46, + 301, + 547, + 335 + ], + "spans": [ + { + "bbox": [ + 46, + 301, + 547, + 335 + ], + "type": "text", + "content": "Figure 1. Our model reconstructs the training scene more accurately than other architectures. Our model does not have the noise pattern present in reconstructions from SIREN [52] or a ReLU MLP with positional encoding [38], nor does it show tiling artifacts as in ACORN's [34] prediction." 
+ } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 437, + 70, + 528, + 161 + ], + "blocks": [ + { + "bbox": [ + 437, + 70, + 528, + 161 + ], + "lines": [ + { + "bbox": [ + 437, + 70, + 528, + 161 + ], + "spans": [ + { + "bbox": [ + 437, + 70, + 528, + 161 + ], + "type": "image", + "image_path": "2e6ce86ef45c7a0f29ff4a6718fc698b4411a77fb7ec4a74bec36760446ac21c.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 437, + 162, + 526, + 277 + ], + "blocks": [ + { + "bbox": [ + 437, + 162, + 526, + 277 + ], + "lines": [ + { + "bbox": [ + 437, + 162, + 526, + 277 + ], + "spans": [ + { + "bbox": [ + 437, + 162, + 526, + 277 + ], + "type": "image", + "image_path": "736438e7bbd7e9e917f46f446a2213d208cd203a763e3145224b1ba67865d43a.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 473, + 281, + 492, + 290 + ], + "lines": [ + { + "bbox": [ + 473, + 281, + 492, + 290 + ], + "spans": [ + { + "bbox": [ + 473, + 281, + 492, + 290 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 59, + 356, + 287, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 356, + 287, + 392 + ], + "spans": [ + { + "bbox": [ + 59, + 356, + 287, + 392 + ], + "type": "text", + "content": "- results demonstrating that our model reconstructs maps of polarimetric quantities without the artifacts created by current state-of-the-art approaches." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 407, + 132, + 420 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 407, + 132, + 420 + ], + "spans": [ + { + "bbox": [ + 47, + 407, + 132, + 420 + ], + "type": "text", + "content": "2. Related work" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 429, + 205, + 442 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 429, + 205, + 442 + ], + "spans": [ + { + "bbox": [ + 47, + 429, + 205, + 442 + ], + "type": "text", + "content": "2.1. Neural scene representations" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 46, + 449, + 287, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 449, + 287, + 568 + ], + "spans": [ + { + "bbox": [ + 46, + 449, + 287, + 568 + ], + "type": "text", + "content": "The aim of neural scene representations is to parameterize a two or three dimensional scene in the weights of a neural network in order to accomplish some other task related to the scene. Most papers fall into one of three categories. Explicit representations model the scene directly, which allows them to quickly accomplish tasks such as scene reconstruction [9, 31], novel view synthesis [4, 17, 22, 36, 37, 41, 50, 53, 56] or relighting [60]. However, since the scene is modelled explicitly, these representations require more memory than the alternatives." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "text", + "content": "Implicit representations do not model the scene directly, but instead use an MLP to map from a coordinate in either 2D or 3D space to some desired output value. 
This value could be the observed radiance or pixel intensity [11, 16, 38, 44, 46, 54], occupancy of a pixel or voxel [35, 45], a quantity related to shape [5, 10, 13, 18, 19, 21, 23, 26, 30, 44, 47, 54, 58], or any other quantity of interest. The final category of neural scene representations is a hybrid of the first two. The only work that fits directly into this category is ACORN [34], which accomplishes state-of-the-art performance on image and volume fitting by combining a coordinate network with an explicit grid or voxel represent-" + } + ] + } + ], + "index": 20 + }, + { + "type": "table", + "bbox": [ + 307, + 354, + 537, + 423 + ], + "blocks": [ + { + "bbox": [ + 307, + 354, + 537, + 423 + ], + "lines": [ + { + "bbox": [ + 307, + 354, + 537, + 423 + ], + "spans": [ + { + "bbox": [ + 307, + 354, + 537, + 423 + ], + "type": "table", + "html": "
ModelNoise PatternTiling ArtifactsResizing Artifacts
ACORN [34]MediumHighNot Supported
ReLU w/P.E. [38]MediumNoneYes
SIREN [52]HighNoneYes
ProposedMinimalNoneMinimal
", + "image_path": "f1566f989fce43094b50570770020803e1534e5f266cca02bf288e74717dc1cf.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "table_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 431, + 547, + 498 + ], + "lines": [ + { + "bbox": [ + 304, + 431, + 547, + 498 + ], + "spans": [ + { + "bbox": [ + 304, + 431, + 547, + 498 + ], + "type": "text", + "content": "Table 1. Our model shows fewer artifacts than current state-of-the-art architectures. Since ACORN divides an image into a discrete grid, in order to query an image at a different resolution it is necessary to also reform the grid. The grid is created online during training, so it is not feasible to query a model at a different resolution without retraining." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 518, + 545, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 518, + 545, + 554 + ], + "spans": [ + { + "bbox": [ + 304, + 518, + 545, + 554 + ], + "type": "text", + "content": "tation. Similar to ACORN, other works divide the scene into local regions and learn each of these regions implicitly [10,26,49]." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 304, + 555, + 546, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 555, + 546, + 662 + ], + "spans": [ + { + "bbox": [ + 304, + 555, + 546, + 662 + ], + "type": "text", + "content": "To our knowledge, this work is the first to highlight the artifacts caused by existing neural scene representation architectures when fitting polarimetric data. While we are one of the first works to examine polarization and neural scene representations in the same context, we would like to acknowledge that PANDORA [14], a concurrent work, also utilizes polarization and neural scene representations. However, they focus on radiance decomposition rather than 2D reconstruction." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 306, + 670, + 416, + 682 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 670, + 416, + 682 + ], + "spans": [ + { + "bbox": [ + 306, + 670, + 416, + 682 + ], + "type": "text", + "content": "2.2. Polarization vision" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 306, + 689, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 547, + 713 + ], + "type": "text", + "content": "Polarization is useful in a variety of computer vision tasks. It can be used to estimate surface normals [1, 2," + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "16580" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "content": "7, 15, 24, 25, 33, 39, 43] or refine depth maps to represent incredibly fine details [27]. It can be used in radiometric calibration [55], dynamic interferometry [32], facial reconstruction [6] and separation of diffuse and specular reflection [39, 42]. 
It also can be used to remove the effects of scattering media like haze [51, 57, 61] and water [57], to augment the performance of computer vision tasks in the presence of transparent objects [12, 29, 40], or even to assist in imaging objects in space [20]. Traditionally, polarimetric data is captured by rotating a linear polarizing filter in front of a camera [3, 59], but recent advances in machine vision have produced cameras that can capture multiple polar images in a single shot." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 231, + 288, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 231, + 288, + 281 + ], + "spans": [ + { + "bbox": [ + 46, + 231, + 288, + 281 + ], + "type": "text", + "content": "Our work uses a neural network to accurately parameterize polarimetric information captured from a scene. This allows for easier storage and transport of polarimetric data and facilitates its use in other deep learning based tasks." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 299, + 104, + 311 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 299, + 104, + 311 + ], + "spans": [ + { + "bbox": [ + 47, + 299, + 104, + 311 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 321, + 164, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 321, + 164, + 335 + ], + "spans": [ + { + "bbox": [ + 47, + 321, + 164, + 335 + ], + "type": "text", + "content": "3.1. Polarization physics" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 343, + 289, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 343, + 289, + 510 + ], + "spans": [ + { + "bbox": [ + 46, + 343, + 289, + 510 + ], + "type": "text", + "content": "Polarized light can be modelled as a sine wave, and can thus be parameterized by three quantities. The degree of linear polarization (DoLP) is a quantity between 0 and 1 that represents how much of the total intensity of the wave is polarized and unpolarized. Completely polarized light will have a DoLP of 1, and completely unpolarized light will have a DoLP of 0. The angle of linear polarization (AoLP) corresponds to the orientation of the plane in which the wave is oscillating. The AoLP takes values from 0 to " + }, + { + "bbox": [ + 46, + 343, + 289, + 510 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 46, + 343, + 289, + 510 + ], + "type": "text", + "content": " radians. The final quantity of interest is the unpolarized intensity, " + }, + { + "bbox": [ + 46, + 343, + 289, + 510 + ], + "type": "inline_equation", + "content": "I_{un}" + }, + { + "bbox": [ + 46, + 343, + 289, + 510 + ], + "type": "text", + "content": ", of the wave, which corresponds to its amplitude. 
With these three quantities, it is possible to render a scene as viewed through a linear polarization filter at any angle using the following equation:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 85, + 526, + 287, + 540 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 526, + 287, + 540 + ], + "spans": [ + { + "bbox": [ + 85, + 526, + 287, + 540 + ], + "type": "interline_equation", + "content": "I \\left(\\phi_ {p o l}\\right) = I _ {u n} \\left(1 + \\rho c o s \\left(2 \\left(\\phi - \\phi_ {p o l}\\right)\\right)\\right), \\tag {1}", + "image_path": "62fb5385d58f1ff5398b273b1653208a5e09d8247bb2ec2fd295defc4824f3a5.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "spans": [ + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "type": "inline_equation", + "content": "I_{un}" + }, + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "type": "text", + "content": " denotes unpolarized intensity, " + }, + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "type": "text", + "content": " denotes DoLP, " + }, + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "type": "text", + "content": " denotes AoLP and " + }, + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "type": "inline_equation", + "content": "\\phi_{pol}" + }, + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "type": "text", + "content": " denotes the desired filter angle at each pixel. This equation allows us to render images under any number of filter angles by saving only three quantities per pixel. In this paper we leverage the above equation to learn a representation for just these quantities, rather than the four original images." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "text", + "content": "The DoLP " + }, + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "inline_equation", + "content": "(\\rho)" + }, + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "text", + "content": " and AoLP " + }, + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "inline_equation", + "content": "(\\phi)" + }, + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "text", + "content": " have uses beyond just rendering images. In the shape from polarization problem, these quantities are used to calculate the zenith and azimuth angles, respectively, of per-pixel surface normals. This relationship has been studied in previous work [1, 7]. 
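As an aside, Eq. (1) above is simple to apply in code. The following minimal NumPy sketch is not taken from the paper or its released code; the function name and the toy inputs are hypothetical, and it only illustrates re-rendering a view through a virtual linear polarizer from the three stored per-pixel quantities.

import numpy as np

def render_through_polarizer(i_un, dolp, aolp, phi_pol):
    # Eq. (1): I(phi_pol) = I_un * (1 + rho * cos(2 * (phi - phi_pol)))
    # i_un, dolp, aolp: HxW maps (unpolarized intensity, DoLP in [0, 1], AoLP in [0, pi))
    # phi_pol: desired linear-polarizer angle in radians
    return i_un * (1.0 + dolp * np.cos(2.0 * (aolp - phi_pol)))

# Toy example: re-render the four filter angles captured by the camera (0, 45, 90, 135 degrees).
i_un = np.ones((4, 4))
dolp = np.full((4, 4), 0.5)
aolp = np.full((4, 4), np.pi / 4)
views = [render_through_polarizer(i_un, dolp, aolp, np.deg2rad(a)) for a in (0, 45, 90, 135)]
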
Specifically, the azimuth angle, " + }, + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\theta_{a}" + }, + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "text", + "content": ", of a surface normal can be" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 72, + 480, + 85 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 480, + 85 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 480, + 85 + ], + "type": "text", + "content": "calculated from the following relationship:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 315, + 95, + 545, + 135 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 95, + 545, + 135 + ], + "spans": [ + { + "bbox": [ + 315, + 95, + 545, + 135 + ], + "type": "interline_equation", + "content": "\\phi = \\left\\{ \\begin{array}{l} \\theta_ {a}, \\text {w h e n d i f f u s e r e f l e c t i o n d o m i n a t e s} \\\\ \\theta_ {a} - \\frac {\\pi}{2}, \\text {w h e n s p e c u l a r r e f l e c t i o n d o m i n a t e s} \\end{array} \\right. \\tag {2}", + "image_path": "7a454155970e2b79f9a72fe0033f11083b5855022e77f6ed0fd831c683dc4a85.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 137, + 545, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 137, + 545, + 172 + ], + "spans": [ + { + "bbox": [ + 305, + 137, + 545, + 172 + ], + "type": "text", + "content": "DoLP, " + }, + { + "bbox": [ + 305, + 137, + 545, + 172 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 305, + 137, + 545, + 172 + ], + "type": "text", + "content": ", is related to the zenith angle, " + }, + { + "bbox": [ + 305, + 137, + 545, + 172 + ], + "type": "inline_equation", + "content": "\\theta_z" + }, + { + "bbox": [ + 305, + 137, + 545, + 172 + ], + "type": "text", + "content": ", in terms of the refractive index, " + }, + { + "bbox": [ + 305, + 137, + 545, + 172 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 305, + 137, + 545, + 172 + ], + "type": "text", + "content": ", of a surface. When diffuse reflection is dominant, the relationship can be written as:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 318, + 182, + 545, + 205 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 182, + 545, + 205 + ], + "spans": [ + { + "bbox": [ + 318, + 182, + 545, + 205 + ], + "type": "interline_equation", + "content": "\\rho = \\frac {\\left(n - \\frac {1}{n}\\right) ^ {2} \\sin^ {2} \\left(\\theta_ {z}\\right)}{2 + 2 n ^ {2} - \\left(n - \\frac {1}{n}\\right) ^ {2} \\sin^ {2} \\left(\\theta_ {z}\\right) + 4 \\cos \\left(\\theta_ {z}\\right) \\sqrt {n ^ {2} - \\sin^ {2} \\left(\\theta_ {z}\\right)}}. 
\\tag {3}", + "image_path": "9a588ec6d53ae8dbde174da0f108a1523ec2cc206e391767de413b2fff65444a.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 213, + 545, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 213, + 545, + 237 + ], + "spans": [ + { + "bbox": [ + 305, + 213, + 545, + 237 + ], + "type": "text", + "content": "When specular reflection dominates, the relationship is different:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 322, + 247, + 545, + 281 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 247, + 545, + 281 + ], + "spans": [ + { + "bbox": [ + 322, + 247, + 545, + 281 + ], + "type": "interline_equation", + "content": "\\rho = \\frac {2 \\sin^ {2} (\\theta_ {z}) \\cos (\\theta_ {z}) \\sqrt {n ^ {2} - \\sin^ {2} (\\theta_ {z})}}{n ^ {2} - \\sin^ {2} (\\theta_ {z}) - n ^ {2} \\sin^ {2} (\\theta_ {z}) + 2 \\sin^ {4} (\\theta_ {z})}. \\qquad (4)", + "image_path": "bcfe715e9757a5b2e155db449289217e12d3b9edd646c9d76e362d77bfa5625c.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 290, + 545, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 290, + 545, + 422 + ], + "spans": [ + { + "bbox": [ + 305, + 290, + 545, + 422 + ], + "type": "inline_equation", + "content": "\\rho, \\phi" + }, + { + "bbox": [ + 305, + 290, + 545, + 422 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 290, + 545, + 422 + ], + "type": "inline_equation", + "content": "I_{un}" + }, + { + "bbox": [ + 305, + 290, + 545, + 422 + ], + "type": "text", + "content": " can be calculated directly from a vector known as the Stokes vector at each pixel. This vector has four elements. The first three elements deal with the linear polarization of light, and the final one represents the circular polarization of the wave. In this paper we will focus on linear polarization. To measure the Stokes vector of a scene, at least three images are needed, taken through linear polarizing filters at 0, 45 and 90 degrees. Since the camera used in our setup also captures an image with a filter at 135 degrees, we use four images in our calculations of the Stokes vectors for robustness to noise." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 431, + 462, + 443 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 431, + 462, + 443 + ], + "spans": [ + { + "bbox": [ + 306, + 431, + 462, + 443 + ], + "type": "text", + "content": "3.2. Learning from coarse to fine" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 450, + 545, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 450, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 450, + 545, + 715 + ], + "type": "text", + "content": "Current coordinate network architectures produce artifacts when fitting polarimetric images. SIREN [52] and similar architectures treat every coordinate equally when training, and they produce noise patterns in the resulting images when the spatial frequencies present in the training data differ widely (eg. the maximum magnitude frequency differs by an order of magnitude). 
In the polarimetric images we obtained, we found the maximum frequency magnitude of some AoLP maps was around " + }, + { + "bbox": [ + 304, + 450, + 545, + 715 + ], + "type": "inline_equation", + "content": "10^{7}" + }, + { + "bbox": [ + 304, + 450, + 545, + 715 + ], + "type": "text", + "content": ", while the maximum magnitude for the intensity image was only around " + }, + { + "bbox": [ + 304, + 450, + 545, + 715 + ], + "type": "inline_equation", + "content": "10^{6}" + }, + { + "bbox": [ + 304, + 450, + 545, + 715 + ], + "type": "text", + "content": ". ACORN [34] does not treat each coordinate in the same way, but its dynamic tiling strategy looks for regions of low variance in order to create larger blocks. This is difficult to do when attempting to fit multiple images containing varying frequencies. The resulting reconstructions end up looking blocky, and fine detail is lost in the process. Our method removes these artifacts by learning image representations using their singular value decompositions. One idea to help in reconstructing high frequency details could be to use an image's Fourier decomposition. We found that in practice the SVD works better for our use case. This is due to the propagation of errors during the forward and inverse" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "16581" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 76, + 545, + 273 + ], + "blocks": [ + { + "bbox": [ + 51, + 76, + 545, + 273 + ], + "lines": [ + { + "bbox": [ + 51, + 76, + 545, + 273 + ], + "spans": [ + { + "bbox": [ + 51, + 76, + 545, + 273 + ], + "type": "image", + "image_path": "61c05bfe9d55edf34492a8775d5f5cae962d9566528b3196c889b8f7ef74ed6b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 282, + 547, + 339 + ], + "lines": [ + { + "bbox": [ + 46, + 282, + 547, + 339 + ], + "spans": [ + { + "bbox": [ + 46, + 282, + 547, + 339 + ], + "type": "text", + "content": "Figure 2. pCON learns to fit an image by learning a series of reconstructions with different singular values. The model is organized into a series of " + }, + { + "bbox": [ + 46, + 282, + 547, + 339 + ], + "type": "inline_equation", + "content": "n_b" + }, + { + "bbox": [ + 46, + 282, + 547, + 339 + ], + "type": "text", + "content": " parallel MLPs (denoted here as " + }, + { + "bbox": [ + 46, + 282, + 547, + 339 + ], + "type": "inline_equation", + "content": "g_i" + }, + { + "bbox": [ + 46, + 282, + 547, + 339 + ], + "type": "text", + "content": ") with sine activations. A 2D coordinate vector representing a point on an image is passed through all bands separately (" + }, + { + "bbox": [ + 46, + 282, + 547, + 339 + ], + "type": "inline_equation", + "content": "g_0" + }, + { + "bbox": [ + 46, + 282, + 547, + 339 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 282, + 547, + 339 + ], + "type": "inline_equation", + "content": "g_n" + }, + { + "bbox": [ + 46, + 282, + 547, + 339 + ], + "type": "text", + "content": "). 
To supervise the training of each band, we reconstruct the full image maps of each quantity, and then calculate the MSE between the model prediction, " + }, + { + "bbox": [ + 46, + 282, + 547, + 339 + ], + "type": "inline_equation", + "content": "\\hat{y}_i" + }, + { + "bbox": [ + 46, + 282, + 547, + 339 + ], + "type": "text", + "content": " and their respective ground truth values, " + }, + { + "bbox": [ + 46, + 282, + 547, + 339 + ], + "type": "inline_equation", + "content": "y_i" + }, + { + "bbox": [ + 46, + 282, + 547, + 339 + ], + "type": "text", + "content": ", at the input coordinate. The final output is the sum of all the intermediate reconstructions, which yields a set of images similar to the training data." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 358, + 289, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 358, + 289, + 443 + ], + "spans": [ + { + "bbox": [ + 46, + 358, + 289, + 443 + ], + "type": "text", + "content": "Fourier transforms. The SVD does not require shifting between the spatial and frequency domains, which allows errors to propagate less than if we were supervising on Fourier frequencies. The singular value decomposition of an " + }, + { + "bbox": [ + 46, + 358, + 289, + 443 + ], + "type": "inline_equation", + "content": "m \\times n" + }, + { + "bbox": [ + 46, + 358, + 289, + 443 + ], + "type": "text", + "content": " matrix " + }, + { + "bbox": [ + 46, + 358, + 289, + 443 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 46, + 358, + 289, + 443 + ], + "type": "text", + "content": " is a set of matrices " + }, + { + "bbox": [ + 46, + 358, + 289, + 443 + ], + "type": "inline_equation", + "content": "\\mathbf{U} \\in \\mathbb{R}^{m \\times m}" + }, + { + "bbox": [ + 46, + 358, + 289, + 443 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 358, + 289, + 443 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma} \\in \\mathbb{R}^{m \\times n}" + }, + { + "bbox": [ + 46, + 358, + 289, + 443 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 358, + 289, + 443 + ], + "type": "inline_equation", + "content": "\\mathbf{V}^{\\top} \\in \\mathbb{R}^{n \\times n}" + }, + { + "bbox": [ + 46, + 358, + 289, + 443 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 46, + 358, + 289, + 443 + ], + "type": "inline_equation", + "content": "\\mathbf{A} = \\mathbf{U} \\boldsymbol{\\Sigma} \\mathbf{V}^{\\top}" + }, + { + "bbox": [ + 46, + 358, + 289, + 443 + ], + "type": "text", + "content": ". 
This matrix product can be further decomposed:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 449, + 287, + 515 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 449, + 287, + 515 + ], + "spans": [ + { + "bbox": [ + 62, + 449, + 287, + 515 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbf {U} \\boldsymbol {\\Sigma} \\mathbf {V} ^ {\\top} = \\sum_ {i} ^ {r} \\sigma_ {i} \\mathbf {u} _ {i} \\mathbf {v} _ {i} ^ {\\top} \\tag {5} \\\\ = \\sum_ {i = 0} ^ {a _ {1}} \\sigma_ {i} \\mathbf {u} _ {i} \\mathbf {v} _ {i} ^ {\\top} + \\sum_ {i = a _ {1}} ^ {a _ {2}} \\sigma_ {i} \\mathbf {u} _ {i} \\mathbf {v} _ {i} ^ {\\top} + \\dots + \\sum_ {i = a _ {n}} ^ {r} \\sigma_ {i} \\mathbf {u} _ {i} \\mathbf {v} _ {i} ^ {\\top} \\\\ \\end{array}", + "image_path": "89e57ed0dd1a366419a11f27670e15467ab25f9cf074679f598403064709d334.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 521, + 289, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 521, + 289, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 521, + 289, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 521, + 289, + 714 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 46, + 521, + 289, + 714 + ], + "type": "text", + "content": " is the rank of " + }, + { + "bbox": [ + 46, + 521, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 46, + 521, + 289, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 521, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_i" + }, + { + "bbox": [ + 46, + 521, + 289, + 714 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 46, + 521, + 289, + 714 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 521, + 289, + 714 + ], + "type": "text", + "content": "-th column of " + }, + { + "bbox": [ + 46, + 521, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{U}" + }, + { + "bbox": [ + 46, + 521, + 289, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 521, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_i" + }, + { + "bbox": [ + 46, + 521, + 289, + 714 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 46, + 521, + 289, + 714 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 521, + 289, + 714 + ], + "type": "text", + "content": "-th column of " + }, + { + "bbox": [ + 46, + 521, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{V}" + }, + { + "bbox": [ + 46, + 521, + 289, + 714 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 521, + 289, + 714 + ], + "type": "inline_equation", + "content": "\\sigma_i" + }, + { + "bbox": [ + 46, + 521, + 289, + 714 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 46, + 521, + 289, + 714 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 521, + 289, + 714 + ], + "type": "text", + "content": "-th singular value. In the case of an image, this means that it is possible to calculate different pieces of the decomposition individually, and then sum them to obtain the original image. We leverage this property of the SVD in our model architecture. Using just the largest singular values to reconstruct an image yields a result containing only the low frequency details of the original [28]. 
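A short NumPy sketch (illustrative only, not from the paper; svd_bands and the example thresholds are hypothetical) shows this banded form of Eq. (5): each band is a partial sum of sigma_i * u_i * v_i^T terms, and the bands add back up to the original image.

import numpy as np

def svd_bands(img, thresholds):
    # Split an image into banded partial SVD reconstructions, cf. Eq. (5).
    # thresholds: increasing singular-value indices, e.g. [4, 16, None]; None means "up to rank".
    u, s, vt = np.linalg.svd(img, full_matrices=False)
    bounds = [0] + [t if t is not None else len(s) for t in thresholds]
    bands = []
    for lo, hi in zip(bounds[:-1], bounds[1:]):
        # (u[:, lo:hi] * s[lo:hi]) scales each column by its singular value before the outer product sum.
        bands.append((u[:, lo:hi] * s[lo:hi]) @ vt[lo:hi, :])
    return bands

img = np.random.rand(64, 64)
bands = svd_bands(img, [4, 16, None])           # coarse -> fine bands
assert np.allclose(sum(bands), img, atol=1e-8)  # the partial sums recover the image
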
As more singular values are used in the reconstruction, higher frequency details are captured. A single coordinate may have features in many reconstructions, and others may have features in only a few. Our network learns a series of reconstructions in parallel, which effectively allocates more model capacity to coordinates which have details at numerous frequencies. Since we are not dividing the image into a grid like ACORN, our reconstruc" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 358, + 547, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 358, + 547, + 395 + ], + "spans": [ + { + "bbox": [ + 304, + 358, + 547, + 395 + ], + "type": "text", + "content": "tions do not suffer from tiling artifacts, and they also do not exhibit the obvious noise pattern present in reconstructions from SIREN or ReLU MLPs." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 402, + 403, + 415 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 402, + 403, + 415 + ], + "spans": [ + { + "bbox": [ + 305, + 402, + 403, + 415 + ], + "type": "text", + "content": "3.3. Network design" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 420, + 547, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 420, + 547, + 540 + ], + "spans": [ + { + "bbox": [ + 304, + 420, + 547, + 540 + ], + "type": "text", + "content": "Our network design takes inspiration from SIREN [52]. The original SIREN architecture was similar to an ordinary MLP, except that it used the sine activation function. Our network is divided into a series of " + }, + { + "bbox": [ + 304, + 420, + 547, + 540 + ], + "type": "inline_equation", + "content": "n_b" + }, + { + "bbox": [ + 304, + 420, + 547, + 540 + ], + "type": "text", + "content": " fully-connected blocks which map from a 2D input image coordinate to the AoLP " + }, + { + "bbox": [ + 304, + 420, + 547, + 540 + ], + "type": "inline_equation", + "content": "(\\phi)" + }, + { + "bbox": [ + 304, + 420, + 547, + 540 + ], + "type": "text", + "content": ", DoLP " + }, + { + "bbox": [ + 304, + 420, + 547, + 540 + ], + "type": "inline_equation", + "content": "(\\rho)" + }, + { + "bbox": [ + 304, + 420, + 547, + 540 + ], + "type": "text", + "content": " and unpolarized intensity " + }, + { + "bbox": [ + 304, + 420, + 547, + 540 + ], + "type": "inline_equation", + "content": "I_{un}" + }, + { + "bbox": [ + 304, + 420, + 547, + 540 + ], + "type": "text", + "content": " at that pixel. We call each of these MLPs a band of the network, and we will notate them as " + }, + { + "bbox": [ + 304, + 420, + 547, + 540 + ], + "type": "inline_equation", + "content": "g_i" + }, + { + "bbox": [ + 304, + 420, + 547, + 540 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 304, + 420, + 547, + 540 + ], + "type": "inline_equation", + "content": "i \\in 0,1,\\dots,n_b - 1" + }, + { + "bbox": [ + 304, + 420, + 547, + 540 + ], + "type": "text", + "content": ". 
To fit an image, we first take the singular value decomposition of the map of each polar quantity:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 389, + 548, + 460, + 563 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 548, + 460, + 563 + ], + "spans": [ + { + "bbox": [ + 389, + 548, + 460, + 563 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\Phi} = \\mathbf {U} _ {\\phi} \\boldsymbol {\\Sigma} _ {\\phi} \\mathbf {V} _ {\\phi} ^ {\\top},", + "image_path": "230546613ee51206e39687eb5ecefe0acc4e5d2f27796bafd1caaff456595d21.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 392, + 565, + 545, + 581 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 392, + 565, + 545, + 581 + ], + "spans": [ + { + "bbox": [ + 392, + 565, + 545, + 581 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\rho} = \\mathbf {U} _ {\\rho} \\boldsymbol {\\Sigma} _ {\\rho} \\mathbf {V} _ {\\rho} ^ {\\top}, \\tag {6}", + "image_path": "f1c096397ebf58ddfae891466d0cf1e2b9dd3ed23edca057882ceb7446c88a4f.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 385, + 582, + 466, + 597 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 385, + 582, + 466, + 597 + ], + "spans": [ + { + "bbox": [ + 385, + 582, + 466, + 597 + ], + "type": "interline_equation", + "content": "\\mathbf {I} _ {u n} = \\mathbf {U} _ {u n} \\boldsymbol {\\Sigma} _ {u n} \\mathbf {V} _ {u n} ^ {\\top}.", + "image_path": "bc1d04dfa6c7102f2dd55da607a044d1c70499aecd4289f5a2d4577c2ae2ed02.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\Phi, \\rho" + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{un}" + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": " represent the full image maps of AoLP " + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "inline_equation", + "content": "(\\phi)" + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": ", DoLP " + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "inline_equation", + "content": "(\\rho)" + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "inline_equation", + "content": "I_{un}" + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": ", respectively. The above equations are obtained by interpreting these maps as matrices and then using Eq. (5). 
We now define a series of " + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "inline_equation", + "content": "n_b" + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": " thresholds for " + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\Phi, \\rho" + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{un}" + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "inline_equation", + "content": "t_{\\phi,i}, t_{\\rho,i}" + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "inline_equation", + "content": "t_{\\mathrm{un},i}" + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": ", respectively. These thresholds dictate which singular values will be used to supervise each band of the network. We also define the ground truth intermediate reconstructions of each quantity using a subset of singular values as " + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "inline_equation", + "content": "y_{\\phi,i}, y_{\\rho,i}" + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "inline_equation", + "content": "y_{\\mathrm{un},i}" + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": ". We denote their" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "16582" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 109 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 109 + ], + "type": "text", + "content": "corresponding predictions as " + }, + { + "bbox": [ + 46, + 72, + 287, + 109 + ], + "type": "inline_equation", + "content": "\\hat{y}_{\\phi,i}" + }, + { + "bbox": [ + 46, + 72, + 287, + 109 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 72, + 287, + 109 + ], + "type": "inline_equation", + "content": "\\hat{y}_{\\rho,i}" + }, + { + "bbox": [ + 46, + 72, + 287, + 109 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 72, + 287, + 109 + ], + "type": "inline_equation", + "content": "\\hat{y}_{\\mathrm{un},i}" + }, + { + "bbox": [ + 46, + 72, + 287, + 109 + ], + "type": "text", + "content": ". We can use Eq. (5) to decompose each of the SVDs from Eq. (6) into a set of sums. 
For example, we can write " + }, + { + "bbox": [ + 46, + 72, + 287, + 109 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 46, + 72, + 287, + 109 + ], + "type": "text", + "content": " as follows:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 114, + 287, + 148 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 114, + 287, + 148 + ], + "spans": [ + { + "bbox": [ + 106, + 114, + 287, + 148 + ], + "type": "interline_equation", + "content": "y _ {\\phi , i} = \\sum_ {j = t _ {\\phi , i - 1}} ^ {t _ {\\phi , i}} \\sigma_ {\\phi , j} \\mathbf {u} _ {\\phi , j} \\mathbf {v} _ {\\phi , j} ^ {\\top}. \\tag {7}", + "image_path": "4dd67cbb424abf663f0688619d0e483a122a0edca6a92301c081277abec66fa5.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 153, + 287, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 153, + 287, + 177 + ], + "spans": [ + { + "bbox": [ + 47, + 153, + 287, + 177 + ], + "type": "text", + "content": "The reconstructions for the other quantities can be written with their respective SVDs and thresholds similar to Eq. (7)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 177, + 287, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 177, + 287, + 201 + ], + "spans": [ + { + "bbox": [ + 47, + 177, + 287, + 201 + ], + "type": "text", + "content": "Each band learns a single reconstruction for these quantities at each pixel." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 99, + 207, + 287, + 220 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 207, + 287, + 220 + ], + "spans": [ + { + "bbox": [ + 99, + 207, + 287, + 220 + ], + "type": "interline_equation", + "content": "g _ {i} (x, y) = \\hat {y} _ {i} = (\\hat {y} _ {\\phi , i}, \\hat {y} _ {\\rho , i}, \\hat {y} _ {\\mathrm {u n}, i}). \\tag {8}", + "image_path": "8d37ced8862680ea5b5b2113a9bcf60070a5ce0faccee845e1da4af31d47691e.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 225, + 287, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 225, + 287, + 285 + ], + "spans": [ + { + "bbox": [ + 46, + 225, + 287, + 285 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 46, + 225, + 287, + 285 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 225, + 287, + 285 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 225, + 287, + 285 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 46, + 225, + 287, + 285 + ], + "type": "text", + "content": " constitute the 2D pixel coordinate vector that serves as the input to the network. This coordinate is passed through each band of the network to compute all " + }, + { + "bbox": [ + 46, + 225, + 287, + 285 + ], + "type": "inline_equation", + "content": "\\hat{y}_i" + }, + { + "bbox": [ + 46, + 225, + 287, + 285 + ], + "type": "text", + "content": ", and then the fully reconstructed image is calculated as " + }, + { + "bbox": [ + 46, + 225, + 287, + 285 + ], + "type": "inline_equation", + "content": "\\sum_{i}\\hat{y}_{i}" + }, + { + "bbox": [ + 46, + 225, + 287, + 285 + ], + "type": "text", + "content": ". See Fig. 2 for a visualization of this entire process." 
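To make the band structure concrete, here is a minimal PyTorch sketch. It is a simplified stand-in rather than the authors' released implementation: the Band class and the random coordinate batch are hypothetical, although the 10 bands, 2 hidden layers of width 256 and the sine frequency omega_0 = 90 follow the hyperparameters reported in Sec. 3.5. Each band maps a 2D coordinate to a (phi, rho, I_un) triple, the per-band outputs would be supervised against the banded SVD targets of Eq. (7), and the full prediction is the sum over bands as in Eq. (8).

import torch
import torch.nn as nn

class Band(nn.Module):
    # Simplified stand-in for one band g_i: 2D pixel coordinate -> (phi, rho, I_un).
    def __init__(self, hidden=256, omega0=90.0):
        super().__init__()
        self.l1 = nn.Linear(2, hidden)
        self.l2 = nn.Linear(hidden, hidden)
        self.out = nn.Linear(hidden, 3)
        self.omega0 = omega0

    def forward(self, xy):
        h = torch.sin(self.omega0 * self.l1(xy))  # sine activations as in SIREN
        h = torch.sin(self.omega0 * self.l2(h))
        return self.out(h)

n_b = 10
bands = nn.ModuleList([Band() for _ in range(n_b)])
xy = torch.rand(4096, 2) * 2 - 1               # sampled pixel coordinates in [-1, 1]^2
per_band = [g(xy) for g in bands]              # hat{y}_i for each band
prediction = torch.stack(per_band).sum(dim=0)  # summed reconstruction, cf. Eq. (8)
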
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 291, + 137, + 303 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 291, + 137, + 303 + ], + "spans": [ + { + "bbox": [ + 47, + 291, + 137, + 303 + ], + "type": "text", + "content": "3.4. Loss functions" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 308, + 287, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 308, + 287, + 404 + ], + "spans": [ + { + "bbox": [ + 46, + 308, + 287, + 404 + ], + "type": "text", + "content": "Our network outputs a set of " + }, + { + "bbox": [ + 46, + 308, + 287, + 404 + ], + "type": "inline_equation", + "content": "n_b" + }, + { + "bbox": [ + 46, + 308, + 287, + 404 + ], + "type": "text", + "content": " images. For each band, we compute the MSE between the cumulative sum of all outputs up to, and including, the current band. We define multiplicative factors for the three polar quantities as " + }, + { + "bbox": [ + 46, + 308, + 287, + 404 + ], + "type": "inline_equation", + "content": "\\lambda_{\\phi}" + }, + { + "bbox": [ + 46, + 308, + 287, + 404 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 308, + 287, + 404 + ], + "type": "inline_equation", + "content": "\\lambda_{\\rho}" + }, + { + "bbox": [ + 46, + 308, + 287, + 404 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 308, + 287, + 404 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{un}}" + }, + { + "bbox": [ + 46, + 308, + 287, + 404 + ], + "type": "text", + "content": ". We also define factors for each band as " + }, + { + "bbox": [ + 46, + 308, + 287, + 404 + ], + "type": "inline_equation", + "content": "\\lambda_{b,i}" + }, + { + "bbox": [ + 46, + 308, + 287, + 404 + ], + "type": "text", + "content": ". The loss of the network can be calculated as follows, where " + }, + { + "bbox": [ + 46, + 308, + 287, + 404 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 46, + 308, + 287, + 404 + ], + "type": "text", + "content": " is the loss function and " + }, + { + "bbox": [ + 46, + 308, + 287, + 404 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 308, + 287, + 404 + ], + "type": "text", + "content": " is the data point for which the loss is being calculated:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 71, + 410, + 287, + 460 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 410, + 287, + 460 + ], + "spans": [ + { + "bbox": [ + 71, + 410, + 287, + 460 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} L (x) = \\sum_ {i} \\lambda_ {b, i} \\sum_ {j = 0} ^ {i} \\lambda_ {\\phi} \\left(\\hat {y} _ {\\phi , j} - y _ {\\phi , j}\\right) ^ {2} \\tag {9} \\\\ + \\lambda_ {\\rho} (\\hat {y} _ {\\rho , j} - y _ {\\rho , j}) ^ {2} + \\lambda_ {\\mathrm {u n}} (\\hat {y} _ {\\mathrm {u n}, j} - y _ {\\mathrm {u n}, j}) ^ {2}. \\\\ \\end{array}", + "image_path": "29da923095e3fad82c802831941592a2131ef3a86876620d94d564f455b950bb.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 464, + 178, + 476 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 464, + 178, + 476 + ], + "spans": [ + { + "bbox": [ + 47, + 464, + 178, + 476 + ], + "type": "text", + "content": "3.5. 
Implementation details" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 483, + 100, + 493 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 483, + 100, + 493 + ], + "spans": [ + { + "bbox": [ + 47, + 483, + 100, + 493 + ], + "type": "text", + "content": "3.5.1 Data" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 501, + 287, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 501, + 287, + 656 + ], + "spans": [ + { + "bbox": [ + 46, + 501, + 287, + 656 + ], + "type": "text", + "content": "We collected all of our own data using a Flir Blackfly S RGB polarization camera. From this camera's images, it is possible to calculate the desired polarimetric quantities using the physics discussed in Sec. 3.1. We release two datasets with this paper. The first contains the six scenes used to create figures in this paper. The second set contains twenty four additional scenes for use in validating our approach. The captured scenes represent a diverse set of polarization effects. The DoLP and AoLP values span the entire ranges (zero to one for DoLP and zero to pi for AoLP) of possible values. We capture interesting polarization phenomena such as transparent and reflective surfaces. All released images have a resolution of " + }, + { + "bbox": [ + 46, + 501, + 287, + 656 + ], + "type": "inline_equation", + "content": "1024 \\times 1024" + }, + { + "bbox": [ + 46, + 501, + 287, + 656 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 670, + 156, + 682 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 670, + 156, + 682 + ], + "spans": [ + { + "bbox": [ + 47, + 670, + 156, + 682 + ], + "type": "text", + "content": "3.5.2 Hyperparameters" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "type": "text", + "content": "We built all models in PyTorch [48]. We began all experiments with a learning rate of " + }, + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-5}" + }, + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "type": "text", + "content": ", and then multiplied" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "text", + "content": "it by 0.1 at 5000 epochs. Models were trained for a total of 10000 epochs. We also set the unitless frequency parameter " + }, + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "inline_equation", + "content": "\\omega_0" + }, + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "text", + "content": " of our sine activations to 90. For our best model, we used a total of 10 bands, each with 2 hidden layers and a hidden dimension of 256." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 133, + 545, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 133, + 545, + 191 + ], + "spans": [ + { + "bbox": [ + 304, + 133, + 545, + 191 + ], + "type": "text", + "content": "We chose the singular value thresholds of each band based on the sum of the magnitudes of singular values. 
Band one was given roughly " + }, + { + "bbox": [ + 304, + 133, + 545, + 191 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 304, + 133, + 545, + 191 + ], + "type": "text", + "content": " of the sum, then the others " + }, + { + "bbox": [ + 304, + 133, + 545, + 191 + ], + "type": "inline_equation", + "content": "99\\%" + }, + { + "bbox": [ + 304, + 133, + 545, + 191 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 133, + 545, + 191 + ], + "type": "inline_equation", + "content": "99.9\\%" + }, + { + "bbox": [ + 304, + 133, + 545, + 191 + ], + "type": "text", + "content": ", and so on. Exact values for " + }, + { + "bbox": [ + 304, + 133, + 545, + 191 + ], + "type": "inline_equation", + "content": "\\lambda_{b_i}" + }, + { + "bbox": [ + 304, + 133, + 545, + 191 + ], + "type": "text", + "content": " used in all presented experiments can be found in the supplement." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 192, + 545, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 192, + 545, + 215 + ], + "spans": [ + { + "bbox": [ + 306, + 192, + 545, + 215 + ], + "type": "text", + "content": "For our experiments, we set " + }, + { + "bbox": [ + 306, + 192, + 545, + 215 + ], + "type": "inline_equation", + "content": "\\lambda_{\\phi} = 1.0" + }, + { + "bbox": [ + 306, + 192, + 545, + 215 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 306, + 192, + 545, + 215 + ], + "type": "inline_equation", + "content": "\\lambda_{\\rho} = 5.0" + }, + { + "bbox": [ + 306, + 192, + 545, + 215 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 306, + 192, + 545, + 215 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{un}} = 5.0" + }, + { + "bbox": [ + 306, + 192, + 545, + 215 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 225, + 386, + 238 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 225, + 386, + 238 + ], + "spans": [ + { + "bbox": [ + 306, + 225, + 386, + 238 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 245, + 545, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 245, + 545, + 364 + ], + "spans": [ + { + "bbox": [ + 304, + 245, + 545, + 364 + ], + "type": "text", + "content": "In this section, we present comparisons between our model, SIREN [52], ACORN [34] and an MLP using ReLU activations and positional encoding, as used in NeRF [38]. We changed the number of parameters and output values of the baseline architectures, since originally these models were designed to fit only a single image at a time. We also changed the frequency parameter " + }, + { + "bbox": [ + 304, + 245, + 545, + 364 + ], + "type": "inline_equation", + "content": "\\omega_0" + }, + { + "bbox": [ + 304, + 245, + 545, + 364 + ], + "type": "text", + "content": " of the SIREN sine activations to 90 to match the parameter used in our own model. All our models were trained using the training strategy discussed in Sec. 3.5." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 371, + 490, + 384 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 371, + 490, + 384 + ], + "spans": [ + { + "bbox": [ + 306, + 371, + 490, + 384 + ], + "type": "text", + "content": "4.1. 
Validation of proposed failure case" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 389, + 545, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 389, + 545, + 556 + ], + "spans": [ + { + "bbox": [ + 304, + 389, + 545, + 556 + ], + "type": "text", + "content": "We hypothesized the reason for the poor performance of baseline models when fitting polarimetric images was due to the presence of details at high spatial frequencies in the captured AoLP maps. To validate this hypothesis, we performed low-pass filtering on AoLP maps of a scene and then fit a model on the resulting AoLP, DoLP and " + }, + { + "bbox": [ + 304, + 389, + 545, + 556 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{un}" + }, + { + "bbox": [ + 304, + 389, + 545, + 556 + ], + "type": "text", + "content": " maps. We found a clear trend in the reconstruction quality as we filtered out higher percentages of high spatial frequencies. All models performed better when fewer high frequency details were present in the target images. This aligns with our idea that these details create difficult scenes for networks to reconstruct. For the scene in Fig. 3, the AoLP reconstruction SSIMs with different amounts of frequencies removed from the GT AoLP maps can be seen in Table 2." + } + ] + } + ], + "index": 20 + }, + { + "type": "table", + "bbox": [ + 306, + 564, + 545, + 616 + ], + "blocks": [ + { + "bbox": [ + 306, + 564, + 545, + 616 + ], + "lines": [ + { + "bbox": [ + 306, + 564, + 545, + 616 + ], + "spans": [ + { + "bbox": [ + 306, + 564, + 545, + 616 + ], + "type": "table", + "html": "
<table><tr><td>% Highest Frequencies Removed</td><td>SIREN [52]</td><td>ACORN [34]</td><td>ReLU P.E. [38]</td></tr>
<tr><td>0%</td><td>0.60</td><td>0.51</td><td>0.63</td></tr>
<tr><td>75%</td><td>0.54</td><td>0.80</td><td>0.93</td></tr>
<tr><td>80.5%</td><td>0.89</td><td>0.97</td><td>0.98</td></tr>
<tr><td>93.75%</td><td>0.95</td><td>0.99</td><td>0.99</td></tr></table>
", + "image_path": "de75df019bacd942c24557e9b56e723774a6dafc574818fa4c51857709e0bf47.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "table_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 624, + 545, + 667 + ], + "lines": [ + { + "bbox": [ + 304, + 624, + 545, + 667 + ], + "spans": [ + { + "bbox": [ + 304, + 624, + 545, + 667 + ], + "type": "text", + "content": "Table 2. All baseline models reconstruct AoLP maps better when details at higher spatial frequencies are filtered out. This trend validates our hypothesis that images with high frequency details are more difficult for a network to reconstruct." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 306, + 671, + 441, + 684 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 671, + 441, + 684 + ], + "spans": [ + { + "bbox": [ + 306, + 671, + 441, + 684 + ], + "type": "text", + "content": "4.2. Comparison with others" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": "We trained both our model and the baselines to predict AoLP " + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "(\\Phi)" + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": ", DoLP " + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "(\\rho)" + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{un}" + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": " maps directly. 
Quali" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "16583" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 54, + 83, + 149, + 178 + ], + "blocks": [ + { + "bbox": [ + 96, + 74, + 108, + 82 + ], + "lines": [ + { + "bbox": [ + 96, + 74, + 108, + 82 + ], + "spans": [ + { + "bbox": [ + 96, + 74, + 108, + 82 + ], + "type": "text", + "content": "GT" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 54, + 83, + 149, + 178 + ], + "lines": [ + { + "bbox": [ + 54, + 83, + 149, + 178 + ], + "spans": [ + { + "bbox": [ + 54, + 83, + 149, + 178 + ], + "type": "image", + "image_path": "894ac63c8e6f5837dbd9828496ed69bf8abcee697de7b7d44226dcf7419db036.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 151, + 83, + 247, + 179 + ], + "blocks": [ + { + "bbox": [ + 179, + 74, + 219, + 83 + ], + "lines": [ + { + "bbox": [ + 179, + 74, + 219, + 83 + ], + "spans": [ + { + "bbox": [ + 179, + 74, + 219, + 83 + ], + "type": "text", + "content": "SIREN [52]" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 151, + 83, + 247, + 179 + ], + "lines": [ + { + "bbox": [ + 151, + 83, + 247, + 179 + ], + "spans": [ + { + "bbox": [ + 151, + 83, + 247, + 179 + ], + "type": "image", + "image_path": "219672bd146029b50af6bef2d4f2582e90313335177502790d4a8c0ec26fc225.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 159, + 181, + 239, + 189 + ], + "lines": [ + { + "bbox": [ + 159, + 181, + 239, + 189 + ], + "spans": [ + { + "bbox": [ + 159, + 181, + 239, + 189 + ], + "type": "text", + "content": "SSIM/PSNR: 0.60/14.32" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 249, + 83, + 345, + 179 + ], + "blocks": [ + { + "bbox": [ + 275, + 74, + 318, + 83 + ], + "lines": [ + { + "bbox": [ + 275, + 74, + 318, + 83 + ], + "spans": [ + { + "bbox": [ + 275, + 74, + 318, + 83 + ], + "type": "text", + "content": "ACORN [34]" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 249, + 83, + 345, + 179 + ], + "lines": [ + { + "bbox": [ + 249, + 83, + 345, + 179 + ], + "spans": [ + { + "bbox": [ + 249, + 83, + 345, + 179 + ], + "type": "image", + "image_path": "4b0ba5d5299bc6bb52b5e971debba80e0322daa05ac40dd28118f14ee7ffe6fb.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 257, + 181, + 337, + 189 + ], + "lines": [ + { + "bbox": [ + 257, + 181, + 337, + 189 + ], + "spans": [ + { + "bbox": [ + 257, + 181, + 337, + 189 + ], + "type": "text", + "content": "SSIM/PSNR: 0.51/15.99" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 347, + 83, + 442, + 179 + ], + "blocks": [ + { + "bbox": [ + 369, + 74, + 419, + 83 + ], + "lines": [ + { + "bbox": [ + 369, + 74, + 419, + 83 + ], + "spans": [ + { + "bbox": [ + 369, + 74, + 419, + 83 + ], + "type": "text", + "content": "ReLU P.E. 
[38]" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 347, + 83, + 442, + 179 + ], + "lines": [ + { + "bbox": [ + 347, + 83, + 442, + 179 + ], + "spans": [ + { + "bbox": [ + 347, + 83, + 442, + 179 + ], + "type": "image", + "image_path": "d7ec3e4ed80f1182d343c1558e0a86b338935748308aa4f646857122dca29c5c.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 354, + 181, + 435, + 189 + ], + "lines": [ + { + "bbox": [ + 354, + 181, + 435, + 189 + ], + "spans": [ + { + "bbox": [ + 354, + 181, + 435, + 189 + ], + "type": "text", + "content": "SSIM/PSNR: 0.63/17.18" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 443, + 83, + 541, + 179 + ], + "blocks": [ + { + "bbox": [ + 483, + 75, + 500, + 82 + ], + "lines": [ + { + "bbox": [ + 483, + 75, + 500, + 82 + ], + "spans": [ + { + "bbox": [ + 483, + 75, + 500, + 82 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 443, + 83, + 541, + 179 + ], + "lines": [ + { + "bbox": [ + 443, + 83, + 541, + 179 + ], + "spans": [ + { + "bbox": [ + 443, + 83, + 541, + 179 + ], + "type": "image", + "image_path": "78e5251bffc2a2a040676706242845dae107efe0588ed652a9842e431f03c553.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 452, + 181, + 531, + 189 + ], + "lines": [ + { + "bbox": [ + 452, + 181, + 531, + 189 + ], + "spans": [ + { + "bbox": [ + 452, + 181, + 531, + 189 + ], + "type": "text", + "content": "SSIM/PSNR: 0.77/16.57" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 53, + 191, + 149, + 287 + ], + "blocks": [ + { + "bbox": [ + 92, + 181, + 112, + 189 + ], + "lines": [ + { + "bbox": [ + 92, + 181, + 112, + 189 + ], + "spans": [ + { + "bbox": [ + 92, + 181, + 112, + 189 + ], + "type": "text", + "content": "AoLP" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 53, + 191, + 149, + 287 + ], + "lines": [ + { + "bbox": [ + 53, + 191, + 149, + 287 + ], + "spans": [ + { + "bbox": [ + 53, + 191, + 149, + 287 + ], + "type": "image", + "image_path": "206b75d1ec28eba3527f38730b6db8ec9d66c652c4aa1ba6348f618db675bb11.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 91, + 289, + 112, + 297 + ], + "lines": [ + { + "bbox": [ + 91, + 289, + 112, + 297 + ], + "spans": [ + { + "bbox": [ + 91, + 289, + 112, + 297 + ], + "type": "text", + "content": "DoLP" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 151, + 191, + 247, + 287 + ], + "blocks": [ + { + "bbox": [ + 151, + 191, + 247, + 287 + ], + "lines": [ + { + "bbox": [ + 151, + 191, + 247, + 287 + ], + "spans": [ + { + "bbox": [ + 151, + 191, + 247, + 287 + ], + "type": "image", + "image_path": "8da038295be72acd62631f8f71f1341c349f38cb63016f8be6e9a1707236e427.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 159, + 289, + 239, + 297 + ], + "lines": [ + { + "bbox": [ + 159, + 289, + 239, + 297 + ], + "spans": [ + { + "bbox": [ + 159, + 289, + 239, + 297 + ], + "type": "text", + "content": "SSIM/PSNR: 0.73/29.83" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, 
+ { + "type": "image", + "bbox": [ + 249, + 191, + 345, + 287 + ], + "blocks": [ + { + "bbox": [ + 249, + 191, + 345, + 287 + ], + "lines": [ + { + "bbox": [ + 249, + 191, + 345, + 287 + ], + "spans": [ + { + "bbox": [ + 249, + 191, + 345, + 287 + ], + "type": "image", + "image_path": "9f0017eefa0a779cf8a6cd643d76b9401bc6e8e734907c866007124c1863ac2e.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 257, + 289, + 337, + 297 + ], + "lines": [ + { + "bbox": [ + 257, + 289, + 337, + 297 + ], + "spans": [ + { + "bbox": [ + 257, + 289, + 337, + 297 + ], + "type": "text", + "content": "SSIM/PSNR: 0.80/31.78" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 347, + 191, + 442, + 287 + ], + "blocks": [ + { + "bbox": [ + 347, + 191, + 442, + 287 + ], + "lines": [ + { + "bbox": [ + 347, + 191, + 442, + 287 + ], + "spans": [ + { + "bbox": [ + 347, + 191, + 442, + 287 + ], + "type": "image", + "image_path": "c53b8f4cb5579fdbebc74c94e747e74788f917aca4fc3b41b2d54fa45e445e94.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 354, + 289, + 435, + 297 + ], + "lines": [ + { + "bbox": [ + 354, + 289, + 435, + 297 + ], + "spans": [ + { + "bbox": [ + 354, + 289, + 435, + 297 + ], + "type": "text", + "content": "SSIM/PSNR: 0.79/32.06" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 443, + 191, + 541, + 287 + ], + "blocks": [ + { + "bbox": [ + 443, + 191, + 541, + 287 + ], + "lines": [ + { + "bbox": [ + 443, + 191, + 541, + 287 + ], + "spans": [ + { + "bbox": [ + 443, + 191, + 541, + 287 + ], + "type": "image", + "image_path": "4758abb3f2fc53c31d87da3a469712556e0aa964ce1cf461476be42389ff52d6.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 463, + 289, + 521, + 297 + ], + "lines": [ + { + "bbox": [ + 463, + 289, + 521, + 297 + ], + "spans": [ + { + "bbox": [ + 463, + 289, + 521, + 297 + ], + "type": "text", + "content": "SSIM: 0.82/34.56" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 54, + 299, + 149, + 414 + ], + "blocks": [ + { + "bbox": [ + 54, + 299, + 149, + 414 + ], + "lines": [ + { + "bbox": [ + 54, + 299, + 149, + 414 + ], + "spans": [ + { + "bbox": [ + 54, + 299, + 149, + 414 + ], + "type": "image", + "image_path": "a371f3204275bd6c0aaa77cee788ca9027b40fdd877be2d94b15253e4de5382b.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 151, + 299, + 247, + 414 + ], + "blocks": [ + { + "bbox": [ + 151, + 299, + 247, + 414 + ], + "lines": [ + { + "bbox": [ + 151, + 299, + 247, + 414 + ], + "spans": [ + { + "bbox": [ + 151, + 299, + 247, + 414 + ], + "type": "image", + "image_path": "26056e5fb16626ad87bea0c504957cf7d1f2c8f77583efc5c843907d85af0968.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 249, + 299, + 344, + 414 + ], + "blocks": [ + { + "bbox": [ + 249, + 299, + 344, + 414 + ], + "lines": [ + { + "bbox": [ + 249, + 299, + 344, + 414 + ], + "spans": [ + { + "bbox": [ + 249, + 299, + 344, + 414 + ], + "type": "image", + "image_path": "fb5aa89caaa1b15d862b2f0e9dd53831566d0dd71cbf26be800f07b6a036f5ff.jpg" + } + ] + } + ], + "index": 
27, + "angle": 0, + "type": "image_body" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 347, + 299, + 442, + 414 + ], + "blocks": [ + { + "bbox": [ + 347, + 299, + 442, + 414 + ], + "lines": [ + { + "bbox": [ + 347, + 299, + 442, + 414 + ], + "spans": [ + { + "bbox": [ + 347, + 299, + 442, + 414 + ], + "type": "image", + "image_path": "cfdb3ca828d32759100f4f68dc7ec63365cdce609436299de164a0a4be84ad20.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 443, + 299, + 539, + 414 + ], + "blocks": [ + { + "bbox": [ + 443, + 299, + 539, + 414 + ], + "lines": [ + { + "bbox": [ + 443, + 299, + 539, + 414 + ], + "spans": [ + { + "bbox": [ + 443, + 299, + 539, + 414 + ], + "type": "image", + "image_path": "54c3d7daa3605fe026e756029b47cebd0af1fdfb5af5301a2e340b3df09c10de.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 53, + 415, + 149, + 511 + ], + "blocks": [ + { + "bbox": [ + 53, + 415, + 149, + 511 + ], + "lines": [ + { + "bbox": [ + 53, + 415, + 149, + 511 + ], + "spans": [ + { + "bbox": [ + 53, + 415, + 149, + 511 + ], + "type": "image", + "image_path": "d864f0d4325f7617845db59c764cc4a668493db20c6d71fe8e8e68fb461f1388.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 151, + 415, + 247, + 511 + ], + "blocks": [ + { + "bbox": [ + 151, + 415, + 247, + 511 + ], + "lines": [ + { + "bbox": [ + 151, + 415, + 247, + 511 + ], + "spans": [ + { + "bbox": [ + 151, + 415, + 247, + 511 + ], + "type": "image", + "image_path": "8ac2ef750c0bd8d7d9a189a174af23d9ce98f8e2c538b45959dd3b63f0fcf84a.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 159, + 513, + 239, + 521 + ], + "lines": [ + { + "bbox": [ + 159, + 513, + 239, + 521 + ], + "spans": [ + { + "bbox": [ + 159, + 513, + 239, + 521 + ], + "type": "text", + "content": "SSIM/PSNR: 0.59/26.42" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_caption" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 249, + 415, + 345, + 511 + ], + "blocks": [ + { + "bbox": [ + 249, + 415, + 345, + 511 + ], + "lines": [ + { + "bbox": [ + 249, + 415, + 345, + 511 + ], + "spans": [ + { + "bbox": [ + 249, + 415, + 345, + 511 + ], + "type": "image", + "image_path": "83870ba8fe998c9046d9ba6feff9bc288e91e9c58ecf5d1946a9989ca00d7288.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 257, + 513, + 337, + 521 + ], + "lines": [ + { + "bbox": [ + 257, + 513, + 337, + 521 + ], + "spans": [ + { + "bbox": [ + 257, + 513, + 337, + 521 + ], + "type": "text", + "content": "SSIM/PSNR: 0.77/28.43" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_caption" + } + ], + "index": 34 + }, + { + "type": "image", + "bbox": [ + 347, + 415, + 442, + 511 + ], + "blocks": [ + { + "bbox": [ + 347, + 415, + 442, + 511 + ], + "lines": [ + { + "bbox": [ + 347, + 415, + 442, + 511 + ], + "spans": [ + { + "bbox": [ + 347, + 415, + 442, + 511 + ], + "type": "image", + "image_path": "9b07c75b56190125d26fc54b6df16a8b12f5557b37a655c0abb1afaae6dc9978.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 354, + 513, + 435, + 521 + ], + "lines": [ + { + "bbox": [ + 354, + 513, + 435, + 521 + ], + "spans": [ + { + "bbox": [ + 354, + 513, + 435, + 521 + ], + "type": 
"text", + "content": "SSIM/PSNR: 0.71/29.58" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_caption" + } + ], + "index": 36 + }, + { + "type": "image", + "bbox": [ + 443, + 415, + 541, + 511 + ], + "blocks": [ + { + "bbox": [ + 443, + 415, + 541, + 511 + ], + "lines": [ + { + "bbox": [ + 443, + 415, + 541, + 511 + ], + "spans": [ + { + "bbox": [ + 443, + 415, + 541, + 511 + ], + "type": "image", + "image_path": "8106318c099bf3b71b3b5de39926488f31d3886d0233125d74418fc7ab2d7de5.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 452, + 513, + 532, + 521 + ], + "lines": [ + { + "bbox": [ + 452, + 513, + 532, + 521 + ], + "spans": [ + { + "bbox": [ + 452, + 513, + 532, + 521 + ], + "type": "text", + "content": "SSIM/PSNR: 0.89/34.82" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_caption" + } + ], + "index": 38 + }, + { + "type": "image", + "bbox": [ + 54, + 523, + 149, + 637 + ], + "blocks": [ + { + "bbox": [ + 94, + 514, + 108, + 521 + ], + "lines": [ + { + "bbox": [ + 94, + 514, + 108, + 521 + ], + "spans": [ + { + "bbox": [ + 94, + 514, + 108, + 521 + ], + "type": "text", + "content": "Un" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 54, + 523, + 149, + 637 + ], + "lines": [ + { + "bbox": [ + 54, + 523, + 149, + 637 + ], + "spans": [ + { + "bbox": [ + 54, + 523, + 149, + 637 + ], + "type": "image", + "image_path": "8a40f8d92246985a22b1bbd5416e3c6b04bd7c1f879a9c5c96bd49ea8d92b904.jpg" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 648, + 547, + 682 + ], + "lines": [ + { + "bbox": [ + 46, + 648, + 547, + 682 + ], + "spans": [ + { + "bbox": [ + 46, + 648, + 547, + 682 + ], + "type": "text", + "content": "Figure 3. Our model shows higher SSIM and fewer artifacts on predicted " + }, + { + "bbox": [ + 46, + 648, + 547, + 682 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 46, + 648, + 547, + 682 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 648, + 547, + 682 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 46, + 648, + 547, + 682 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 648, + 547, + 682 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{un}" + }, + { + "bbox": [ + 46, + 648, + 547, + 682 + ], + "type": "text", + "content": " maps. Baseline models cause noise or tiling which is clearly visible on the checkerboard pattern on the floor, where all three quantities take large values. The artifacts are present on objects exhibiting both specular reflections, like the floor, and diffuse reflections, like the wall and doors in the background." 
+ } + ] + } + ], + "index": 45, + "angle": 0, + "type": "image_caption" + } + ], + "index": 40 + }, + { + "type": "image", + "bbox": [ + 151, + 523, + 247, + 637 + ], + "blocks": [ + { + "bbox": [ + 151, + 523, + 247, + 637 + ], + "lines": [ + { + "bbox": [ + 151, + 523, + 247, + 637 + ], + "spans": [ + { + "bbox": [ + 151, + 523, + 247, + 637 + ], + "type": "image", + "image_path": "98d88428449630d099696dea35ad62ee492fd2d7db84c6cd0b76212e63e369e6.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_body" + } + ], + "index": 41 + }, + { + "type": "image", + "bbox": [ + 249, + 523, + 344, + 637 + ], + "blocks": [ + { + "bbox": [ + 249, + 523, + 344, + 637 + ], + "lines": [ + { + "bbox": [ + 249, + 523, + 344, + 637 + ], + "spans": [ + { + "bbox": [ + 249, + 523, + 344, + 637 + ], + "type": "image", + "image_path": "341859f58aaab3e72eb3cd4651d7fa10cf3bfee85d205741c9ba500ac7aee56b.jpg" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_body" + } + ], + "index": 42 + }, + { + "type": "image", + "bbox": [ + 347, + 523, + 442, + 637 + ], + "blocks": [ + { + "bbox": [ + 347, + 523, + 442, + 637 + ], + "lines": [ + { + "bbox": [ + 347, + 523, + 442, + 637 + ], + "spans": [ + { + "bbox": [ + 347, + 523, + 442, + 637 + ], + "type": "image", + "image_path": "f6a518f6b76776f65c51a7106d12d995e4d0ed70541d258e2b0b3198091be6c1.jpg" + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_body" + } + ], + "index": 43 + }, + { + "type": "image", + "bbox": [ + 443, + 523, + 539, + 637 + ], + "blocks": [ + { + "bbox": [ + 443, + 523, + 539, + 637 + ], + "lines": [ + { + "bbox": [ + 443, + 523, + 539, + 637 + ], + "spans": [ + { + "bbox": [ + 443, + 523, + 539, + 637 + ], + "type": "image", + "image_path": "be7421ee9c4e38f2f692300c794cf956350e3925399473df3b793a58c41c22b4.jpg" + } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_body" + } + ], + "index": 44 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "text", + "content": "16584" + } + ] + } + ], + "index": 46 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 66, + 71, + 529, + 275 + ], + "blocks": [ + { + "bbox": [ + 66, + 71, + 529, + 275 + ], + "lines": [ + { + "bbox": [ + 66, + 71, + 529, + 275 + ], + "spans": [ + { + "bbox": [ + 66, + 71, + 529, + 275 + ], + "type": "image", + "image_path": "e67bcc94689dddb038c7f79dcb9e3bb084b487f258ce89580cb074e29b0004d6.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 285, + 547, + 319 + ], + "lines": [ + { + "bbox": [ + 46, + 285, + 547, + 319 + ], + "spans": [ + { + "bbox": [ + 46, + 285, + 547, + 319 + ], + "type": "text", + "content": "Figure 4. Our model can more accurately reconstruct RGB images taken through different polarizing filter angles when compared to SIREN [52], ACORN [34] and a ReLU MLP [38] with positional encoding. 
The images reconstructed here are the scene as viewed through a linear polarizer oriented at " + }, + { + "bbox": [ + 46, + 285, + 547, + 319 + ], + "type": "inline_equation", + "content": "0^{\\circ}" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 50, + 337, + 108, + 393 + ], + "blocks": [ + { + "bbox": [ + 50, + 337, + 108, + 393 + ], + "lines": [ + { + "bbox": [ + 50, + 337, + 108, + 393 + ], + "spans": [ + { + "bbox": [ + 50, + 337, + 108, + 393 + ], + "type": "image", + "image_path": "66d00254f3228d5be010764d4712f8d1062b608523cc0f37c91b6b3e0ac3eaa8.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 51, + 395, + 107, + 449 + ], + "blocks": [ + { + "bbox": [ + 51, + 395, + 107, + 449 + ], + "lines": [ + { + "bbox": [ + 51, + 395, + 107, + 449 + ], + "spans": [ + { + "bbox": [ + 51, + 395, + 107, + 449 + ], + "type": "image", + "image_path": "e6a40800a18645bf213449b0c15d773fb744e8b6d83c4b2f53a38cb261f74753.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 51, + 449, + 107, + 505 + ], + "blocks": [ + { + "bbox": [ + 51, + 449, + 107, + 505 + ], + "lines": [ + { + "bbox": [ + 51, + 449, + 107, + 505 + ], + "spans": [ + { + "bbox": [ + 51, + 449, + 107, + 505 + ], + "type": "image", + "image_path": "31487e577aaf55b395f5f8d380eb899fad8d8d5345f20e470895f563cdf1ea7d.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 72, + 506, + 85, + 515 + ], + "lines": [ + { + "bbox": [ + 72, + 506, + 85, + 515 + ], + "spans": [ + { + "bbox": [ + 72, + 506, + 85, + 515 + ], + "type": "text", + "content": "GT" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 110, + 337, + 167, + 393 + ], + "blocks": [ + { + "bbox": [ + 110, + 337, + 167, + 393 + ], + "lines": [ + { + "bbox": [ + 110, + 337, + 167, + 393 + ], + "spans": [ + { + "bbox": [ + 110, + 337, + 167, + 393 + ], + "type": "image", + "image_path": "c84cf0120ade6da08702c7a070e1ef93891f5e4b3a5bd60618f42cb6112b85de.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 110, + 394, + 167, + 449 + ], + "blocks": [ + { + "bbox": [ + 110, + 394, + 167, + 449 + ], + "lines": [ + { + "bbox": [ + 110, + 394, + 167, + 449 + ], + "spans": [ + { + "bbox": [ + 110, + 394, + 167, + 449 + ], + "type": "image", + "image_path": "649f17527a60f65c786f89ea05a02ce34d05256fb198b59a516718bbad72dbe0.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 110, + 449, + 167, + 505 + ], + "blocks": [ + { + "bbox": [ + 110, + 449, + 167, + 505 + ], + "lines": [ + { + "bbox": [ + 110, + 449, + 167, + 505 + ], + "spans": [ + { + "bbox": [ + 110, + 449, + 167, + 505 + ], + "type": "image", + "image_path": "12614e630b9e65dc6eac9610fd8e867c0203de53f61a18a75de8bacf03bfcd98.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 127, + 506, + 149, + 515 + ], + "lines": [ + { + "bbox": [ + 127, + 506, + 149, + 515 + ], + "spans": [ + { + "bbox": [ + 127, + 506, + 149, + 515 + ], + "type": "text", + "content": "1 band" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 
8 + }, + { + "type": "image", + "bbox": [ + 168, + 337, + 225, + 393 + ], + "blocks": [ + { + "bbox": [ + 168, + 337, + 225, + 393 + ], + "lines": [ + { + "bbox": [ + 168, + 337, + 225, + 393 + ], + "spans": [ + { + "bbox": [ + 168, + 337, + 225, + 393 + ], + "type": "image", + "image_path": "abb8ce532ae5b737c544cde659a2e027d0a872c6355b790033fb5f1970b688cf.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 168, + 394, + 225, + 449 + ], + "blocks": [ + { + "bbox": [ + 168, + 394, + 225, + 449 + ], + "lines": [ + { + "bbox": [ + 168, + 394, + 225, + 449 + ], + "spans": [ + { + "bbox": [ + 168, + 394, + 225, + 449 + ], + "type": "image", + "image_path": "622473d89340986fb169b15080195530e6295f6500efa7b0641b28132a4cb8bf.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 168, + 449, + 225, + 505 + ], + "blocks": [ + { + "bbox": [ + 168, + 449, + 225, + 505 + ], + "lines": [ + { + "bbox": [ + 168, + 449, + 225, + 505 + ], + "spans": [ + { + "bbox": [ + 168, + 449, + 225, + 505 + ], + "type": "image", + "image_path": "e70480a6a1d77b181db6326f428bf2a1cd6d827291f234e6cf7bdbfb43593f54.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 183, + 506, + 210, + 515 + ], + "lines": [ + { + "bbox": [ + 183, + 506, + 210, + 515 + ], + "spans": [ + { + "bbox": [ + 183, + 506, + 210, + 515 + ], + "type": "text", + "content": "4 bands" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 227, + 337, + 284, + 393 + ], + "blocks": [ + { + "bbox": [ + 227, + 337, + 284, + 393 + ], + "lines": [ + { + "bbox": [ + 227, + 337, + 284, + 393 + ], + "spans": [ + { + "bbox": [ + 227, + 337, + 284, + 393 + ], + "type": "image", + "image_path": "29308e05b1197201051ad12ef8bf555dfbdce11afebf6672d48d1da33d66ba2a.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 227, + 394, + 283, + 449 + ], + "blocks": [ + { + "bbox": [ + 227, + 394, + 283, + 449 + ], + "lines": [ + { + "bbox": [ + 227, + 394, + 283, + 449 + ], + "spans": [ + { + "bbox": [ + 227, + 394, + 283, + 449 + ], + "type": "image", + "image_path": "5f28cc4526343d84b6c91deaa7dfef579598ba1cc0ad0605bc9c4c5bba9ff6ab.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 227, + 449, + 283, + 505 + ], + "blocks": [ + { + "bbox": [ + 227, + 449, + 283, + 505 + ], + "lines": [ + { + "bbox": [ + 227, + 449, + 283, + 505 + ], + "spans": [ + { + "bbox": [ + 227, + 449, + 283, + 505 + ], + "type": "image", + "image_path": "b4ed5028ce6eda169e1a9dd073a0472ee7405d464aeb6c3d83552c279be89412.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 237, + 506, + 274, + 515 + ], + "lines": [ + { + "bbox": [ + 237, + 506, + 274, + 515 + ], + "spans": [ + { + "bbox": [ + 237, + 506, + 274, + 515 + ], + "type": "text", + "content": "Full model" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 525, + 287, + 559 + ], + "lines": [ + { + "bbox": [ + 46, + 525, + 287, + 559 + ], + "spans": [ + { + "bbox": [ + 46, + 525, + 287, + 559 + ], + "type": "text", + "content": "Figure 5. 
As the number of bands used in the reconstruction increases, so does the quality of the image. Even with a single band the reconstruction is visually close to the original." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 46, + 579, + 287, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 579, + 287, + 639 + ], + "spans": [ + { + "bbox": [ + 46, + 579, + 287, + 639 + ], + "type": "text", + "content": "tative and quantitative results can be found in Fig. 3. Our model yields better PSNR and SSIM than all baselines and it also does not produce the tiling artifacts or the noise patterns present in the reconstructions created by other models." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 47, + 647, + 231, + 659 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 647, + 231, + 659 + ], + "spans": [ + { + "bbox": [ + 47, + 647, + 231, + 659 + ], + "type": "text", + "content": "4.3. Accuracy and model size trade-off" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 46, + 665, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 288, + 713 + ], + "type": "text", + "content": "In order to fit an image with a smaller or larger model, current architectures require a full retraining with a different number of parameters. The structure of our model allows us to provide a tradeoff between model size and reconstruction" + } + ] + } + ], + "index": 21 + }, + { + "type": "table", + "bbox": [ + 308, + 338, + 544, + 432 + ], + "blocks": [ + { + "bbox": [ + 308, + 338, + 544, + 432 + ], + "lines": [ + { + "bbox": [ + 308, + 338, + 544, + 432 + ], + "spans": [ + { + "bbox": [ + 308, + 338, + 544, + 432 + ], + "type": "table", + "html": "
<table><tr><td>Model</td><td>Φ(↑)</td><td>ρ(↑)</td><td>Iun(↑)</td><td># Params. (↓)</td></tr>
<tr><td>Ours (1 band)</td><td>0.12/10.83</td><td>0.50/22.87</td><td>0.74/26.58</td><td>130K</td></tr>
<tr><td>Ours (2 bands)</td><td>0.32/14.66</td><td>0.64/28.40</td><td>0.91/34.74</td><td>270K</td></tr>
<tr><td>Ours (3 bands)</td><td>0.42/14.42</td><td>0.65/28.59</td><td>0.92/34.43</td><td>400K</td></tr>
<tr><td>Ours (4 bands)</td><td>0.51/16.32</td><td>0.65/28.71</td><td>0.92/34.62</td><td>530K</td></tr>
<tr><td>Ours (5 bands)</td><td>0.64/17.68</td><td>0.67/28.87</td><td>0.92/36.74</td><td>670K</td></tr>
<tr><td>Ours (Full model)</td><td>0.79/18.08</td><td>0.76/31.75</td><td>0.92/36.00</td><td>1.3M</td></tr>
<tr><td>SIREN [52]</td><td>0.59/15.96</td><td>0.67/28.20</td><td>0.70/28.23</td><td>660K</td></tr>
<tr><td>ACORN [34]</td><td>0.48/17.01</td><td>0.73/29.96</td><td>0.82/29.85</td><td>530K</td></tr>
<tr><td>ReLU [38] w/P.E.</td><td>0.64/18.30</td><td>0.76/30.99</td><td>0.81/32.13</td><td>660K</td></tr></table>
", + "image_path": "3006662a51133d83bb14d245e9c9684af74131cf2a0fd84faf3b32fbd6294cac.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "table_body" + } + ], + "index": 22 + }, + { + "bbox": [ + 305, + 440, + 545, + 473 + ], + "lines": [ + { + "bbox": [ + 305, + 440, + 545, + 473 + ], + "spans": [ + { + "bbox": [ + 305, + 440, + 545, + 473 + ], + "type": "text", + "content": "Table 3. As more bands are used, the number of parameters grows along with the resulting performance (SSIM/PSNR). The metrics shown here are averages across our whole dataset." + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "text", + "content": "accuracy without retraining. Each band of the model learns a representation of the image when reconstructed with a different set of singular values. If the downstream task doesn't require incredibly high accuracy, and the user would rather save and transport a smaller set of model weights, they can just save the weights from the first band of the network and reconstruct the image with only the singular values from that band, or vice versa if more accuracy is required. A visualization of reconstruction quality using different numbers of bands can be seen in Fig. 5. See Table 3 for quantitative results using different bands of our network. With a similar number of parameters to the baseline models, it achieves comparable performance to all baseline architectures. Our full model outperforms all baselines on predicting AoLP " + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "inline_equation", + "content": "(\\Phi)" + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{un}" + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "text", + "content": " maps. It is also worth noting that our full model achieves significant compression over storing raw data. 
The combined memory size of the AoLP, DoLP and " + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{un}" + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "text", + "content": " maps" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "16585" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 86, + 83, + 187, + 183 + ], + "blocks": [ + { + "bbox": [ + 130, + 70, + 143, + 79 + ], + "lines": [ + { + "bbox": [ + 130, + 70, + 143, + 79 + ], + "spans": [ + { + "bbox": [ + 130, + 70, + 143, + 79 + ], + "type": "text", + "content": "GT" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 86, + 83, + 187, + 183 + ], + "lines": [ + { + "bbox": [ + 86, + 83, + 187, + 183 + ], + "spans": [ + { + "bbox": [ + 86, + 83, + 187, + 183 + ], + "type": "image", + "image_path": "e8a748f34c8407db99d3fc22351abb39391121ed1ba0deb0ce1f34254235d847.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 194, + 83, + 294, + 183 + ], + "blocks": [ + { + "bbox": [ + 223, + 70, + 264, + 80 + ], + "lines": [ + { + "bbox": [ + 223, + 70, + 264, + 80 + ], + "spans": [ + { + "bbox": [ + 223, + 70, + 264, + 80 + ], + "type": "text", + "content": "SIREN [52]" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 194, + 83, + 294, + 183 + ], + "lines": [ + { + "bbox": [ + 194, + 83, + 294, + 183 + ], + "spans": [ + { + "bbox": [ + 194, + 83, + 294, + 183 + ], + "type": "image", + "image_path": "3a799a3959456abb28f5b11eb6a9db3843f0f60ec9effd7382718067d9c9c661.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 299, + 83, + 402, + 183 + ], + "blocks": [ + { + "bbox": [ + 324, + 70, + 375, + 80 + ], + "lines": [ + { + "bbox": [ + 324, + 70, + 375, + 80 + ], + "spans": [ + { + "bbox": [ + 324, + 70, + 375, + 80 + ], + "type": "text", + "content": "ReLU P.E. 
[38]" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 299, + 83, + 402, + 183 + ], + "lines": [ + { + "bbox": [ + 299, + 83, + 402, + 183 + ], + "spans": [ + { + "bbox": [ + 299, + 83, + 402, + 183 + ], + "type": "image", + "image_path": "bdcc38bafd91773a855aea6aa7264f9315c9c900f420c0c31f3cb7679a963096.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 406, + 83, + 507, + 183 + ], + "blocks": [ + { + "bbox": [ + 447, + 71, + 465, + 80 + ], + "lines": [ + { + "bbox": [ + 447, + 71, + 465, + 80 + ], + "spans": [ + { + "bbox": [ + 447, + 71, + 465, + 80 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 406, + 83, + 507, + 183 + ], + "lines": [ + { + "bbox": [ + 406, + 83, + 507, + 183 + ], + "spans": [ + { + "bbox": [ + 406, + 83, + 507, + 183 + ], + "type": "image", + "image_path": "6dd43389def5f008bbc1518ccb1cff2e547fc5173a837f82dd4890c785393944.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 87, + 184, + 137, + 244 + ], + "blocks": [ + { + "bbox": [ + 87, + 184, + 137, + 244 + ], + "lines": [ + { + "bbox": [ + 87, + 184, + 137, + 244 + ], + "spans": [ + { + "bbox": [ + 87, + 184, + 137, + 244 + ], + "type": "image", + "image_path": "1f9978b468cadc6b997f8cb2b6c6a6f96e89c6781160056f51ce6ac3cbb0a60b.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 256, + 547, + 289 + ], + "lines": [ + { + "bbox": [ + 46, + 256, + 547, + 289 + ], + "spans": [ + { + "bbox": [ + 46, + 256, + 547, + 289 + ], + "type": "text", + "content": "Figure 6. Both SIREN [52] and the ReLU MLP [38] with positional encoding show artifacts when queried at a different resolution than they were trained on. Our model does not. 
We trained models at a resolution of " + }, + { + "bbox": [ + 46, + 256, + 547, + 289 + ], + "type": "inline_equation", + "content": "1024 \\times 1024" + }, + { + "bbox": [ + 46, + 256, + 547, + 289 + ], + "type": "text", + "content": " and queried them at a resolution of " + }, + { + "bbox": [ + 46, + 256, + 547, + 289 + ], + "type": "inline_equation", + "content": "512 \\times 512" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 137, + 184, + 186, + 244 + ], + "blocks": [ + { + "bbox": [ + 137, + 184, + 186, + 244 + ], + "lines": [ + { + "bbox": [ + 137, + 184, + 186, + 244 + ], + "spans": [ + { + "bbox": [ + 137, + 184, + 186, + 244 + ], + "type": "image", + "image_path": "6cfc053ac7741db96885e2d107be96e4557b6775b50c8efb2609888200cb0976.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 194, + 184, + 242, + 244 + ], + "blocks": [ + { + "bbox": [ + 194, + 184, + 242, + 244 + ], + "lines": [ + { + "bbox": [ + 194, + 184, + 242, + 244 + ], + "spans": [ + { + "bbox": [ + 194, + 184, + 242, + 244 + ], + "type": "image", + "image_path": "b34f5f4be91ed9ad1dd6457b904dfb8e61a4a817068735796680244200acd835.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 244, + 184, + 293, + 245 + ], + "blocks": [ + { + "bbox": [ + 244, + 184, + 293, + 245 + ], + "lines": [ + { + "bbox": [ + 244, + 184, + 293, + 245 + ], + "spans": [ + { + "bbox": [ + 244, + 184, + 293, + 245 + ], + "type": "image", + "image_path": "8aae601fef50ca2cf4eaff39fc9c4b8eff02bc0cd424f74e3d6cae508c579435.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 301, + 184, + 349, + 244 + ], + "blocks": [ + { + "bbox": [ + 301, + 184, + 349, + 244 + ], + "lines": [ + { + "bbox": [ + 301, + 184, + 349, + 244 + ], + "spans": [ + { + "bbox": [ + 301, + 184, + 349, + 244 + ], + "type": "image", + "image_path": "5ea0d19bad2190dfe707fcb8e515330ea3c3a214e5282e448e15ed67036ea3df.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 351, + 184, + 399, + 244 + ], + "blocks": [ + { + "bbox": [ + 351, + 184, + 399, + 244 + ], + "lines": [ + { + "bbox": [ + 351, + 184, + 399, + 244 + ], + "spans": [ + { + "bbox": [ + 351, + 184, + 399, + 244 + ], + "type": "image", + "image_path": "e25c1b92080f6fd6a635fbb603067a3022b048a5516790978d8761a9809b3850.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 407, + 184, + 455, + 245 + ], + "blocks": [ + { + "bbox": [ + 407, + 184, + 455, + 245 + ], + "lines": [ + { + "bbox": [ + 407, + 184, + 455, + 245 + ], + "spans": [ + { + "bbox": [ + 407, + 184, + 455, + 245 + ], + "type": "image", + "image_path": "3cbda8f9cc88ca0bb2d0da2e5cb4c2f418c68dd635ea70e075017793ff02a7b2.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 457, + 184, + 505, + 245 + ], + "blocks": [ + { + "bbox": [ + 457, + 184, + 505, + 245 + ], + "lines": [ + { + "bbox": [ + 457, + 184, + 505, + 245 + ], + "spans": [ + { + "bbox": [ + 457, + 184, + 505, + 245 + ], + "type": "image", + "image_path": 
"70d6f0bb7083b1dc106f5d20ecbe104c7c65044cf2b3d2543b917f8de3f9032a.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 46, + 311, + 287, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 311, + 287, + 382 + ], + "spans": [ + { + "bbox": [ + 46, + 311, + 287, + 382 + ], + "type": "text", + "content": "is 36 megabytes (MB), while the size of our full model is only 5 MB. Representing images with our model allows us to scale image size without scaling memory footprint as quickly. In this work we use small images, but the memory saved when reconstructing images at the mega or gigapixel scale would be significant." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 384, + 164, + 395 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 384, + 164, + 395 + ], + "spans": [ + { + "bbox": [ + 47, + 384, + 164, + 395 + ], + "type": "text", + "content": "4.4. RGB reconstruction" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 46, + 402, + 287, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 402, + 287, + 521 + ], + "spans": [ + { + "bbox": [ + 46, + 402, + 287, + 521 + ], + "type": "text", + "content": "In addition to reconstructing the DoLP " + }, + { + "bbox": [ + 46, + 402, + 287, + 521 + ], + "type": "inline_equation", + "content": "(\\rho)" + }, + { + "bbox": [ + 46, + 402, + 287, + 521 + ], + "type": "text", + "content": ", AoLP " + }, + { + "bbox": [ + 46, + 402, + 287, + 521 + ], + "type": "inline_equation", + "content": "(\\Phi)" + }, + { + "bbox": [ + 46, + 402, + 287, + 521 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 402, + 287, + 521 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{un}" + }, + { + "bbox": [ + 46, + 402, + 287, + 521 + ], + "type": "text", + "content": " maps with our model, we also present results for reconstructing the original RGB images captured by the camera. For a specific polarizing filter angle, we can reconstruct the value of a pixel captured by the camera through that filter using Eq. (1). Our model removes the artifacts present in the reconstructions from all baseline comparisons and retains more detail comparatively. See Fig. 4 for a visualization of reconstructions of images taken through a linear polarizer oriented at " + }, + { + "bbox": [ + 46, + 402, + 287, + 521 + ], + "type": "inline_equation", + "content": "0^{\\circ}" + }, + { + "bbox": [ + 46, + 402, + 287, + 521 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 47, + 522, + 223, + 535 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 522, + 223, + 535 + ], + "spans": [ + { + "bbox": [ + 47, + 522, + 223, + 535 + ], + "type": "text", + "content": "4.5. Multiple resolution interpolation" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 46, + 540, + 287, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 540, + 287, + 661 + ], + "spans": [ + { + "bbox": [ + 46, + 540, + 287, + 661 + ], + "type": "text", + "content": "We present results for fitting an image at one resolution and querying it at a second resolution. In this section we only compare to SIREN [52] and a ReLU MLP [38], as the dynamic tiling strategy of ACORN [34] does not allow us to simply query the representation at a different resolution. 
We train both models on the original scene at a resolution of " + }, + { + "bbox": [ + 46, + 540, + 287, + 661 + ], + "type": "inline_equation", + "content": "1024 \\times 1024" + }, + { + "bbox": [ + 46, + 540, + 287, + 661 + ], + "type": "text", + "content": " and then query them at a resolution of " + }, + { + "bbox": [ + 46, + 540, + 287, + 661 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 46, + 540, + 287, + 661 + ], + "type": "text", + "content": ". Both baselines show artifacts when queried at this new resolution, while our model does not have this issue. In Fig. 6 we visualize these results on " + }, + { + "bbox": [ + 46, + 540, + 287, + 661 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{un}" + }, + { + "bbox": [ + 46, + 540, + 287, + 661 + ], + "type": "text", + "content": " maps." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 47, + 669, + 119, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 669, + 119, + 681 + ], + "spans": [ + { + "bbox": [ + 47, + 669, + 119, + 681 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": "In summary, we have presented an attempt at creating neural representations of polarimetric information without" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 304, + 311, + 546, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 311, + 546, + 395 + ], + "spans": [ + { + "bbox": [ + 304, + 311, + 546, + 395 + ], + "type": "text", + "content": "the artifacts introduced by current models. Compared to existing methods, our model shows an increase in image reconstruction quality on AoLP, DoLP and " + }, + { + "bbox": [ + 304, + 311, + 546, + 395 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{un}" + }, + { + "bbox": [ + 304, + 311, + 546, + 395 + ], + "type": "text", + "content": " maps, in addition to effectively removing the artifacts we were targeting. Having a compact representation of polarimetric images will facilitate future research in areas where this data is required." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 304, + 402, + 546, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 402, + 546, + 643 + ], + "spans": [ + { + "bbox": [ + 304, + 402, + 546, + 643 + ], + "type": "text", + "content": "While our work provides noticeable improvement over current methods, it is not perfect. To achieve state of the art performance on reconstructing AoLP maps, we need quite a few bands in our network, which makes the number of parameters quite large compared to other architectures. A valuable next step could be creating a model that could achieve the same performance as ours while cutting down on the memory footprint. Furthermore, we only demonstrated the effectiveness of this approach on 2D data, since polarization is not well studied in three dimensions. Validating our approach on 3D data would be a useful next step, once the field has developed a greater understanding of the underlying physics. We motivated our method using polarimetric data, but there are many types of data in computational imaging [8]. 
Our method will be valuable in representing multiple physical quantities of a scene at once whenever at least one measurement contains high frequency details or noise, and future research could extend this work by demonstrating its effectiveness on other types of data encountered in computational imaging." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 304, + 653, + 546, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 653, + 546, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 653, + 546, + 712 + ], + "type": "text", + "content": "Acknowledgements We thank members of the Visual Machines Group (VMG) at UCLA for feedback and support. A.K. was supported by an NSF CAREER award IIS-2046737 and Army Young Investigator Program (YIP) Award." + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "16586" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 91, + 288, + 715 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "type": "text", + "content": "[1] G.A. Atkinson and E.R. Hancock. Recovery of surface orientation from diffuse polarization. IEEE Transactions on Image Processing, 15(6):1653-1664, 2006. 2, 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 124, + 288, + 147 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 124, + 288, + 147 + ], + "spans": [ + { + "bbox": [ + 53, + 124, + 288, + 147 + ], + "type": "text", + "content": "[2] Gary A Atkinson. Polarisation photometric stereo. Computer Vision and Image Understanding, 160:158-167, 2017. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 147, + 288, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 147, + 288, + 179 + ], + "spans": [ + { + "bbox": [ + 53, + 147, + 288, + 179 + ], + "type": "text", + "content": "[3] Gary A Atkinson and Jürgen D Ernst. High-sensitivity analysis of polarization by surface reflection. Machine Vision and Applications, 29(7):1171-1189, 2018. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 179, + 288, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 179, + 288, + 236 + ], + "spans": [ + { + "bbox": [ + 53, + 179, + 288, + 236 + ], + "type": "text", + "content": "[4] Benjamin Attal, Selena Ling, Aaron Gokaslan, Christian Richardt, and James Tompkin. Matryodshka: Real-time 6dof video view synthesis using multi-sphere images. In European Conference on Computer Vision, pages 441-459. Springer, 2020.
2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 236, + 288, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 236, + 288, + 281 + ], + "spans": [ + { + "bbox": [ + 53, + 236, + 288, + 281 + ], + "type": "text", + "content": "[5] Matan Atzmon and Yaron Lipman. Sal: Sign agnostic learning of shapes from raw data. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2565-2574, 2020. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 281, + 288, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 281, + 288, + 335 + ], + "spans": [ + { + "bbox": [ + 53, + 281, + 288, + 335 + ], + "type": "text", + "content": "[6] Dejan Azinović, Olivier Maury, Christophe Hery, Matthias Nießner, and Justus Thies. High-res facial appearance capture from polarized smartphone images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2023. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 335, + 288, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 335, + 288, + 390 + ], + "spans": [ + { + "bbox": [ + 53, + 335, + 288, + 390 + ], + "type": "text", + "content": "[7] Yunhao Ba, Alex Gilbert, Franklin Wang, Jina Yang, Rui Chen, Yiqin Wang, Lei Yan, Boxin Shi, and Achuta Kadambi. Deep shape from polarization. In European Conference on Computer Vision, pages 554-571. Springer, 2020. 2, 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 390, + 288, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 390, + 288, + 414 + ], + "spans": [ + { + "bbox": [ + 53, + 390, + 288, + 414 + ], + "type": "text", + "content": "[8] Ayush Bhandari, Achuta Kadambi, and Ramesh Raskar. Computational Imaging. The MIT Press, 2022. 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 414, + 288, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 414, + 288, + 468 + ], + "spans": [ + { + "bbox": [ + 53, + 414, + 288, + 468 + ], + "type": "text", + "content": "[9] Michael Broxton, John Flynn, Ryan Overbeck, Daniel Erickson, Peter Hedman, Matthew Duvall, Jason Dourgarian, Jay Busch, Matt Whalen, and Paul Debevec. Immersive light field video with a layered mesh representation. ACM Transactions on Graphics (TOG), 39(4):86-1, 2020. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 468, + 288, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 468, + 288, + 525 + ], + "spans": [ + { + "bbox": [ + 47, + 468, + 288, + 525 + ], + "type": "text", + "content": "[10] Rohan Chabra, Jan E Lenssen, Eddy Ilg, Tanner Schmidt, Julian Straub, Steven Lovegrove, and Richard Newcombe. Deep local shapes: Learning local sdf priors for detailed 3d reconstruction. In European Conference on Computer Vision, pages 608-625. Springer, 2020. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 525, + 288, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 525, + 288, + 590 + ], + "spans": [ + { + "bbox": [ + 47, + 525, + 288, + 590 + ], + "type": "text", + "content": "[11] Eric R. Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. Pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5799-5809, June 2021. 
2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 590, + 288, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 590, + 288, + 645 + ], + "spans": [ + { + "bbox": [ + 47, + 590, + 288, + 645 + ], + "type": "text", + "content": "[12] Tongbo Chen, Hendrik P. A. Lensch, Christian Fuchs, and Hans-Peter Seidel. Polarization and phase-shifting for 3d scanning of translucent objects. In 2007 IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8, 2007. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 646, + 288, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 646, + 288, + 691 + ], + "spans": [ + { + "bbox": [ + 47, + 646, + 288, + 691 + ], + "type": "text", + "content": "[13] Zhiqin Chen and Hao Zhang. Learning implicit fields for generative shape modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5939-5948, 2019. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 691, + 288, + 715 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 691, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 691, + 288, + 715 + ], + "type": "text", + "content": "[14] Akshit Dave, Yongyi Zhao, and Ashok Veeraraghavan. Pandora: Polarization-aided neural decomposition of radiance." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 715 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "text", + "content": "In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part VII, pages 538-556. Springer, 2022. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 107, + 547, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 547, + 162 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 547, + 162 + ], + "type": "text", + "content": "[15] O. Drbohlav and R. Sara. Unambiguous determination of shape from photometric stereo with unknown light sources. In Proceedings Eighth IEEE International Conference on Computer Vision. ICCV 2001, volume 1, pages 581-586 vol.1, 2001. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 163, + 545, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 163, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 307, + 163, + 545, + 217 + ], + "type": "text", + "content": "[16] SM Ali Eslami, Danilo Jimenez Rezende, Frederic Besse, Fabio Viola, Ari S Morcos, Marta Garnelo, Avraham Ruderman, Andrei A Rusu, Ivo Danihelka, Karol Gregor, et al. Neural scene representation and rendering. Science, 360(6394):1204-1210, 2018. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 218, + 545, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 218, + 545, + 284 + ], + "spans": [ + { + "bbox": [ + 307, + 218, + 545, + 284 + ], + "type": "text", + "content": "[17] John Flynn, Michael Broxton, Paul Debevec, Matthew DuVall, Graham Fyffe, Ryan Overbeck, Noah Snavely, and Richard Tucker. Deepview: View synthesis with learned gradient descent. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2367-2376, 2019. 
2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 285, + 545, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 285, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 307, + 285, + 545, + 342 + ], + "type": "text", + "content": "[18] Kyle Genova, Forrester Cole, Avneesh Sud, Aaron Sarna, and Thomas Funkhouser. Local deep implicit functions for 3d shape. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4857-4866, 2020. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 342, + 545, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 342, + 545, + 398 + ], + "spans": [ + { + "bbox": [ + 307, + 342, + 545, + 398 + ], + "type": "text", + "content": "[19] Kyle Genova, Forrester Cole, Daniel Vlasic, Aaron Sarna, William T Freeman, and Thomas Funkhouser. Learning shape templates with structured implicit functions. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7154-7164, 2019. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 398, + 545, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 398, + 545, + 464 + ], + "spans": [ + { + "bbox": [ + 307, + 398, + 545, + 464 + ], + "type": "text", + "content": "[20] Ciriaco Goddi, Ivan Martí-Vidal, Hugo Messias, Geoffrey C Bower, Avery E Broderick, Jason Dexter, Daniel P Marrone, Monika Moscibrodzka, Hiroshi Nagai, Juan Carlos Algaba, et al. Polarimetric properties of event horizon telescope targets from alma. The Astrophysical Journal Letters, 910(1):L14, 2021. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 464, + 545, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 464, + 545, + 498 + ], + "spans": [ + { + "bbox": [ + 307, + 464, + 545, + 498 + ], + "type": "text", + "content": "[21] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit geometric regularization for learning shapes. arXiv preprint arXiv:2002.10099, 2020. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 498, + 545, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 498, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 307, + 498, + 545, + 544 + ], + "type": "text", + "content": "[22] Peter Hedman, Julien Philip, True Price, Jan-Michael Frahm, George Drettakis, and Gabriel Brostow. Deep blending for free-viewpoint image-based rendering. ACM Transactions on Graphics (TOG), 37(6):1-15, 2018. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 544, + 545, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 544, + 545, + 590 + ], + "spans": [ + { + "bbox": [ + 307, + 544, + 545, + 590 + ], + "type": "text", + "content": "[23] Philipp Henzler, Niloy J Mitra, and Tobias Ritschel. Escaping Plato's cave: 3d shape from adversarial rendering. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9984-9993, 2019. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 590, + 545, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 590, + 545, + 644 + ], + "spans": [ + { + "bbox": [ + 307, + 590, + 545, + 644 + ], + "type": "text", + "content": "[24] Cong Phuoc Huynh, Antonio Robles-Kelly, and Edwin Hancock. Shape and refractive index recovery from single-view polarisation images. 
In 2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 1229-1236, 2010. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 644, + 545, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 644, + 545, + 691 + ], + "spans": [ + { + "bbox": [ + 307, + 644, + 545, + 691 + ], + "type": "text", + "content": "[25] Cong Phuoc Huynh, Antonio Robles-Kelly, and Edwin R Hancock. Shape and refractive index from single-view spectro-polarimetric images. International journal of computer vision, 101(1):64-94, 2013. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 691, + 545, + 715 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 691, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 307, + 691, + 545, + 715 + ], + "type": "text", + "content": "[26] Chiyu Jiang, Avneesh Sud, Ameesh Makadia, Jingwei Huang, Matthias Nießner, Thomas Funkhouser, et al. Local" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "16587" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 66, + 72, + 288, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 288, + 106 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 288, + 106 + ], + "type": "text", + "content": "implicit grid representations for 3d scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6001-6010, 2020. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 107, + 287, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 107, + 287, + 151 + ], + "spans": [ + { + "bbox": [ + 48, + 107, + 287, + 151 + ], + "type": "text", + "content": "[27] Achuta Kadambi, Vage Taamazyan, Boxin Shi, and Ramesh Raskar. Polarized 3d: High-quality depth sensing with polarization cues. In Proceedings of the IEEE International Conference on Computer Vision, pages 3370-3378, 2015. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 152, + 288, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 152, + 288, + 195 + ], + "spans": [ + { + "bbox": [ + 48, + 152, + 288, + 195 + ], + "type": "text", + "content": "[28] Samruddhi Kahu and Reena Rahate. Image compression using singular value decomposition. International Journal of Advancements in Research & Technology, 2(8):244-248, 2013. 1, 4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 198, + 288, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 198, + 288, + 262 + ], + "spans": [ + { + "bbox": [ + 48, + 198, + 288, + 262 + ], + "type": "text", + "content": "[29] Agastya Kalra, Vage Taamazyan, Supreeth Krishna Rao, Kartik Venkataraman, Ramesh Raskar, and Achuta Kadambi. Deep polarization cues for transparent object segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 
3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 264, + 288, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 264, + 288, + 319 + ], + "spans": [ + { + "bbox": [ + 48, + 264, + 288, + 319 + ], + "type": "text", + "content": "[30] Shaohui Liu, Yinda Zhang, Songyou Peng, Boxin Shi, Marc Pollefeys, and Zhaopeng Cui. Dist: Rendering deep implicit signed distance function with differentiable sphere tracing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2019-2028, 2020. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 320, + 287, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 320, + 287, + 364 + ], + "spans": [ + { + "bbox": [ + 48, + 320, + 287, + 364 + ], + "type": "text", + "content": "[31] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. arXiv preprint arXiv:1906.07751, 2019. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 365, + 287, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 365, + 287, + 409 + ], + "spans": [ + { + "bbox": [ + 48, + 365, + 287, + 409 + ], + "type": "text", + "content": "[32] Tomohiro Maeda, Achuta Kadambi, Yoav Y Schechner, and Ramesh Raskar. Dynamic heterodyne interferometry. In 2018 IEEE International Conference on Computational Photography (ICCP), pages 1-11. IEEE, 2018. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 411, + 287, + 455 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 411, + 287, + 455 + ], + "spans": [ + { + "bbox": [ + 48, + 411, + 287, + 455 + ], + "type": "text", + "content": "[33] Ali H. Mahmoud, Moumen T. El-Melegy, and Aly A. Farag. Direct method for shape recovery from polarization and shading. In 2012 19th IEEE International Conference on Image Processing, pages 1769-1772, 2012. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 456, + 287, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 456, + 287, + 510 + ], + "spans": [ + { + "bbox": [ + 48, + 456, + 287, + 510 + ], + "type": "text", + "content": "[34] Julien N. P. Martel, David B. Lindell, Connor Z. Lin, Eric R. Chan, Marco Monteiro, and Gordon Wetzstein. Acorn: Adaptive coordinate networks for neural scene representation. ACM Trans. Graph. (SIGGRAPH), 40(4), 2021. 2, 3, 5, 6, 7, 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 512, + 287, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 512, + 287, + 567 + ], + "spans": [ + { + "bbox": [ + 48, + 512, + 287, + 567 + ], + "type": "text", + "content": "[35] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4460-4470, 2019. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 568, + 287, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 568, + 287, + 633 + ], + "spans": [ + { + "bbox": [ + 48, + 568, + 287, + 633 + ], + "type": "text", + "content": "[36] Ben Mildenhall, Peter Hedman, Ricardo Martin-Brualla, Pratul P Srinivasan, and Jonathan T Barron. Nerf in the dark: High dynamic range view synthesis from noisy raw images. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16190-16199, 2022. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 635, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 287, + 689 + ], + "type": "text", + "content": "[37] Ben Mildenhall, Pratul P Srinivasan, Rodrigo Ortiz-Cayon, Nima Khademi Kalantari, Ravi Ramamoorthi, Ren Ng, and Abhishek Kar. Local light field fusion: Practical view synthesis with prescriptive sampling guidelines. ACM Transactions on Graphics (TOG), 38(4):1-14, 2019. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "text", + "content": "[38] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf:" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "text", + "content": "Representing scenes as neural radiance fields for view synthesis. In European conference on computer vision, pages 405-421. Springer, 2020. 2, 5, 6, 7, 8" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 107, + 545, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 149 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 149 + ], + "type": "text", + "content": "[39] Miyazaki, Tan, Hara, and Ikeuchi. Polarization-based inverse rendering from a single view. In Proceedings Ninth IEEE International Conference on Computer Vision, pages 982-987 vol.2, 2003. 2, 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 151, + 545, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 151, + 545, + 194 + ], + "spans": [ + { + "bbox": [ + 307, + 151, + 545, + 194 + ], + "type": "text", + "content": "[40] D. Miyazaki, M. Kagesawa, and K. Ikeuchi. Transparent surface modeling from a pair of polarization images. IEEE Transactions on Pattern Analysis and Machine Intelligence, 26(1):73-82, 2004. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 195, + 545, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 195, + 545, + 238 + ], + "spans": [ + { + "bbox": [ + 307, + 195, + 545, + 238 + ], + "type": "text", + "content": "[41] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (ToG), 41(4):1-15, 2022. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 239, + 545, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 239, + 545, + 281 + ], + "spans": [ + { + "bbox": [ + 307, + 239, + 545, + 281 + ], + "type": "text", + "content": "[42] Shree K Nayar, Xi-Sheng Fang, and Terrance Boult. Separation of reflection components using color and polarization. International Journal of Computer Vision, 21(3):163-186, 1997. 
3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 284, + 545, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 284, + 545, + 327 + ], + "spans": [ + { + "bbox": [ + 307, + 284, + 545, + 327 + ], + "type": "text", + "content": "[43] Trung Ngo Thanh, Hajime Nagahara, and Rin-ichiro Taniguchi. Shape and light directions from shading and polarization. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2015. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 328, + 545, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 328, + 545, + 381 + ], + "spans": [ + { + "bbox": [ + 307, + 328, + 545, + 381 + ], + "type": "text", + "content": "[44] Thu H Nguyen-Phuoc, Christian Richardt, Long Mai, Yongliang Yang, and Niloy Mitra. Blockgan: Learning 3d object-aware scene representations from unlabelled images. Advances in Neural Information Processing Systems, 33:6767-6778, 2020. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 383, + 545, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 383, + 545, + 437 + ], + "spans": [ + { + "bbox": [ + 307, + 383, + 545, + 437 + ], + "type": "text", + "content": "[45] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Occupancy flow: 4d reconstruction by learning particle dynamics. In Proceedings of the IEEE/CVF international conference on computer vision, pages 5379-5389, 2019. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 438, + 545, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 438, + 545, + 492 + ], + "spans": [ + { + "bbox": [ + 307, + 438, + 545, + 492 + ], + "type": "text", + "content": "[46] Michael Oechsle, Lars Mescheder, Michael Niemeyer, Thilo Strauss, and Andreas Geiger. Texture fields: Learning texture representations in function space. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4531-4540, 2019. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 494, + 545, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 494, + 545, + 548 + ], + "spans": [ + { + "bbox": [ + 307, + 494, + 545, + 548 + ], + "type": "text", + "content": "[47] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 165-174, 2019. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 548, + 545, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 548, + 545, + 668 + ], + "spans": [ + { + "bbox": [ + 307, + 548, + 545, + 668 + ], + "type": "text", + "content": "[48] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems 32, pages 8024-8035. Curran Associates, Inc., 2019. 
5" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 670, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 670, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 670, + 545, + 713 + ], + "type": "text", + "content": "[49] Songyou Peng, Michael Niemeyer, Lars Mescheder, Marc Pollefeys, and Andreas Geiger. Convolutional occupancy networks. In European Conference on Computer Vision, pages 523-540. Springer, 2020. 2" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "16588" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 286, + 620 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 48, + 72, + 286, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 286, + 106 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 286, + 106 + ], + "type": "text", + "content": "[50] Gernot Riegler and Vladlen Koltun. Free view synthesis. In European Conference on Computer Vision, pages 623-640. Springer, 2020. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 107, + 286, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 107, + 286, + 162 + ], + "spans": [ + { + "bbox": [ + 49, + 107, + 286, + 162 + ], + "type": "text", + "content": "[51] Y.Y. Schechner, S.G. Narasimhan, and S.K. Nayar. Instant dehazing of images using polarization. In Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. CVPR 2001, volume 1, pages I-I, 2001. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 163, + 286, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 163, + 286, + 217 + ], + "spans": [ + { + "bbox": [ + 49, + 163, + 286, + 217 + ], + "type": "text", + "content": "[52] Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. Implicit neural representations with periodic activation functions. Advances in Neural Information Processing Systems, 33:7462-7473, 2020. 2, 3, 4, 5, 6, 7, 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 219, + 286, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 219, + 286, + 274 + ], + "spans": [ + { + "bbox": [ + 49, + 219, + 286, + 274 + ], + "type": "text", + "content": "[53] Vincent Sitzmann, Justus Thies, Felix Heide, Matthias Nießner, Gordon Wetzstein, and Michael Zollhofer. Deepvoxels: Learning persistent 3d feature embeddings. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2437-2446, 2019. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 275, + 286, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 275, + 286, + 319 + ], + "spans": [ + { + "bbox": [ + 49, + 275, + 286, + 319 + ], + "type": "text", + "content": "[54] Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3d-structure-aware neural scene representations. Advances in Neural Information Processing Systems, 32, 2019. 
2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 320, + 286, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 320, + 286, + 363 + ], + "spans": [ + { + "bbox": [ + 49, + 320, + 286, + 363 + ], + "type": "text", + "content": "[55] Daniel Teo, Boxin Shi, Yinqiang Zheng, and Sai-Kit Yeung. Self-calibrating polarising radiometric calibration. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2018. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 365, + 286, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 365, + 286, + 407 + ], + "spans": [ + { + "bbox": [ + 49, + 365, + 286, + 407 + ], + "type": "text", + "content": "[56] Justus Thies, Michael Zollhöfer, and Matthias Nießner. Deferred neural rendering: Image synthesis using neural textures. ACM Transactions on Graphics (TOG), 38(4):1-12, 2019. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 409, + 286, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 409, + 286, + 442 + ], + "spans": [ + { + "bbox": [ + 49, + 409, + 286, + 442 + ], + "type": "text", + "content": "[57] Tali Treibitz and Yoav Y. Schechner. Active polarization rescattering. IEEE Transactions on Pattern Analysis and Machine Intelligence, 31(3):385-399, 2009. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 444, + 286, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 444, + 286, + 496 + ], + "spans": [ + { + "bbox": [ + 49, + 444, + 286, + 496 + ], + "type": "text", + "content": "[58] Zhen Wang, Shijie Zhou, Jeong Joon Park, Despoina Paschalidou, Suya You, Gordon Wetzstein, Leonidas Guibas, and Achuta Kadambi. Alto: Alternating latent topologies for implicit 3d reconstruction. arXiv preprint arXiv:2212.04096, 2022. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 498, + 286, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 498, + 286, + 531 + ], + "spans": [ + { + "bbox": [ + 49, + 498, + 286, + 531 + ], + "type": "text", + "content": "[59] Lawrence B Wolff. Polarization vision: a new sensory approach to image understanding. Image and Vision computing, 15(2):81-93, 1997. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 533, + 286, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 533, + 286, + 586 + ], + "spans": [ + { + "bbox": [ + 49, + 533, + 286, + 586 + ], + "type": "text", + "content": "[60] Xiuming Zhang, Sean Fanello, Yun-Ta Tsai, Tiancheng Sun, Tianfan Xue, Rohit Pandey, Sergio Orts-Escalano, Philip Davidson, Christoph Rhemann, Paul Debevec, et al. Neural light transport for relighting and view synthesis. ACM Transactions on Graphics (TOG), 40(1):1-17, 2021. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 49, + 588, + 286, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 588, + 286, + 620 + ], + "spans": [ + { + "bbox": [ + 49, + 588, + 286, + 620 + ], + "type": "text", + "content": "[61] Chu Zhou, Minggui Teng, Yufei Han, Chao Xu, and Boxin Shi. Learning to dehaze with polarization. Advances in Neural Information Processing Systems, 34, 2021. 
3" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "16589" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/sRGB Real Noise Synthesizing With Neighboring Correlation-Aware Noise Model/4d3b606f-ee7b-460f-b2bf-d38a08fa5304_content_list.json b/2023/sRGB Real Noise Synthesizing With Neighboring Correlation-Aware Noise Model/4d3b606f-ee7b-460f-b2bf-d38a08fa5304_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..43f61a9c2398c847e6935a2245d48c6f72e5b1d4 --- /dev/null +++ b/2023/sRGB Real Noise Synthesizing With Neighboring Correlation-Aware Noise Model/4d3b606f-ee7b-460f-b2bf-d38a08fa5304_content_list.json @@ -0,0 +1,1818 @@ +[ + { + "type": "text", + "text": "sRGB Real Noise Synthesizing with Neighboring Correlation-Aware Noise Model", + "text_level": 1, + "bbox": [ + 76, + 130, + 893, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zixuan Fu $^{1*}$ , Lanqing Guo $^{1*}$ , Bihan Wen $^{1\\dagger}$ $^{1}$ Nanyang Technological University, Singapore \n{zixuan.fu, lanqing001, bihan.wen}@ntu.edu.sg", + "bbox": [ + 285, + 179, + 684, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 268, + 313, + 285 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Modeling and synthesizing real noise in the standard RGB (sRGB) domain is challenging due to the complicated noise distribution. While most of the deep noise generators proposed to synthesize sRGB real noise using an end-to-end trained model, the lack of explicit noise modeling degrades the quality of their synthesized noise. In this work, we propose to model the real noise as not only dependent on the underlying clean image pixel intensity, but also highly correlated to its neighboring noise realization within the local region. Correspondingly, we propose a novel noise synthesizing framework by explicitly learning its neighboring correlation on top of the signal dependency. With the proposed noise model, our framework greatly bridges the distribution gap between synthetic noise and real noise. We show that our generated \"real\" sRGB noisy images can be used for training supervised deep denoisers, thus to improve their real denoising results with a large margin, comparing to the popular classic denoisers or the deep denoisers that are trained on other sRGB noise generators. The code will be available at https://github.com/xuan611/sRGB-Real-NoiseSynthesizing.", + "bbox": [ + 76, + 300, + 473, + 618 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 647, + 209, + 662 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Real image denoising is one of the most challenging tasks in low-level vision. Deep denoisers that are trained using synthetic noise, e.g., Additive White Gaussian Noise (AWGN), perform poorly on real photography [3, 15], which motivates more realistic noise models, e.g., [1, 5, 14-16]. In general, there are two approaches towards real noise modeling, i.e., modeling in the raw-RGB and standard RGB (sRGB) domains. 
Popular modeling methods including the physical-based [25, 28] and data-driven methods [1, 6] exploit sophisticated noise models in the raw-RGB domain, which demonstrated promising perfor", + "bbox": [ + 75, + 672, + 470, + 840 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "mance as noise in raw-RGB is largely simplified comparing to noise in sRGB [20, 22]. However, raw-RGB images are not usually utilized by common users due to their large sizes. In contrast, most commercial cameras generate sRGB images by default, which are more popular in practice. Unfortunately, the noise generation methods in the raw-RGB domain cannot be directly applied to sRGB images, as the real noise distribution in sRGB is more complicated than raw-RGB noise, caused by the in-camera signal processing (ISP) pipeline [22].", + "bbox": [ + 500, + 270, + 893, + 421 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent works [5, 15] proposed to generate noise on raw-RGB images and convert them into sRGB images by the ISP pipeline including demosaicing, white balancing, gamma correction, etc. While these methods synthesized realistic noise, the requirement of raw-RGB images as well as manually defined ISP pipelines limits their applications. An alternative solution for sRGB real noise modeling is to train the generative models with sRGB noisy-clean images and directly synthesize real noise on sRGB images [16,17,20,26]. However, these models synthesize noise without explicitly modeling the characteristics of sRGB real noise, resulting in degradation of the quality of the synthesized noise.", + "bbox": [ + 498, + 426, + 893, + 608 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this paper, we propose a novel real noise generation network, based on Neighboring Correlation-Aware noise model, dubbed as NeCA, to directly synthesize real noise in the sRGB domain. The proposed real noise synthesis assumes that the sRGB real noise is not only signal-dependent, i.e., noise level partially depends on its underlying clean pixel, but also highly correlated with its neighboring noise realization. Such a real noise model greatly bridges the gap between the synthetic and real noise in sRGB. Furthermore, the synthesized \"real\" images by the proposed NeCA can be used for training supervised deep denoisers, thus tackling the real image denoising challenges, subjective to only a few real training data. The trained deep denoiser using our synthetic noisy images achieves state-of-the-art denoising performance, compared to the popular classic denoisers as well as deep denoisers that are trained on synthetic pairs from other noise models. To sum up, our main contributions can be concluded as follows:", + "bbox": [ + 498, + 613, + 893, + 898 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Co-first authors contributed equally.", + "bbox": [ + 94, + 849, + 292, + 863 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "†Corresponding author: Bihan Wen.", + "bbox": [ + 96, + 863, + 287, + 875 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "This work was supported in part by the MOE AcRF Tier 1 (RG61/22) and Start-Up Grant.", + "bbox": [ + 78, + 875, + 468, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1683", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We introduce a neighboring correlation-aware noise model for sRGB real noise synthesis by explicitly modeling the neighboring correlation of real noise, to bridge the gap between the synthetic and real noise distribution in sRGB.", + "- Our proposed framework shows a well-generalized ability, which is still capable to improve the real image denoising performance even with limited training data.", + "- With the synthetic image pairs generated by NeCA, the trained denoisers achieve state-of-the-art denoising performance compared with the deep denoisers trained with other real noise models." + ], + "bbox": [ + 96, + 90, + 467, + 295 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 76, + 311, + 217, + 325 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Raw-RGB Image Noise Synthesis", + "text_level": 1, + "bbox": [ + 76, + 335, + 367, + 351 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Modeling real noise in raw-RGB is challenging as it cannot be simply assumed as Additive White Gaussian Noise (AWGN). Typically, raw-RGB noise models can be classified into two categories: physical-based models and learning-based models. One of the most commonly used physical-based models is the heteroscedastic Gaussian noise [10], which posits noise value, located at pixel $i$ , is dependent on its underlying clean pixel intensity:", + "bbox": [ + 75, + 359, + 467, + 479 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {n} _ {i} \\sim \\mathcal {N} \\left(0, \\sigma_ {s} ^ {2} \\cdot \\boldsymbol {x} _ {i} + \\sigma_ {c} ^ {2}\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 494, + 467, + 512 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $n$ and $x$ are noise and clean image in the raw-RGB domain, while $\\sigma_{s}$ and $\\sigma_{c}$ denote the noise variance term for signal-dependent and signal-independent components. Such a noise model is also known as the noise level function (NLF) as it describes the relationship between the pixelwise noise level and image intensity. To better model the camera sensor noise, recent works [25, 28] have proposed that real noise is a sophisticated combination of shot noise, read noise and row noise, etc.", + "bbox": [ + 75, + 518, + 467, + 652 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Compared to statistical modeling of noise, learning-based models learn the real noise distribution with generative models such as the generative adversarial nets (GANs) [6] and normalization flows [1] from paired noisy-clean images. 
Although these methods perform well in raw-RGB, they cannot be directly applied to model sRGB real noise since their assumptions are based on the characteristics of raw-RGB noise. For instance, these noise generators synthesize raw-RGB noise from an initialized heteroscedastic Gaussian noise (as described in Equation (1)), which fails to provide an accurate representation of real noise in the sRGB domain [21, 22].", + "bbox": [ + 75, + 655, + 467, + 835 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. sRGB Image Noise Synthesis", + "text_level": 1, + "bbox": [ + 76, + 845, + 333, + 861 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The camera ISP pipeline, including demosaicing, tone mapping, white balancing, gamma mapping, etc., makes", + "bbox": [ + 76, + 869, + 467, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "real noise in the sRGB domain to be more complicated than it is in the raw-RGB domain. To synthesize sRGB real noise, two approaches have been proposed: (1) synthesizing noisy samples in the raw-RGB domain and rendering them into sRGB images by applying the manually defined ISP pipeline [5, 15], and (2) directly synthesizing real noise in the sRGB domain [8, 16, 17, 20, 26].", + "bbox": [ + 496, + 90, + 890, + 196 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In CBDNet [15], heteroscedastic Gaussian noise is added on raw-RGB clean images, and images are converted into sRGB using demosaicing and camera response functions. However, CBDNet requires raw-RGB images, which are not commonly used. To address this issue, unprocessing image (UPI) [5] proposes to de-render sRGB images into raw-RGB images using several predefined unprocessing pipelines. Similar procedures used in CBDNet are then applied to the unprocessed raw-RGB images to obtain their sRGB versions.", + "bbox": [ + 496, + 198, + 890, + 347 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Despite CBDNet and UPI effectively synthesize sRGB real noise, they still require predefined ISP pipelines, which may not match real ones used in different camera sensors. Therefore, generating real noise directly in the sRGB domain with deep generative models [11, 19] is considered an alternative solution. GCBD [8] proposes a GAN-based model that learns noise distributions by training on noise patches that have been cropped from noisy images. However, the synthesized noise is signal-independent as it is generated from random noise. DANet [26] and GRDN [17] use conditional generative networks to synthesize signal-dependent noise, however, few experiments are conducted to demonstrate the effectiveness of the proposed noise generators. C2N [16] attempts to synthesize the real noise with unpaired clean-noisy images, but the generated noise contains artifacts and color-shift problems due to the unpaired training mode. Recently, Kousha et al. [20] propose a conditional flow-based model for sRGB image noise generation that takes clean images, camera types, and ISO levels as input. However, the denoiser, trained with synthetic data, improves marginally compared to the unpaired noise generation method C2N. Unlike previous attempts that model noise with an end-to-end generator, our proposed method explicitly decomposes signal dependency and neighboring correlation of real noise and learns them with separate networks.", + "bbox": [ + 496, + 349, + 890, + 741 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3. 
Method", + "text_level": 1, + "bbox": [ + 500, + 760, + 589, + 773 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3.1. Neighboring Correlation-Aware Noise Model", + "text_level": 1, + "bbox": [ + 500, + 785, + 879, + 801 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this section, we present our proposed noise model for sRGB real noise. We begin by introducing the basic noise model, which defines the signal dependency of pixel-wise noise level and its underlying clean pixels. We then discuss discrepancies between noise synthesized by the basic noise model and sRGB real noise and propose to bridge this gap", + "bbox": [ + 496, + 810, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "1684", + "bbox": [ + 483, + 945, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "by explicitly modeling noise neighboring correlation on top of the signal dependency.", + "bbox": [ + 75, + 90, + 468, + 119 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Basic Noise Model. Both raw-RGB and sRGB real noise are dependent on the image signal. In raw-RGB, the noise level can be approximated as a simple function of its underlying clean pixel intensity, i.e., heteroscedastic Gaussian noise described in Equation (1). However sRGB real noise is more complex due to camera settings and signal transformations in the ISP pipeline [20-22]. To address this challenge, we propose a noise model that characterizes the signal dependency of sRGB real noise. Specifically, for an sRGB clean image $\\mathbf{x} = (x_{1},\\dots,x_{N})$ and its paired noisy version $\\mathbf{y} = (y_{1},\\dots,y_{N})$ , we define noise level at pixel $i$ as a function of the clean image patch $\\Omega_{\\mathbf{x}}$ , centered at clean pixel $x_{i}$ , and camera ISO level $\\gamma$ :", + "bbox": [ + 75, + 121, + 470, + 316 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\sigma} _ {i} = f \\left(\\Omega_ {\\boldsymbol {x}}, \\gamma\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 217, + 329, + 468, + 345 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $f(\\cdot)$ represents the non-linear relationship of $\\Omega_{\\pmb{x}},\\gamma$ and the pixel-wise noise level $\\sigma_{i} = (\\sigma_{i,r},\\sigma_{i,g},\\sigma_{i,b})$ for three color channels. For the sake of clarity, we omit the location index $i$ in the expression for the local region $\\Omega_{\\pmb{x}}$ . Then the distribution of noise $\\pmb{v}$ at each pixel is modeled as a Gaussian distribution:", + "bbox": [ + 75, + 357, + 468, + 446 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {v} _ {i, c} \\sim \\mathcal {N} (0, \\sigma_ {i, c} ^ {2}), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 459, + 468, + 477 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $c$ is the index of RGB channels. We further define the noise level map $\\pmb{m}$ , which has the same size as the clean image and the value at pixel $i$ refers to the noise level $\\sigma_{i}$ . Finally, we can simulate signal-dependent noise as follows:", + "bbox": [ + 75, + 487, + 468, + 550 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {v} = \\boldsymbol {\\epsilon} \\odot \\boldsymbol {m}, \\quad \\epsilon_ {i, c} \\sim \\mathcal {N} (0, 1 ^ {2}). 
\\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 165, + 561, + 468, + 579 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Neighboring Correlation Noise Model. The noise synthesized by the basic noise model still exhibits discrepancies with real noise, as shown in Figure 1(b) and (d). We attribute this gap to the improper noise realization defined in Equation (4), where noise is sampled spatially independently from the basic noise model. Specifically, the most commonly used noise models, including the AWGN, heteroscedastic Gaussian noise, and our basic noise model, assume that the noise distribution is independent at each pixel, and the noise is sampled from the noise distribution without considering its neighboring synthesized noise. However, this noise realization method is inadequate to synthesize RGB real noise as the noise value is assumed to be highly correlated with its neighboring noise values due to the influence of the ISP pipeline such as demosaicing, which introduces neighboring operations. We refer to this characteristic of noise as neighboring correlation and define a neighboring correlation operator $g(\\cdot)$ that maps such the correlation onto the synthesized signal-dependent noise $v$ :", + "bbox": [ + 75, + 585, + 470, + 875 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {n} _ {i} = g \\left(\\Omega_ {\\boldsymbol {v}}\\right), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 227, + 885, + 468, + 901 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/2b5db907cea09a82760c07c93a4c77d9c48573121172ee500c1fb92c0698dd7c.jpg", + "image_caption": [ + "(a) Clean" + ], + "image_footnote": [], + "bbox": [ + 513, + 90, + 686, + 170 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/9a3eb8d5e8d9a04de9fb6a392ea24c537d2b7145ddad57be520b76b8c6b79296.jpg", + "image_caption": [ + "(b) SDNU Noise" + ], + "image_footnote": [], + "bbox": [ + 691, + 90, + 864, + 170 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/5fccf73333b805713134bd0baec7e835e7b71f49e853a79f7fd20b1507e305a9.jpg", + "image_caption": [ + "(c) SDNC Noise" + ], + "image_footnote": [], + "bbox": [ + 511, + 191, + 686, + 272 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/a8807c5a9d0283918fd60de1f86bd60930c0c63cb2e3ada6e796305c537c640b.jpg", + "image_caption": [ + "(d) Real", + "Figure 1. The visualization of modeling signal dependency and neighboring correlation of sRGB real noise. (a) Clean image. (b) Synthetic signal-dependent and neighboring uncorrelated (SDNU) noise. (c) Synthetic signal-dependent and neighboring correlated (SDNC) noise. (d) Real noise. We add a constant value to the noise maps for better visualizing the signal dependency." + ], + "image_footnote": [], + "bbox": [ + 691, + 191, + 864, + 272 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $n$ is the neighboring correlated noise and $\\Omega_{\\mathbf{v}}$ is the local patch of $\\mathbf{v}$ , centered at pixel $i$ . By processing the neighboring uncorrelated noise $\\mathbf{v}$ with the neighboring correlation operator, which is learned by our proposed noise synthesizing framework in Section 3.2, the final generated noise performs similar characteristics to real noise, as demonstrated in Figure 1(c) and (d). 
For the purpose of clarity, we use SDNU noise to refer to the intermediate synthesized signal-dependent and neighboring uncorrelated noise $\\mathbf{v}$ , and SDNC noise to refer to the final generated signal-dependent and neighboring correlated noise $\\mathbf{n}$ . In the following sections, we will introduce the proposed noise synthesizing framework to explicitly learn the neighboring correlation and signal dependency of noise.", + "bbox": [ + 496, + 391, + 892, + 604 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Noise Synthesizing Framework", + "text_level": 1, + "bbox": [ + 500, + 617, + 774, + 633 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given paired sRGB real-world noisy and clean images $(\\pmb{y},\\pmb{x})$ , where $\\pmb{y} = \\pmb{x} + \\pmb{n}$ , our proposed framework aims to learn the neighboring correlation-aware noise model using paired data. Our proposed framework, as illustrated in Figure 2, comprises three networks: a gain estimation network (GENet), a noise-level prediction network (NPNet), and a neighboring correlation network (NCNet). GENet estimates the gain factor from a noisy image, which serves to amplify the synthesized noise, similar to the ISO level. NPNet synthesizes the SDNU noise by incorporating the estimated gain factor and the clean image as inputs. Finally, NCNet explicitly models the neighboring correlation of sRGB real noise and generates the SDNC noise.", + "bbox": [ + 496, + 642, + 890, + 838 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Gain Estimation Network. The gain estimation network (GENet) is designed to estimate the gain factor from a noisy image $\\pmb{y}$ , which serves as guidance to control the overall magnitude of the synthesized noise. The gain factor is de", + "bbox": [ + 498, + 839, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "1685", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/483648faafa1e16ca21cecab943ac122dbf1a884d756eea629e328fddeb735d3.jpg", + "image_caption": [ + "Noise-level prediction network", + "Figure 2. The proposed noise synthesizing framework. Our NeCA contains three networks including the gain estimation network (GENet), noise-level prediction network (NPNet), and neighboring correlation network (NCNet). PD denotes the Pixel-shuffle Down-sampling scheme introduced in [29]. Local noise level estimation and global noise level estimation operations are formulated in Equation (13) and (6). The details of the network architecture and PD scheme are described in the supplementary material." + ], + "image_footnote": [], + "bbox": [ + 81, + 109, + 890, + 340 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "fined as the global noise level of the noisy image, which is the standard deviation calculated by every noise value in its noise $\\pmb{n}$ :", + "bbox": [ + 75, + 429, + 468, + 472 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\beta = \\sqrt {\\frac {1}{N} \\sum_ {i , c} \\left(\\boldsymbol {n} _ {i , c} - \\bar {n}\\right) ^ {2}}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 472, + 468, + 512 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\beta$ is the defined global noise level of the noisy image $\\pmb{y}$ , $\\bar{n}$ is the mean of the noise $\\pmb{n}$ , and $N$ is the total number of pixels in the noisy image. However, during testing, the calculated gain factor is unavailable. 
To solve this, we aim to estimate the gain factor from the noisy image using GENet:", + "bbox": [ + 75, + 517, + 468, + 604 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\beta} = E (\\boldsymbol {y}), \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 233, + 606, + 468, + 625 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $E$ represents the GENet, and $\\hat{\\beta}$ is the estimated gain factor by GENet, which is expected to be as close as the global noise level of the noisy image. The main reason to use the gain factor estimated from the noisy image rather than the ISO level is driven by a crucial factor. ISO levels are typically saved in the metadata of images. The requirement of metadata will limit the application of our noise synthesizing framework.", + "bbox": [ + 75, + 632, + 468, + 752 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Noise-level Prediction Network. The noise-level prediction network (NPNet) learns a parametric model for the noise distribution defined in Equation (3). To achieve this, NPNet predicts the pixel-wise noise level $\\hat{\\sigma}_i$ using the clean local patch $\\Omega_{\\pmb{x}}$ and estimated gain factor $\\hat{\\beta}$ :", + "bbox": [ + 75, + 753, + 468, + 829 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\boldsymbol {\\sigma}} _ {i} = G _ {1} \\left(\\Omega_ {\\boldsymbol {x}}, \\hat {\\beta}\\right), \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 212, + 840, + 468, + 858 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $G_{1}$ denotes the NPNet, which has three output channels to predict noise levels for each pixel. To effectively", + "bbox": [ + 75, + 869, + 468, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "incorporate the gain factor into the NPNet, we first apply the layer normalization [4] to the feature map of convolution and then multiply the normalized feature map by the gain factor. In practice, NPNet directly outputs the predicted noise level map $\\hat{m}$ by utilizing a clean image and gain factor:", + "bbox": [ + 498, + 429, + 890, + 518 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\boldsymbol {m}} = G _ {1} (\\boldsymbol {x}, \\hat {\\beta}). \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 640, + 518, + 890, + 535 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Once the noise level map $\\hat{m}$ is obtained, the SDNU noise $\\hat{v}$ can be synthesized by using the sampling trick defined in Equation (4).", + "bbox": [ + 498, + 540, + 890, + 585 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Neighboring Correlation Network. The neighboring correlation network (NCNet) performs as the neighboring correlation operator, described in Equation (5). By taking the noise value and its neighboring noise realization as input, NCNet generates the SDNC noise $\\hat{n}$ :", + "bbox": [ + 498, + 585, + 890, + 660 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\boldsymbol {n}} _ {i} = G _ {2} \\left(\\Omega_ {\\hat {\\boldsymbol {v}}}\\right), \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 643, + 669, + 890, + 686 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\Omega_{\\hat{v}}$ is the noise patch of $\\hat{v}$ located at pixel $i$ and $G_{2}$ denotes the NCNet. 
The SDNC noise can be directly generated by taking the SDNU noise into the network:", + "bbox": [ + 498, + 694, + 890, + 739 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\boldsymbol {n}} = G _ {2} (\\hat {\\boldsymbol {v}}). \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 651, + 748, + 890, + 763 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Loss Functions", + "text_level": 1, + "bbox": [ + 500, + 771, + 651, + 786 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To jointly train the proposed networks, five loss functions are introduced: (1) standard deviation losses $\\mathcal{L}_{std1}$ and $\\mathcal{L}_{std2}$ , (2) adversarial losses $\\mathcal{L}_{adv1}$ and $\\mathcal{L}_{adv2}$ , (3) the regularization loss $\\mathcal{L}_{reg}$ . The details of these loss functions will be introduced later.", + "bbox": [ + 498, + 794, + 890, + 868 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Standard Deviation Loss. We introduce $\\mathcal{L}_{std1}$ to enforce the estimated gain factor $\\hat{\\beta}$ by GENet to be close to the", + "bbox": [ + 498, + 869, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "1686", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "global noise level $\\beta$ of the noisy image, which is defined as follows:", + "bbox": [ + 75, + 90, + 468, + 119 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {s t d 1} = \\mathbb {E} _ {\\boldsymbol {y}} \\left[ (\\hat {\\beta} - \\beta) ^ {2} \\right], \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 189, + 119, + 468, + 138 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\beta$ and $\\hat{\\beta}$ are obtained by Equation (6) and (7).", + "bbox": [ + 76, + 143, + 421, + 159 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The objective of NPNet is to predict the noise level map $\\hat{m}$ by taking the clean image and gain factor as input. However, since the groundtruth noise level map is not available, we propose to use a simple local noise level estimation method to approximate the noise level map $m$ from the noise, which is calculated as follows:", + "bbox": [ + 75, + 159, + 468, + 250 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {m} _ {i, c} = \\sqrt {\\mathcal {M F} \\left(\\Omega_ {n} ^ {2}\\right) - \\mathcal {M F} ^ {2} \\left(\\Omega_ {n}\\right)}, \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 150, + 257, + 468, + 282 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\Omega_{n}$ denotes the $7\\times 7$ noise patch located at pixel $i$ , channel $c$ of noise map $\\pmb{n}$ , and $\\mathcal{MF}(\\cdot)$ represents the mean filter. Then the $\\mathcal{L}_{std2}$ is defined as follows:", + "bbox": [ + 76, + 287, + 468, + 333 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {s t d 2} = \\mathbb {E} _ {\\boldsymbol {x}, \\boldsymbol {y}} \\left[ \\left| | \\hat {\\boldsymbol {m}} - \\boldsymbol {m} \\right| \\right| _ {2} ^ {2} ]. \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 174, + 339, + 468, + 359 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Adversarial Loss. In order to guarantee that the generated noise shares the similar distribution with real noise, we introduce two adversarial losses. 
Our first adversarial loss $\\mathcal{L}_{adv1}$ is imposed between the final synthetic SDNC noise and real noise $(\\hat{n}, n)$ to enforce the highly neighboring correlation in the generated noise, similar to that of the real noise. Our second adversarial loss $\\mathcal{L}_{adv2}$ is calculated by using Pixel-shuffle Down-sampling [29] versions of synthesized intermediate noise $\\hat{v}$ and real noise $n$ . Specifically, $\\mathcal{L}_{adv2}$ servers as a complementary loss for $\\mathcal{L}_{std2}$ because estimating the approximate noise level map using Equation (13) may not be reliable, as this method struggles to differentiate between noise originating from different intensities. However, directly calculating the adversarial loss between noise $\\hat{v}$ and $n$ is unreasonable since $\\hat{v}$ is neighboring uncorrelated. To address this problem, we utilize the Pixel-shuffle Down-sampling (PD) scheme proposed in [29] to obtain down-sampled versions $((\\hat{v})_{\\downarrow s}, (n)_{\\downarrow s})$ of both synthetic noise $\\hat{v}$ and real noise $n$ . Here $\\downarrow_s$ denotes the PD operation with a stride of $s$ (in this paper, $s$ is set to 3). According to [29], the neighboring correlation in the PD real noise $(n)_{\\downarrow s}$ will be greatly attenuated. This allows us to calculate the adversarial loss between the two down-sampled versions. We utilize WGAN-GP [13] to compute adversarial losses, while $\\mathcal{L}_{adv1}$ is defined as follows:", + "bbox": [ + 75, + 366, + 468, + 743 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {a d v 1} = - \\mathbb {E} _ {\\hat {\\boldsymbol {n}}} \\left[ D _ {1} (\\hat {\\boldsymbol {n}}) \\right], \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 189, + 752, + 468, + 768 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $D_{1}$ is the discriminator for NCNet, which scores the realness of synthesized noise. Similarly, $\\mathcal{L}_{adv2}$ is computed as follows:", + "bbox": [ + 75, + 776, + 468, + 821 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {a d v 2} = - \\mathbb {E} _ {(\\hat {\\boldsymbol {v}}) \\downarrow_ {s}} \\left[ D _ {2} \\left((\\hat {\\boldsymbol {v}}) \\downarrow_ {s}\\right) \\right], \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 163, + 830, + 468, + 848 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $D_{2}$ is the discriminator for NPNet. More detail about the PD scheme and the discriminator losses will be discussed in the supplementary material.", + "bbox": [ + 75, + 854, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/4f05c02adcff079c93c835184027659623560bf60716b415777050469c80ba15.jpg", + "image_caption": [ + "Figure 3. The designed two inference versions. NeCA-W utilizes the whole framework to synthesize SDNC noise. NeCA-S only adopts NCNet to synthesize signal-independent neighboring correlated (SINC) noise by taking the AWGN as input." + ], + "image_footnote": [], + "bbox": [ + 504, + 89, + 890, + 247 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Regularization Loss. Besides the losses mentioned above, a regularization loss $\\mathcal{L}_{reg}$ is utilized to stabilize training. 
It is imposed between the estimated gain factor $\\hat{\\beta}$ and the predicted noise level map $\\hat{m}$ :", + "bbox": [ + 498, + 335, + 890, + 398 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {r e g} = \\mathbb {E} _ {\\boldsymbol {x}, \\boldsymbol {y}} \\left[ \\| \\boldsymbol {w} \\| _ {2} ^ {2} \\right], \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 620, + 407, + 890, + 426 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\pmb{w}_{i,c} = \\hat{\\pmb{m}}_{i,c} - \\hat{\\beta}$", + "bbox": [ + 500, + 436, + 666, + 453 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Finally, the full loss functions of the framework are described as follows:", + "bbox": [ + 498, + 453, + 890, + 482 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathcal {L} _ {\\text {r e g}} + \\lambda_ {1} \\mathcal {L} _ {\\text {a d v 1}} + \\lambda_ {2} \\mathcal {L} _ {\\text {a d v 2}} + \\lambda_ {3} \\mathcal {L} _ {\\text {s t d 1}} + \\lambda_ {4} \\mathcal {L} _ {\\text {s t d 2}}, \\tag {18}\n$$\n", + "text_format": "latex", + "bbox": [ + 506, + 494, + 890, + 511 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\lambda_1, \\lambda_2, \\lambda_3$ and $\\lambda_4$ are hyperparameters to balance the importance between different losses.", + "bbox": [ + 498, + 520, + 890, + 550 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. Inference Stage", + "text_level": 1, + "bbox": [ + 500, + 560, + 655, + 575 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We provide two inference versions to generate noise, as illustrated in Figure 3: (1) NeCA-W applies the entire framework to synthesize \"real\" noise. It first estimates the gain factor from an arbitrary noisy image and synthesizes noise by conditioning on a clean image and the estimated gain factor. (2) NeCA-S is the simplified version of NeCA-W which uses only NCNet for inference. In this method, AWGN is synthesized and then NCNet maps it with the neighboring correlation. We refer to this synthetic noise as signal-independent neighboring correlated (SINC) noise. Notably, NeCA-S still enhances the performance of deep denoiser on real noise, even though the denoiser is trained using the synthesized SINC noise. In the meantime, this inference version only requires minimal data to train the NCNet, which we will demonstrate in our experiments.", + "bbox": [ + 498, + 583, + 890, + 809 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 500, + 821, + 632, + 838 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Experimental Setup", + "text_level": 1, + "bbox": [ + 500, + 847, + 689, + 863 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To assess the effectiveness of our proposed noise synthesizing framework, we conduct experiments in two parts:", + "bbox": [ + 500, + 869, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "1687", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "First, we assess the quality of the generated noise. Second, we examine the performance of NeCA on the downstream image denoising task. The details of the experiments will be discussed in the following subsections.", + "bbox": [ + 75, + 90, + 467, + 150 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Dataset. 
We evaluate our NeCA on the medium version of Smartphone Image Denoising Dataset (SIDD) [2], which comprises 320 noisy-clean image pairs captured by five different smartphone cameras, including Samsung Galaxy S6 Edge (S6), iPhone 7 (IP), Google Pixel (GP), Motorola Nexus 6 (N6), and LG G4 (G4). These images are collected in ten different scenes with varying ISO levels and lighting conditions. The SIDD provides both raw-RGB and sRGB images, with the sRGB version obtained by rendering the captured raw-RGB images through the manually defined ISP pipeline provided in [2]. In our experiments, we use the sRGB version to evaluate the proposed method.", + "bbox": [ + 75, + 152, + 467, + 332 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Metrics. We evaluate the performance of NeCA using three metrics: Discrete Kullback-Leibler (KL) divergence, Signal-to-Noise Ratio (PSNR) and Structural Similarity (SSIM) [24]. The KL divergence is used to measure the similarity of histograms between real noise and generated noise. The histogram range is set from $-0.1$ to 0.1 with 64 intervals. The PSNR and SSIM are used to evaluate the performance of deep denoisers. A higher PSNR and SSIM reflect better denoising performance, while a smaller KL divergence represents better noise synthesizing quality.", + "bbox": [ + 75, + 334, + 467, + 484 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Implementation Details. All the networks are optimized using Adam optimizer [18] with a batch size of 32. Images are cropped to a size of $96 \\times 96$ pixels for training. For noise generation, we train individual networks for 300 epochs with the learning rate of $10^{-4}$ . For denoising, we select the DnCNN [27] as the default deep denoiser for comparison and train it for 300 epochs with the learning rate of $10^{-3}$ . The $\\lambda_{1}, \\lambda_{2}, \\lambda_{3}$ and $\\lambda_{4}$ in the loss functions are set to 0.1, 0.1, 50, 10 respectively.", + "bbox": [ + 75, + 486, + 467, + 621 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Noise Synthesis on SIDD", + "text_level": 1, + "bbox": [ + 76, + 633, + 302, + 648 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Compared Baselines. We compare NeCA with several noise models, including Additive White Gaussian Noise (AWGN), C2N [16], and the NLF (described in Equation (1)). To synthesize AWGN, we estimate the noise level from each noisy image by applying a noise estimation method introduced in [7] and add it to its corresponding clean image. To synthesize noise using the C2N, we directly utilize the pretrained model provided by the authors. For the NLF, we synthesize heteroscedastic Gaussian noise on the raw-RGB clean images from SIDD, where the signal-dependent term $\\sigma_s^2$ and signal-independent term $\\sigma_c^2$ are obtained from the metadata provided by SIDD. We then apply the same ISP pipeline as used in the SIDD to render them to sRGB. We refer to this model as NLF-ISP for simplicity.", + "bbox": [ + 75, + 657, + 467, + 868 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Preparation. We evaluate the results of generated noise on each camera in SIDD, where $80\\%$ of image pairs are allo", + "bbox": [ + 75, + 869, + 467, + 898 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/bc2a396d6882e1a32e2f1ff93c4ed58fee080fdee2b2fbba7eb630e92807cf32.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
CameraMetricsAWGNC2N [16]NeCANLF-ISPReal
G4KL1.97550.16600.02420.0102-
PSNR28.1537.8138.8538.5140.60
GPKL1.83510.13150.04320.0126-
PSNR28.4537.0837.7237.7438.33
IPKL1.85620.05810.04100.0475-
PSNR28.0139.1239.4639.5339.45
N6KL2.14650.35240.02060.0063-
PSNR26.3133.5935.5434.8435.56
S6KL0.45170.45170.03020.0902-
PSNR27.2233.1835.5635.9936.85
AverageKL2.00620.21290.03420.0414-
PSNR27.9036.3737.5837.5938.27
", + "bbox": [ + 501, + 88, + 890, + 260 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1. Quantitative results of synthetic noise. The results are computed on the validation sets of five SIDD cameras with KL divergence and PSNR (dB). The best results are highlighted in bold.", + "bbox": [ + 498, + 271, + 890, + 311 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "cated for training the noise synthesizing framework, while the rest $20\\%$ are reserved for validation. The quality of the synthesized noise was evaluated using two metrics: KL divergence and PSNR. We calculate the KL divergence between the histograms of ground truth noise in the validation set and the noise synthesized by NeCA with clean images and corresponding gain factors from the validation set. Notably, the gain factors used for evaluation are estimated by GENet from the noisy images paired with the clean images, as they cannot be set to random values for evaluation. Besides, we also use the PSNR to further evaluate the quality of synthesized noisy images. We train the DnCNN with the synthesized noisy-clean image pairs on the training set and apply it to denoise the noisy images from the validation set. We calculate the PSNR between the denoised images and corresponding clean images to evaluate the denoising performance. In order to maintain consistency between the training and validation sets, we ensure that both sets contain the same set of ISO levels.", + "bbox": [ + 496, + 347, + 890, + 633 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Noise Synthesis Results. Table 1 shows the KL divergence and PSNR results computed on validation sets of five devices. For the results of average KL divergence over all five cameras, our method exhibits the best performance among all noise models. Additionally, our method lags slightly behind NLF-ISP by $0.01\\mathrm{dB}$ on the average PSNR. It is worth noting that noise samples generated by NLF-ISP are first synthesized in the raw-RGB domain and then rendered to sRGB using the same ISP pipelines as in SIDD, suggesting the minimal discrepancies between noise samples from NLF-ISP and real data. The similar results on each camera between NLF-ISP and our NeCA model demonstrate the promising performance of the proposed model. Figure 4 shows generated noise maps from compared methods. Remarkable visual similarities observed between generated noise maps and real noise maps indicate that our framework is capable to synthesize realistic noise.", + "bbox": [ + 496, + 643, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "1688", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/8c9671b6b6592f1848853eeac1bfc697e649c4ce13775fddf19081826b9bf377.jpg", + "image_caption": [ + "Figure 4. Visualization of synthetic noise samples under different ISO-lighting conditions on SIDD [2]. The displayed images, from left to right, correspond to clean image, C2N, Our method, NLF-ISP and real noisy image." + ], + "image_footnote": [], + "bbox": [ + 80, + 87, + 468, + 597 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Applications on Real Image Denoising", + "text_level": 1, + "bbox": [ + 76, + 690, + 405, + 708 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Compared Baselines. 
Various noise generation methods are evaluated to demonstrate the effectiveness of these baselines performed on the downstream real image denoising task, including GCBD [8], C2N [16], Flow-sRGB [20], NeCA-S and NeCA-W. When assessing denoising performance, classical denoisers such as BM3D [9] and WNNM [12] are also included in the experiments.", + "bbox": [ + 75, + 715, + 468, + 821 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Preparation. We establish the synthetic SIDD where clean images are from the original SIDD and noisy images are synthesized by using NeCA-W and NeCA-S. Specifically, the proposed framework is trained on the entire SIDD for each camera and the whole framework (NeCA-W) is used to", + "bbox": [ + 75, + 824, + 468, + 898 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/c6f33c507302525de1289666ba278ab512610d6fd5630cf7205adb7f1b428470.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodSIDDDND
PSNR(dB)SSIMPSNR(dB)SSIM
BM3D [9]25.650.68534.510.851
WNNM [12]25.780.80934.670.865
GCBD [8]--35.580.922
C2N* [16]33.760.90136.080.903
Flow-sRGB* [20]34.740.912--
NeCA-S*36.100.92736.960.938
NeCA-W*36.820.93237.530.940
Real*37.120.93437.890.942
", + "bbox": [ + 501, + 87, + 893, + 227 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2. Quantitative evaluation of denoising performance on SIDD and DND benchmark. * denotes the DnCNN denoiser is trained on either the synthetic or real image pairs with the SIDD. (red: the best result, blue: the second best)", + "bbox": [ + 498, + 232, + 890, + 287 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "generate noise for each clean image from the SIDD, where the gain factor is estimated from its paired noisy image. On the other hand, We train NeCA with a few paired images, e.g., three image pairs with varying ISO levels (800, 1600, 3200) from camera N6 and use only NCNet (NeCA-S) to generate signal-independent neighboring correlated (SINC) noise for clean images from SIDD, as seen in Figure 3. The synthesized SINC noise is added to the clean image. For each clean image, the noise level of AWGN is randomly selected from a range of [0, 75]. Our experiments with NeCA-S aim to demonstrate the advantages of explicitly modeling the neighboring correlation of real noise. Other sRGB real noise generation baselines, including C2N [16] and Flow-sRGB [20], also follow the same experimental settings with NeCA-W. With the synthetic noisy-clean image pairs, we train the DnCNN on either synthetic or real pairs of SIDD. Then the denoising performances are evaluated on both the SIDD and DND [23] benchmarks.", + "bbox": [ + 496, + 308, + 892, + 580 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Results and Discussions. Table 2 shows the denoising results of the compared denoisers. Obviously, DnCNN trained on the synthetic samples from NeCA-W, achieves the best results among all compared methods in terms of both PSNR and SSIM. Specifically, NeCA-W gets 2.08 dB gains from Flow-sRGB on the SIDD benchmark, where Flow-sRGB is an end-to-end flow model which implicitly synthesizes real noise. The improvement of denoising performance obtained by NeCA-W indicates the accuracy of our noise model. Moreover, even though the denoising performance of NeCA-W still does not surpass the denoiser trained on the real data, the slight PSNR and SSIM discrepancies between them suggest our model does shrink this gap. Furthermore, the most impressive thing is that NeCA-S still achieves comparable denoising results on both the SIDD and DND benchmarks, outperforming the Flow-sRGB by a large margin. Note that the synthetic noise from NeCA-S is signal-independent. 
The superior performance of NeCA-S further verifies explicitly modeling neighboring correlation benefits the sRGB real noise synthesis.", + "bbox": [ + 496, + 580, + 892, + 883 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Figure 6 and 7 show the denoised images from the SIDD", + "bbox": [ + 517, + 885, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "1689", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6d68097a8c3c6eb4f4bea8fea82bcfca1935609ca28c61b4a94b06f9b258630e.jpg", + "image_caption": [ + "(a) Clean" + ], + "image_footnote": [], + "bbox": [ + 130, + 88, + 248, + 179 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ef022acb930b4a1c30e1c88d9e9216049df18d68f066d87dc583f88bf776d432.jpg", + "image_caption": [ + "(b) $\\hat{\\beta} = 0.02$" + ], + "image_footnote": [], + "bbox": [ + 250, + 88, + 364, + 178 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/4739bb193f2c932858b2f22493569e60fdcd48759bdf30c48bc919404879c02f.jpg", + "image_caption": [ + "(c) $\\hat{\\beta} = 0.06$" + ], + "image_footnote": [], + "bbox": [ + 367, + 88, + 480, + 178 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f45d3d6be20a3c36a584d69ecb19eb7d7c5fc047b1e283311d8c8140c3f1a9c1.jpg", + "image_caption": [ + "(d) $\\hat{\\beta} = 0.10$" + ], + "image_footnote": [], + "bbox": [ + 480, + 88, + 599, + 178 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/516ec8201b7b54e84f809303b973dd23781f225f6e0b96343afa85a9d8f861d0.jpg", + "image_caption": [ + "(e) $\\hat{\\beta} = 0.14$" + ], + "image_footnote": [], + "bbox": [ + 602, + 88, + 717, + 178 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5e2617478418d058fbb049cac1aeefec9b17fe721f91ce0e60702bc087e96e91.jpg", + "image_caption": [ + "(f) $\\hat{\\beta} = 0.18$" + ], + "image_footnote": [], + "bbox": [ + 718, + 88, + 834, + 178 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/fa67a832ba461681aa6a61cccc56f71e3221a747a2fbddd6be88b08e53ddd654.jpg", + "image_caption": [ + "Figure 5. Results of controllable noise synthesis. The gain factor ranges from 0.02 to 0.18 with intervals of 0.04." 
+ ], + "image_footnote": [], + "bbox": [ + 94, + 226, + 212, + 318 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/9c7c9f87e5ad8ed1f80c19f1b9e660e28340038f249e2e227313542ecd9f045f.jpg", + "image_caption": [ + "(b) C2N [16]" + ], + "image_footnote": [], + "bbox": [ + 215, + 227, + 333, + 319 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ff1f381eba36e6da51584c2580c47cacf2b4663ec63269c1d346aeba13b0aa95.jpg", + "image_caption": [ + "(c) NeCA-S" + ], + "image_footnote": [], + "bbox": [ + 334, + 227, + 452, + 319 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/fc607dac803c972cc879a21ba7b44774f6765dae48a0f9325f15b88ce4aa5c09.jpg", + "image_caption": [ + "(a) Noisy", + "(d) NeCA-W" + ], + "image_footnote": [], + "bbox": [ + 93, + 337, + 210, + 426 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6c8aa11b4bc9fc744a83fadc5dc423828ba5fc1adeef17fda07ba3640fb3d1fb.jpg", + "image_caption": [ + "(e) Real" + ], + "image_footnote": [], + "bbox": [ + 212, + 337, + 330, + 426 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ce2c034828c1d63c9e5c67690dbd8e354f2b7dc6f7b9ba3f07ad6df88ae5142f.jpg", + "image_caption": [ + "(f) Clean" + ], + "image_footnote": [], + "bbox": [ + 331, + 337, + 450, + 426 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/baf7ebbe63e91d30aca03f798a85425fa8b68b3ac7d02a9c436d07795d21e863.jpg", + "image_caption": [ + "Figure 6. Denoising results on the SIDD dataset. DnCNN denoisers are trained on the noisy images from (b) C2N, (c, d) our models, and (e) real noisy images of the SIDD." + ], + "image_footnote": [], + "bbox": [ + 94, + 510, + 212, + 604 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/577c34bc1dec9e4b436b76e0a1491243a99cd25377208c4b2be0e650bb31445d.jpg", + "image_caption": [ + "(b) BM3D [9]" + ], + "image_footnote": [], + "bbox": [ + 215, + 511, + 333, + 604 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/62e07428b834f79a4ad27c96cdb99a1aed99296e4fab164e30f267674c1fa872.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 333, + 511, + 452, + 604 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/58d081b82dada00d053c63984181cb25b957486f15ac2262a88eb15362771af2.jpg", + "image_caption": [ + "(a) Noisy", + "(d) NeCA-S", + "Figure 7. Denoising results on the DND dataset. DnCNN denoisers are trained on the noisy images from (c) C2N, (d, e) our models, and (f) real noisy images of the SIDD." + ], + "image_footnote": [], + "bbox": [ + 93, + 621, + 210, + 712 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d1c13e763586c1a69a63b058e60ada9357f40f635041fc934f4abcf5bead0f79.jpg", + "image_caption": [ + "(e) NeCA-W" + ], + "image_footnote": [], + "bbox": [ + 212, + 621, + 331, + 712 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/93b7aa1f1696eb8d6c4b2fe2410bb65e822829a37b82b2bc7285b638a8dc0924.jpg", + "image_caption": [ + "(c) C2N [16]", + "(f) Real" + ], + "image_footnote": [], + "bbox": [ + 331, + 621, + 450, + 712 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "and DND datasets. The results indicate that the denoisers trained on the synthetic image pairs from NeCA-W and NeCA-S achieve similar denoising results compared to the denoiser trained on real image pairs. 
In contrast, the denoiser trained on noisy samples from C2N fails to suppress the noise effectively, partly due to its unpaired training scheme.", + "bbox": [ + 76, + 794, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/e716acd9cb0bc4cc37d5ec50e0f7c9fb752b205e82814c55189d7ebc53af1287.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Loss | w/o Lstd2 | w/o Ladv2 | w/o Lreg | all
KL | 0.052 | 0.048 | 0.108 | 0.041
", + "bbox": [ + 539, + 226, + 849, + 267 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 3. Ablation study on the effectiveness of different loss functions. We train the framework on the training set of camera IP and calculate KL divergence on its validation set.", + "bbox": [ + 500, + 277, + 890, + 319 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4.Customized Generation", + "text_level": 1, + "bbox": [ + 500, + 337, + 718, + 351 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Our proposed noise synthesizing framework allows for controlling the generated noise with multiple noise levels by manipulating the gain factors. Figure 5 illustrates the controllable synthesizing results, which are generated by varying the gain factor within the range of 0.02 to 0.18 with intervals of 0.04. The results demonstrate that an increase in the gain factor value leads to a proportional increase in the magnitude of the generated noise.", + "bbox": [ + 496, + 361, + 890, + 483 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5. Ablation Study", + "text_level": 1, + "bbox": [ + 500, + 496, + 651, + 512 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this section, we conduct ablation studies to verify the effectiveness of individual loss functions in our framework, including $\\mathcal{L}_{std2}$ , $\\mathcal{L}_{adv2}$ and $\\mathcal{L}_{reg}$ . We exclude $\\mathcal{L}_{std1}$ and $\\mathcal{L}_{adv1}$ from evaluation since they are indispensable for framework training. As indicated in Table 3, the model achieves optimal performance in KL divergence with complete loss functions, suggesting all the components contribute to the final synthetic noise. However, removing $\\mathcal{L}_{reg}$ significantly reduces the KL divergence, suggesting the importance of stabilizing the training process. Moreover, both $\\mathcal{L}_{adv2}$ and $\\mathcal{L}_{std2}$ improve the quality of synthetic noise, supporting our claim that $\\mathcal{L}_{adv2}$ serves as a complementary loss for $\\mathcal{L}_{std2}$ , enabling the NPNet to predict more accurate noise levels.", + "bbox": [ + 496, + 520, + 890, + 731 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 750, + 617, + 763 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we propose a neighboring correlation-aware noise model for sRGB real noise generation. Our proposed method effectively bridges the gap between synthetic noise and real noise by explicitly modeling the signal dependency and neighboring correlation of real noise. The experimental results demonstrate the proposed noise model achieves superior performance on both real noise synthesis and downstream real image denoising tasks.", + "bbox": [ + 496, + 773, + 890, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "1690", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Abdelrahman Abdelhamed, Marcus A Brubaker, and Michael S Brown. Noise flow: Noise modeling with conditional normalizing flows. In ICCV, 2019. 1, 2", + "[2] Abdelrahman Abdelhamed, Stephen Lin, and Michael S Brown. A high-quality denoising dataset for smartphone cameras. In CVPR, 2018. 6, 7", + "[3] Saeed Anwar and Nick Barnes. 
Real image denoising with feature attention. In ICCV, 2019. 1", + "[4] Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016. 4", + "[5] Tim Brooks, Ben Mildenhall, Tianfan Xue, Jiawen Chen, Dillon Sharlet, and Jonathan T Barron. Unprocessing images for learned raw denoising. In CVPR, 2019. 1, 2", + "[6] Ke-Chi Chang, Ren Wang, Hung-Jin Lin, Yu-Lun Liu, Chia-Ping Chen, Yu-Lin Chang, and Hwann-Tzong Chen. Learning camera-aware noise models. In ECCV, 2020. 1, 2", + "[7] Guangyong Chen, Fengyuan Zhu, and Pheng Ann Heng. An efficient statistical method for image noise level estimation. In ICCV, 2015. 6", + "[8] Jingwen Chen, Jiawei Chen, Hongyang Chao, and Ming Yang. Image blind denoising with generative adversarial network based noise modeling. In CVPR, 2018. 2, 7", + "[9] Kostadin Dabov, Alessandro Foi, Vladimir Katkovnik, and Karen Egiazarian. Image denoising by sparse 3-d transform-domain collaborative filtering. TIP, 2007. 7, 8", + "[10] Alessandro Foi. Clipped noisy images: Heteroskedastic modeling and practical denoising. Signal Processing, 2009. 2", + "[11] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial networks. Communications of the ACM, 2020. 2", + "[12] Shuhang Gu, Lei Zhang, Wangmeng Zuo, and Xiangchu Feng. Weighted nuclear norm minimization with application to image denoising. In CVPR, 2014. 7", + "[13] Ishaan Gulrajani, Faruk Ahmed, Martin Arjovsky, Vincent Dumoulin, and Aaron C Courville. Improved training of wasserstein gans. NIPS, 2017. 5", + "[14] Lanqing Guo, Siyu Huang, Haosen Liu, and Bihan Wen. Fino: Flow-based joint image and noise model. arXiv preprint arXiv:2111.06031, 2021. 1", + "[15] Shi Guo, Zifei Yan, Kai Zhang, Wangmeng Zuo, and Lei Zhang. Toward convolutional blind denoising of real photographs. In CVPR, 2019. 1, 2", + "[16] Geonwoon Jang, Wooseok Lee, Sanghyun Son, and Kyoung Mu Lee. C2n: Practical generative noise modeling for real-world denoising. In ICCV, 2021. 1, 2, 6, 7, 8", + "[17] Dong-Wook Kim, Jae Ryun Chung, and Seung-Won Jung. Grdn: Grouped residual dense network for real image denoising and gan-based real-world noise modeling. In CVPRW, 2019. 1, 2", + "[18] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 6" + ], + "bbox": [ + 78, + 114, + 467, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[19] Durk P Kingma and Prafulla Dhariwal. Glow: Generative flow with invertible 1x1 convolutions. NIPS, 2018. 2", + "[20] Shayan Kousha, Ali Maleky, Michael S Brown, and Marcus A Brubaker. Modeling srgb camera noise with normalizing flows. In CVPR, 2022. 1, 2, 3, 7", + "[21] Ce Liu, William T Freeman, Richard Szeliski, and Sing Bing Kang. Noise estimation from a single image. In CVPR, 2006. 2, 3", + "[22] Seonghyeon Nam, Youngbae Hwang, Yasuyuki Matsushita, and Seon Joo Kim. A holistic approach to cross-channel image noise modeling and its application to image denoising. In CVPR, 2016. 1, 2, 3", + "[23] Tobias Plotz and Stefan Roth. Benchmarking denoising algorithms with real photographs. In CVPR, 2017. 7", + "[24] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. TIP, 2004. 6", + "[25] Kaixuan Wei, Ying Fu, Jiaolong Yang, and Hua Huang. 
A physics-based noise formation model for extreme low-light raw denoising. In CVPR, 2020. 1, 2", + "[26] Zongsheng Yue, Qian Zhao, Lei Zhang, and Deyu Meng. Dual adversarial network: Toward real-world noise removal and noise generation. In ECCV, 2020. 1, 2", + "[27] Kai Zhang, Wangmeng Zuo, Yunjin Chen, Deyu Meng, and Lei Zhang. Beyond a gaussian denoiser: Residual learning of deep cnn for image denoising. TIP, 2017. 6", + "[28] Yi Zhang, Hongwei Qin, Xiaogang Wang, and Hongsheng Li. Rethinking noise synthesis and modeling in raw denoising. In ICCV, 2021. 1, 2", + "[29] Yuqian Zhou, Jianbo Jiao, Haibin Huang, Yang Wang, Jue Wang, Honghui Shi, and Thomas Huang. When awgn-based denoiser meets real noises. In AAAI, 2020. 4, 5" + ], + "bbox": [ + 501, + 92, + 890, + 546 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "1691", + "bbox": [ + 483, + 944, + 513, + 955 + ], + "page_idx": 8 + } +] \ No newline at end of file diff --git a/2023/sRGB Real Noise Synthesizing With Neighboring Correlation-Aware Noise Model/4d3b606f-ee7b-460f-b2bf-d38a08fa5304_model.json b/2023/sRGB Real Noise Synthesizing With Neighboring Correlation-Aware Noise Model/4d3b606f-ee7b-460f-b2bf-d38a08fa5304_model.json new file mode 100644 index 0000000000000000000000000000000000000000..c59664400aa55f8cd83f9513db7944daa8ab111a --- /dev/null +++ b/2023/sRGB Real Noise Synthesizing With Neighboring Correlation-Aware Noise Model/4d3b606f-ee7b-460f-b2bf-d38a08fa5304_model.json @@ -0,0 +1,2341 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.131, + 0.895, + 0.153 + ], + "angle": 0, + "content": "sRGB Real Noise Synthesizing with Neighboring Correlation-Aware Noise Model" + }, + { + "type": "text", + "bbox": [ + 0.287, + 0.18, + 0.685, + 0.234 + ], + "angle": 0, + "content": "Zixuan Fu\\(^{1*}\\), Lanqing Guo\\(^{1*}\\), Bihan Wen\\(^{1\\dagger}\\) \n\\(^{1}\\)Nanyang Technological University, Singapore \n{zixuan.fu, lanqing001, bihan.wen}@ntu.edu.sg" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.269, + 0.314, + 0.286 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.301, + 0.474, + 0.619 + ], + "angle": 0, + "content": "Modeling and synthesizing real noise in the standard RGB (sRGB) domain is challenging due to the complicated noise distribution. While most of the deep noise generators proposed to synthesize sRGB real noise using an end-to-end trained model, the lack of explicit noise modeling degrades the quality of their synthesized noise. In this work, we propose to model the real noise as not only dependent on the underlying clean image pixel intensity, but also highly correlated to its neighboring noise realization within the local region. Correspondingly, we propose a novel noise synthesizing framework by explicitly learning its neighboring correlation on top of the signal dependency. With the proposed noise model, our framework greatly bridges the distribution gap between synthetic noise and real noise. 
We show that our generated \"real\" sRGB noisy images can be used for training supervised deep denoisers, thus to improve their real denoising results with a large margin, comparing to the popular classic denoisers or the deep denoisers that are trained on other sRGB noise generators. The code will be available at https://github.com/xuan611/sRGB-Real-NoiseSynthesizing." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.648, + 0.21, + 0.664 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.674, + 0.471, + 0.841 + ], + "angle": 0, + "content": "Real image denoising is one of the most challenging tasks in low-level vision. Deep denoisers that are trained using synthetic noise, e.g., Additive White Gaussian Noise (AWGN), perform poorly on real photography [3, 15], which motivates more realistic noise models, e.g., [1, 5, 14-16]. In general, there are two approaches towards real noise modeling, i.e., modeling in the raw-RGB and standard RGB (sRGB) domains. Popular modeling methods including the physical-based [25, 28] and data-driven methods [1, 6] exploit sophisticated noise models in the raw-RGB domain, which demonstrated promising perfor" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.271, + 0.895, + 0.422 + ], + "angle": 0, + "content": "mance as noise in raw-RGB is largely simplified comparing to noise in sRGB [20, 22]. However, raw-RGB images are not usually utilized by common users due to their large sizes. In contrast, most commercial cameras generate sRGB images by default, which are more popular in practice. Unfortunately, the noise generation methods in the raw-RGB domain cannot be directly applied to sRGB images, as the real noise distribution in sRGB is more complicated than raw-RGB noise, caused by the in-camera signal processing (ISP) pipeline [22]." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.427, + 0.895, + 0.609 + ], + "angle": 0, + "content": "Recent works [5, 15] proposed to generate noise on raw-RGB images and convert them into sRGB images by the ISP pipeline including demosaicing, white balancing, gamma correction, etc. While these methods synthesized realistic noise, the requirement of raw-RGB images as well as manually defined ISP pipelines limits their applications. An alternative solution for sRGB real noise modeling is to train the generative models with sRGB noisy-clean images and directly synthesize real noise on sRGB images [16,17,20,26]. However, these models synthesize noise without explicitly modeling the characteristics of sRGB real noise, resulting in degradation of the quality of the synthesized noise." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.614, + 0.895, + 0.9 + ], + "angle": 0, + "content": "In this paper, we propose a novel real noise generation network, based on Neighboring Correlation-Aware noise model, dubbed as NeCA, to directly synthesize real noise in the sRGB domain. The proposed real noise synthesis assumes that the sRGB real noise is not only signal-dependent, i.e., noise level partially depends on its underlying clean pixel, but also highly correlated with its neighboring noise realization. Such a real noise model greatly bridges the gap between the synthetic and real noise in sRGB. Furthermore, the synthesized \"real\" images by the proposed NeCA can be used for training supervised deep denoisers, thus tackling the real image denoising challenges, subjective to only a few real training data. 
The trained deep denoiser using our synthetic noisy images achieves state-of-the-art denoising performance, compared to the popular classic denoisers as well as deep denoisers that are trained on synthetic pairs from other noise models. To sum up, our main contributions can be concluded as follows:" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.851, + 0.294, + 0.864 + ], + "angle": 0, + "content": "*Co-first authors contributed equally." + }, + { + "type": "page_footnote", + "bbox": [ + 0.098, + 0.864, + 0.288, + 0.876 + ], + "angle": 0, + "content": "†Corresponding author: Bihan Wen." + }, + { + "type": "page_footnote", + "bbox": [ + 0.08, + 0.876, + 0.469, + 0.901 + ], + "angle": 0, + "content": "This work was supported in part by the MOE AcRF Tier 1 (RG61/22) and Start-Up Grant." + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.851, + 0.469, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1683" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.097, + 0.092, + 0.468, + 0.166 + ], + "angle": 0, + "content": "- We introduce a neighboring correlation-aware noise model for sRGB real noise synthesis by explicitly modeling the neighboring correlation of real noise, to bridge the gap between the synthetic and real noise distribution in sRGB." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.172, + 0.468, + 0.232 + ], + "angle": 0, + "content": "- Our proposed framework shows a well-generalized ability, which is still capable to improve the real image denoising performance even with limited training data." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.237, + 0.468, + 0.296 + ], + "angle": 0, + "content": "- With the synthetic image pairs generated by NeCA, the trained denoisers achieve state-of-the-art denoising performance compared with the deep denoisers trained with other real noise models." + }, + { + "type": "list", + "bbox": [ + 0.097, + 0.092, + 0.468, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.312, + 0.218, + 0.327 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.337, + 0.368, + 0.352 + ], + "angle": 0, + "content": "2.1. Raw-RGB Image Noise Synthesis" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.36, + 0.468, + 0.48 + ], + "angle": 0, + "content": "Modeling real noise in raw-RGB is challenging as it cannot be simply assumed as Additive White Gaussian Noise (AWGN). Typically, raw-RGB noise models can be classified into two categories: physical-based models and learning-based models. One of the most commonly used physical-based models is the heteroscedastic Gaussian noise [10], which posits noise value, located at pixel \\(i\\), is dependent on its underlying clean pixel intensity:" + }, + { + "type": "equation", + "bbox": [ + 0.185, + 0.495, + 0.468, + 0.513 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {n} _ {i} \\sim \\mathcal {N} \\left(0, \\sigma_ {s} ^ {2} \\cdot \\boldsymbol {x} _ {i} + \\sigma_ {c} ^ {2}\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.52, + 0.468, + 0.653 + ], + "angle": 0, + "content": "where \\( n \\) and \\( x \\) are noise and clean image in the raw-RGB domain, while \\( \\sigma_{s} \\) and \\( \\sigma_{c} \\) denote the noise variance term for signal-dependent and signal-independent components. 
Such a noise model is also known as the noise level function (NLF) as it describes the relationship between the pixelwise noise level and image intensity. To better model the camera sensor noise, recent works [25, 28] have proposed that real noise is a sophisticated combination of shot noise, read noise and row noise, etc." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.656, + 0.468, + 0.837 + ], + "angle": 0, + "content": "Compared to statistical modeling of noise, learning-based models learn the real noise distribution with generative models such as the generative adversarial nets (GANs) [6] and normalization flows [1] from paired noisy-clean images. Although these methods perform well in raw-RGB, they cannot be directly applied to model sRGB real noise since their assumptions are based on the characteristics of raw-RGB noise. For instance, these noise generators synthesize raw-RGB noise from an initialized heteroscedastic Gaussian noise (as described in Equation (1)), which fails to provide an accurate representation of real noise in the sRGB domain [21, 22]." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.847, + 0.334, + 0.862 + ], + "angle": 0, + "content": "2.2. sRGB Image Noise Synthesis" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.871, + 0.468, + 0.901 + ], + "angle": 0, + "content": "The camera ISP pipeline, including demosaicing, tone mapping, white balancing, gamma mapping, etc., makes" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.197 + ], + "angle": 0, + "content": "real noise in the sRGB domain to be more complicated than it is in the raw-RGB domain. To synthesize sRGB real noise, two approaches have been proposed: (1) synthesizing noisy samples in the raw-RGB domain and rendering them into sRGB images by applying the manually defined ISP pipeline [5, 15], and (2) directly synthesizing real noise in the sRGB domain [8, 16, 17, 20, 26]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.199, + 0.892, + 0.348 + ], + "angle": 0, + "content": "In CBDNet [15], heteroscedastic Gaussian noise is added on raw-RGB clean images, and images are converted into sRGB using demosaicing and camera response functions. However, CBDNet requires raw-RGB images, which are not commonly used. To address this issue, unprocessing image (UPI) [5] proposes to de-render sRGB images into raw-RGB images using several predefined unprocessing pipelines. Similar procedures used in CBDNet are then applied to the unprocessed raw-RGB images to obtain their sRGB versions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.351, + 0.892, + 0.742 + ], + "angle": 0, + "content": "Despite CBDNet and UPI effectively synthesize sRGB real noise, they still require predefined ISP pipelines, which may not match real ones used in different camera sensors. Therefore, generating real noise directly in the sRGB domain with deep generative models [11, 19] is considered an alternative solution. GCBD [8] proposes a GAN-based model that learns noise distributions by training on noise patches that have been cropped from noisy images. However, the synthesized noise is signal-independent as it is generated from random noise. DANet [26] and GRDN [17] use conditional generative networks to synthesize signal-dependent noise, however, few experiments are conducted to demonstrate the effectiveness of the proposed noise generators. 
C2N [16] attempts to synthesize the real noise with unpaired clean-noisy images, but the generated noise contains artifacts and color-shift problems due to the unpaired training mode. Recently, Kousha et al. [20] propose a conditional flow-based model for sRGB image noise generation that takes clean images, camera types, and ISO levels as input. However, the denoiser, trained with synthetic data, improves marginally compared to the unpaired noise generation method C2N. Unlike previous attempts that model noise with an end-to-end generator, our proposed method explicitly decomposes signal dependency and neighboring correlation of real noise and learns them with separate networks." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.761, + 0.59, + 0.775 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.786, + 0.88, + 0.802 + ], + "angle": 0, + "content": "3.1. Neighboring Correlation-Aware Noise Model" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.892, + 0.901 + ], + "angle": 0, + "content": "In this section, we present our proposed noise model for sRGB real noise. We begin by introducing the basic noise model, which defines the signal dependency of pixel-wise noise level and its underlying clean pixels. We then discuss discrepancies between noise synthesized by the basic noise model and sRGB real noise and propose to bridge this gap" + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.946, + 0.516, + 0.956 + ], + "angle": 0, + "content": "1684" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.121 + ], + "angle": 0, + "content": "by explicitly modeling noise neighboring correlation on top of the signal dependency." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.122, + 0.471, + 0.318 + ], + "angle": 0, + "content": "Basic Noise Model. Both raw-RGB and sRGB real noise are dependent on the image signal. In raw-RGB, the noise level can be approximated as a simple function of its underlying clean pixel intensity, i.e., heteroscedastic Gaussian noise described in Equation (1). However sRGB real noise is more complex due to camera settings and signal transformations in the ISP pipeline [20-22]. To address this challenge, we propose a noise model that characterizes the signal dependency of sRGB real noise. Specifically, for an sRGB clean image \\( \\mathbf{x} = (x_{1},\\dots,x_{N}) \\) and its paired noisy version \\( \\mathbf{y} = (y_{1},\\dots,y_{N}) \\), we define noise level at pixel \\( i \\) as a function of the clean image patch \\( \\Omega_{\\mathbf{x}} \\), centered at clean pixel \\( x_{i} \\), and camera ISO level \\( \\gamma \\):" + }, + { + "type": "equation", + "bbox": [ + 0.218, + 0.33, + 0.469, + 0.347 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\sigma} _ {i} = f \\left(\\Omega_ {\\boldsymbol {x}}, \\gamma\\right), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.358, + 0.47, + 0.448 + ], + "angle": 0, + "content": "where \\(f(\\cdot)\\) represents the non-linear relationship of \\(\\Omega_{\\pmb{x}},\\gamma\\) and the pixel-wise noise level \\(\\sigma_{i} = (\\sigma_{i,r},\\sigma_{i,g},\\sigma_{i,b})\\) for three color channels. For the sake of clarity, we omit the location index \\(i\\) in the expression for the local region \\(\\Omega_{\\pmb{x}}\\). 
Then the distribution of noise \\(\\pmb{v}\\) at each pixel is modeled as a Gaussian distribution:" + }, + { + "type": "equation", + "bbox": [ + 0.209, + 0.46, + 0.469, + 0.478 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {v} _ {i, c} \\sim \\mathcal {N} (0, \\sigma_ {i, c} ^ {2}), \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.488, + 0.47, + 0.551 + ], + "angle": 0, + "content": "where \\(c\\) is the index of RGB channels. We further define the noise level map \\(\\pmb{m}\\), which has the same size as the clean image and the value at pixel \\(i\\) refers to the noise level \\(\\sigma_{i}\\). Finally, we can simulate signal-dependent noise as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.166, + 0.563, + 0.469, + 0.58 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {v} = \\boldsymbol {\\epsilon} \\odot \\boldsymbol {m}, \\quad \\epsilon_ {i, c} \\sim \\mathcal {N} (0, 1 ^ {2}). \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.587, + 0.471, + 0.875 + ], + "angle": 0, + "content": "Neighboring Correlation Noise Model. The noise synthesized by the basic noise model still exhibits discrepancies with real noise, as shown in Figure 1(b) and (d). We attribute this gap to the improper noise realization defined in Equation (4), where noise is sampled spatially independently from the basic noise model. Specifically, the most commonly used noise models, including the AWGN, heteroscedastic Gaussian noise, and our basic noise model, assume that the noise distribution is independent at each pixel, and the noise is sampled from the noise distribution without considering its neighboring synthesized noise. However, this noise realization method is inadequate to synthesize RGB real noise as the noise value is assumed to be highly correlated with its neighboring noise values due to the influence of the ISP pipeline such as demosaicing, which introduces neighboring operations. We refer to this characteristic of noise as neighboring correlation and define a neighboring correlation operator \\( g(\\cdot) \\) that maps such the correlation onto the synthesized signal-dependent noise \\( v \\):" + }, + { + "type": "equation", + "bbox": [ + 0.228, + 0.886, + 0.469, + 0.902 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {n} _ {i} = g \\left(\\Omega_ {\\boldsymbol {v}}\\right), \\tag {5}\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.514, + 0.091, + 0.687, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.569, + 0.176, + 0.632, + 0.19 + ], + "angle": 0, + "content": "(a) Clean" + }, + { + "type": "image", + "bbox": [ + 0.692, + 0.091, + 0.865, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.723, + 0.176, + 0.834, + 0.19 + ], + "angle": 0, + "content": "(b) SDNU Noise" + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.192, + 0.687, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.545, + 0.277, + 0.656, + 0.291 + ], + "angle": 0, + "content": "(c) SDNC Noise" + }, + { + "type": "image", + "bbox": [ + 0.692, + 0.192, + 0.865, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.751, + 0.277, + 0.806, + 0.291 + ], + "angle": 0, + "content": "(d) Real" + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.294, + 0.892, + 0.378 + ], + "angle": 0, + "content": "Figure 1. 
The visualization of modeling signal dependency and neighboring correlation of sRGB real noise. (a) Clean image. (b) Synthetic signal-dependent and neighboring uncorrelated (SDNU) noise. (c) Synthetic signal-dependent and neighboring correlated (SDNC) noise. (d) Real noise. We add a constant value to the noise maps for better visualizing the signal dependency." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.392, + 0.893, + 0.605 + ], + "angle": 0, + "content": "where \\( n \\) is the neighboring correlated noise and \\( \\Omega_{\\mathbf{v}} \\) is the local patch of \\( \\mathbf{v} \\), centered at pixel \\( i \\). By processing the neighboring uncorrelated noise \\( \\mathbf{v} \\) with the neighboring correlation operator, which is learned by our proposed noise synthesizing framework in Section 3.2, the final generated noise performs similar characteristics to real noise, as demonstrated in Figure 1(c) and (d). For the purpose of clarity, we use SDNU noise to refer to the intermediate synthesized signal-dependent and neighboring uncorrelated noise \\( \\mathbf{v} \\), and SDNC noise to refer to the final generated signal-dependent and neighboring correlated noise \\( \\mathbf{n} \\). In the following sections, we will introduce the proposed noise synthesizing framework to explicitly learn the neighboring correlation and signal dependency of noise." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.618, + 0.776, + 0.634 + ], + "angle": 0, + "content": "3.2. Noise Synthesizing Framework" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.643, + 0.892, + 0.839 + ], + "angle": 0, + "content": "Given paired sRGB real-world noisy and clean images \\((\\pmb{y},\\pmb{x})\\), where \\(\\pmb{y} = \\pmb{x} + \\pmb{n}\\), our proposed framework aims to learn the neighboring correlation-aware noise model using paired data. Our proposed framework, as illustrated in Figure 2, comprises three networks: a gain estimation network (GENet), a noise-level prediction network (NPNet), and a neighboring correlation network (NCNet). GENet estimates the gain factor from a noisy image, which serves to amplify the synthesized noise, similar to the ISO level. NPNet synthesizes the SDNU noise by incorporating the estimated gain factor and the clean image as inputs. Finally, NCNet explicitly models the neighboring correlation of sRGB real noise and generates the SDNC noise." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Gain Estimation Network. The gain estimation network (GENet) is designed to estimate the gain factor from a noisy image \\(\\pmb{y}\\), which serves as guidance to control the overall magnitude of the synthesized noise. The gain factor is de" + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1685" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.093, + 0.257, + 0.107 + ], + "angle": 0, + "content": "Noise-level prediction network" + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.11, + 0.891, + 0.342 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.358, + 0.893, + 0.415 + ], + "angle": 0, + "content": "Figure 2. The proposed noise synthesizing framework. Our NeCA contains three networks including the gain estimation network (GENet), noise-level prediction network (NPNet), and neighboring correlation network (NCNet). PD denotes the Pixel-shuffle Down-sampling scheme introduced in [29]. 
Local noise level estimation and global noise level estimation operations are formulated in Equation (13) and (6). The details of the network architecture and PD scheme are described in the supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.43, + 0.47, + 0.473 + ], + "angle": 0, + "content": "fined as the global noise level of the noisy image, which is the standard deviation calculated by every noise value in its noise \\( \\pmb{n} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.181, + 0.473, + 0.469, + 0.513 + ], + "angle": 0, + "content": "\\[\n\\beta = \\sqrt {\\frac {1}{N} \\sum_ {i , c} \\left(\\boldsymbol {n} _ {i , c} - \\bar {n}\\right) ^ {2}}, \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.518, + 0.469, + 0.606 + ], + "angle": 0, + "content": "where \\(\\beta\\) is the defined global noise level of the noisy image \\(\\pmb{y}\\), \\(\\bar{n}\\) is the mean of the noise \\(\\pmb{n}\\), and \\(N\\) is the total number of pixels in the noisy image. However, during testing, the calculated gain factor is unavailable. To solve this, we aim to estimate the gain factor from the noisy image using GENet:" + }, + { + "type": "equation", + "bbox": [ + 0.235, + 0.607, + 0.469, + 0.625 + ], + "angle": 0, + "content": "\\[\n\\hat {\\beta} = E (\\boldsymbol {y}), \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.633, + 0.469, + 0.753 + ], + "angle": 0, + "content": "where \\( E \\) represents the GENet, and \\( \\hat{\\beta} \\) is the estimated gain factor by GENet, which is expected to be as close as the global noise level of the noisy image. The main reason to use the gain factor estimated from the noisy image rather than the ISO level is driven by a crucial factor. ISO levels are typically saved in the metadata of images. The requirement of metadata will limit the application of our noise synthesizing framework." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.755, + 0.469, + 0.83 + ], + "angle": 0, + "content": "Noise-level Prediction Network. The noise-level prediction network (NPNet) learns a parametric model for the noise distribution defined in Equation (3). To achieve this, NPNet predicts the pixel-wise noise level \\(\\hat{\\sigma}_i\\) using the clean local patch \\(\\Omega_{\\pmb{x}}\\) and estimated gain factor \\(\\hat{\\beta}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.213, + 0.841, + 0.469, + 0.859 + ], + "angle": 0, + "content": "\\[\n\\hat {\\boldsymbol {\\sigma}} _ {i} = G _ {1} \\left(\\Omega_ {\\boldsymbol {x}}, \\hat {\\beta}\\right), \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.469, + 0.902 + ], + "angle": 0, + "content": "where \\( G_{1} \\) denotes the NPNet, which has three output channels to predict noise levels for each pixel. To effectively" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.43, + 0.892, + 0.519 + ], + "angle": 0, + "content": "incorporate the gain factor into the NPNet, we first apply the layer normalization [4] to the feature map of convolution and then multiply the normalized feature map by the gain factor. In practice, NPNet directly outputs the predicted noise level map \\(\\hat{m}\\) by utilizing a clean image and gain factor:" + }, + { + "type": "equation", + "bbox": [ + 0.641, + 0.519, + 0.891, + 0.536 + ], + "angle": 0, + "content": "\\[\n\\hat {\\boldsymbol {m}} = G _ {1} (\\boldsymbol {x}, \\hat {\\beta}). 
\\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.541, + 0.892, + 0.586 + ], + "angle": 0, + "content": "Once the noise level map \\(\\hat{m}\\) is obtained, the SDNU noise \\(\\hat{v}\\) can be synthesized by using the sampling trick defined in Equation (4)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.587, + 0.892, + 0.661 + ], + "angle": 0, + "content": "Neighboring Correlation Network. The neighboring correlation network (NCNet) performs as the neighboring correlation operator, described in Equation (5). By taking the noise value and its neighboring noise realization as input, NCNet generates the SDNC noise \\(\\hat{n}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.645, + 0.67, + 0.891, + 0.687 + ], + "angle": 0, + "content": "\\[\n\\hat {\\boldsymbol {n}} _ {i} = G _ {2} \\left(\\Omega_ {\\hat {\\boldsymbol {v}}}\\right), \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.695, + 0.891, + 0.74 + ], + "angle": 0, + "content": "where \\(\\Omega_{\\hat{v}}\\) is the noise patch of \\(\\hat{v}\\) located at pixel \\(i\\) and \\(G_{2}\\) denotes the NCNet. The SDNC noise can be directly generated by taking the SDNU noise into the network:" + }, + { + "type": "equation", + "bbox": [ + 0.653, + 0.749, + 0.891, + 0.764 + ], + "angle": 0, + "content": "\\[\n\\hat {\\boldsymbol {n}} = G _ {2} (\\hat {\\boldsymbol {v}}). \\tag {11}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.772, + 0.653, + 0.787 + ], + "angle": 0, + "content": "3.3. Loss Functions" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.795, + 0.892, + 0.869 + ], + "angle": 0, + "content": "To jointly train the proposed networks, five loss functions are introduced: (1) standard deviation losses \\(\\mathcal{L}_{std1}\\) and \\(\\mathcal{L}_{std2}\\), (2) adversarial losses \\(\\mathcal{L}_{adv1}\\) and \\(\\mathcal{L}_{adv2}\\), (3) the regularization loss \\(\\mathcal{L}_{reg}\\). The details of these loss functions will be introduced later." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Standard Deviation Loss. We introduce \\(\\mathcal{L}_{std1}\\) to enforce the estimated gain factor \\(\\hat{\\beta}\\) by GENet to be close to the" + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "1686" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.12 + ], + "angle": 0, + "content": "global noise level \\(\\beta\\) of the noisy image, which is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.191, + 0.12, + 0.469, + 0.14 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {s t d 1} = \\mathbb {E} _ {\\boldsymbol {y}} \\left[ (\\hat {\\beta} - \\beta) ^ {2} \\right], \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.144, + 0.423, + 0.16 + ], + "angle": 0, + "content": "where \\(\\beta\\) and \\(\\hat{\\beta}\\) are obtained by Equation (6) and (7)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.16, + 0.469, + 0.25 + ], + "angle": 0, + "content": "The objective of NPNet is to predict the noise level map \\(\\hat{m}\\) by taking the clean image and gain factor as input. 
However, since the groundtruth noise level map is not available, we propose to use a simple local noise level estimation method to approximate the noise level map \\(m\\) from the noise, which is calculated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.151, + 0.258, + 0.469, + 0.284 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {m} _ {i, c} = \\sqrt {\\mathcal {M F} \\left(\\Omega_ {n} ^ {2}\\right) - \\mathcal {M F} ^ {2} \\left(\\Omega_ {n}\\right)}, \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.289, + 0.469, + 0.334 + ], + "angle": 0, + "content": "where \\(\\Omega_{n}\\) denotes the \\(7\\times 7\\) noise patch located at pixel \\(i\\), channel \\(c\\) of noise map \\(\\pmb{n}\\), and \\(\\mathcal{MF}(\\cdot)\\) represents the mean filter. Then the \\(\\mathcal{L}_{std2}\\) is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.175, + 0.34, + 0.469, + 0.361 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {s t d 2} = \\mathbb {E} _ {\\boldsymbol {x}, \\boldsymbol {y}} \\left[ \\left| | \\hat {\\boldsymbol {m}} - \\boldsymbol {m} \\right| \\right| _ {2} ^ {2} ]. \\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.367, + 0.47, + 0.744 + ], + "angle": 0, + "content": "Adversarial Loss. In order to guarantee that the generated noise shares the similar distribution with real noise, we introduce two adversarial losses. Our first adversarial loss \\(\\mathcal{L}_{adv1}\\) is imposed between the final synthetic SDNC noise and real noise \\((\\hat{n}, n)\\) to enforce the highly neighboring correlation in the generated noise, similar to that of the real noise. Our second adversarial loss \\(\\mathcal{L}_{adv2}\\) is calculated by using Pixel-shuffle Down-sampling [29] versions of synthesized intermediate noise \\(\\hat{v}\\) and real noise \\(n\\). Specifically, \\(\\mathcal{L}_{adv2}\\) servers as a complementary loss for \\(\\mathcal{L}_{std2}\\) because estimating the approximate noise level map using Equation (13) may not be reliable, as this method struggles to differentiate between noise originating from different intensities. However, directly calculating the adversarial loss between noise \\(\\hat{v}\\) and \\(n\\) is unreasonable since \\(\\hat{v}\\) is neighboring uncorrelated. To address this problem, we utilize the Pixel-shuffle Down-sampling (PD) scheme proposed in [29] to obtain down-sampled versions \\(((\\hat{v})_{\\downarrow s}, (n)_{\\downarrow s})\\) of both synthetic noise \\(\\hat{v}\\) and real noise \\(n\\). Here \\(\\downarrow_s\\) denotes the PD operation with a stride of \\(s\\) (in this paper, \\(s\\) is set to 3). According to [29], the neighboring correlation in the PD real noise \\((n)_{\\downarrow s}\\) will be greatly attenuated. This allows us to calculate the adversarial loss between the two down-sampled versions. We utilize WGAN-GP [13] to compute adversarial losses, while \\(\\mathcal{L}_{adv1}\\) is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.19, + 0.753, + 0.469, + 0.77 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {a d v 1} = - \\mathbb {E} _ {\\hat {\\boldsymbol {n}}} \\left[ D _ {1} (\\hat {\\boldsymbol {n}}) \\right], \\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.777, + 0.469, + 0.822 + ], + "angle": 0, + "content": "where \\(D_{1}\\) is the discriminator for NCNet, which scores the realness of synthesized noise. 
Similarly, \\(\\mathcal{L}_{adv2}\\) is computed as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.164, + 0.831, + 0.469, + 0.849 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {a d v 2} = - \\mathbb {E} _ {(\\hat {\\boldsymbol {v}}) \\downarrow_ {s}} \\left[ D _ {2} \\left((\\hat {\\boldsymbol {v}}) \\downarrow_ {s}\\right) \\right], \\tag {16}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.469, + 0.901 + ], + "angle": 0, + "content": "where \\(D_{2}\\) is the discriminator for NPNet. More detail about the PD scheme and the discriminator losses will be discussed in the supplementary material." + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.09, + 0.891, + 0.248 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.267, + 0.892, + 0.323 + ], + "angle": 0, + "content": "Figure 3. The designed two inference versions. NeCA-W utilizes the whole framework to synthesize SDNC noise. NeCA-S only adopts NCNet to synthesize signal-independent neighboring correlated (SINC) noise by taking the AWGN as input." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.337, + 0.892, + 0.4 + ], + "angle": 0, + "content": "Regularization Loss. Besides the losses mentioned above, a regularization loss \\(\\mathcal{L}_{reg}\\) is utilized to stabilize training. It is imposed between the estimated gain factor \\(\\hat{\\beta}\\) and the predicted noise level map \\(\\hat{m}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.621, + 0.408, + 0.891, + 0.428 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {r e g} = \\mathbb {E} _ {\\boldsymbol {x}, \\boldsymbol {y}} \\left[ \\| \\boldsymbol {w} \\| _ {2} ^ {2} \\right], \\tag {17}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.437, + 0.667, + 0.454 + ], + "angle": 0, + "content": "where \\(\\pmb{w}_{i,c} = \\hat{\\pmb{m}}_{i,c} - \\hat{\\beta}\\)" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.454, + 0.891, + 0.483 + ], + "angle": 0, + "content": "Finally, the full loss functions of the framework are described as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.508, + 0.495, + 0.891, + 0.512 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathcal {L} _ {\\text {r e g}} + \\lambda_ {1} \\mathcal {L} _ {\\text {a d v 1}} + \\lambda_ {2} \\mathcal {L} _ {\\text {a d v 2}} + \\lambda_ {3} \\mathcal {L} _ {\\text {s t d 1}} + \\lambda_ {4} \\mathcal {L} _ {\\text {s t d 2}}, \\tag {18}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.521, + 0.891, + 0.551 + ], + "angle": 0, + "content": "where \\(\\lambda_1, \\lambda_2, \\lambda_3\\) and \\(\\lambda_4\\) are hyperparameters to balance the importance between different losses." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.561, + 0.656, + 0.576 + ], + "angle": 0, + "content": "3.4. Inference Stage" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.584, + 0.892, + 0.81 + ], + "angle": 0, + "content": "We provide two inference versions to generate noise, as illustrated in Figure 3: (1) NeCA-W applies the entire framework to synthesize \"real\" noise. It first estimates the gain factor from an arbitrary noisy image and synthesizes noise by conditioning on a clean image and the estimated gain factor. (2) NeCA-S is the simplified version of NeCA-W which uses only NCNet for inference. In this method, AWGN is synthesized and then NCNet maps it with the neighboring correlation. We refer to this synthetic noise as signal-independent neighboring correlated (SINC) noise. 
Notably, NeCA-S still enhances the performance of deep denoiser on real noise, even though the denoiser is trained using the synthesized SINC noise. In the meantime, this inference version only requires minimal data to train the NCNet, which we will demonstrate in our experiments." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.823, + 0.633, + 0.839 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.848, + 0.691, + 0.864 + ], + "angle": 0, + "content": "4.1. Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.891, + 0.901 + ], + "angle": 0, + "content": "To assess the effectiveness of our proposed noise synthesizing framework, we conduct experiments in two parts:" + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1687" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.468, + 0.151 + ], + "angle": 0, + "content": "First, we assess the quality of the generated noise. Second, we examine the performance of NeCA on the downstream image denoising task. The details of the experiments will be discussed in the following subsections." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.153, + 0.468, + 0.333 + ], + "angle": 0, + "content": "Dataset. We evaluate our NeCA on the medium version of Smartphone Image Denoising Dataset (SIDD) [2], which comprises 320 noisy-clean image pairs captured by five different smartphone cameras, including Samsung Galaxy S6 Edge (S6), iPhone 7 (IP), Google Pixel (GP), Motorola Nexus 6 (N6), and LG G4 (G4). These images are collected in ten different scenes with varying ISO levels and lighting conditions. The SIDD provides both raw-RGB and sRGB images, with the sRGB version obtained by rendering the captured raw-RGB images through the manually defined ISP pipeline provided in [2]. In our experiments, we use the sRGB version to evaluate the proposed method." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.335, + 0.468, + 0.486 + ], + "angle": 0, + "content": "Metrics. We evaluate the performance of NeCA using three metrics: Discrete Kullback-Leibler (KL) divergence, Signal-to-Noise Ratio (PSNR) and Structural Similarity (SSIM) [24]. The KL divergence is used to measure the similarity of histograms between real noise and generated noise. The histogram range is set from \\(-0.1\\) to 0.1 with 64 intervals. The PSNR and SSIM are used to evaluate the performance of deep denoisers. A higher PSNR and SSIM reflect better denoising performance, while a smaller KL divergence represents better noise synthesizing quality." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.487, + 0.468, + 0.622 + ], + "angle": 0, + "content": "Implementation Details. All the networks are optimized using Adam optimizer [18] with a batch size of 32. Images are cropped to a size of \\(96 \\times 96\\) pixels for training. For noise generation, we train individual networks for 300 epochs with the learning rate of \\(10^{-4}\\). For denoising, we select the DnCNN [27] as the default deep denoiser for comparison and train it for 300 epochs with the learning rate of \\(10^{-3}\\). The \\(\\lambda_{1}, \\lambda_{2}, \\lambda_{3}\\) and \\(\\lambda_{4}\\) in the loss functions are set to 0.1, 0.1, 50, 10 respectively." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.634, + 0.303, + 0.649 + ], + "angle": 0, + "content": "4.2. 
Noise Synthesis on SIDD" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.659, + 0.468, + 0.869 + ], + "angle": 0, + "content": "Compared Baselines. We compare NeCA with several noise models, including Additive White Gaussian Noise (AWGN), C2N [16], and the NLF (described in Equation (1)). To synthesize AWGN, we estimate the noise level from each noisy image by applying a noise estimation method introduced in [7] and add it to its corresponding clean image. To synthesize noise using the C2N, we directly utilize the pretrained model provided by the authors. For the NLF, we synthesize heteroscedastic Gaussian noise on the raw-RGB clean images from SIDD, where the signal-dependent term \\(\\sigma_s^2\\) and signal-independent term \\(\\sigma_c^2\\) are obtained from the metadata provided by SIDD. We then apply the same ISP pipeline as used in the SIDD to render them to sRGB. We refer to this model as NLF-ISP for simplicity." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.468, + 0.9 + ], + "angle": 0, + "content": "Preparation. We evaluate the results of generated noise on each camera in SIDD, where \\(80\\%\\) of image pairs are allo" + }, + { + "type": "table", + "bbox": [ + 0.503, + 0.089, + 0.892, + 0.261 + ], + "angle": 0, + "content": "
Camera | Metrics | AWGN | C2N [16] | NeCA | NLF-ISP | Real
G4 | KL | 1.9755 | 0.1660 | 0.0242 | 0.0102 | -
G4 | PSNR | 28.15 | 37.81 | 38.85 | 38.51 | 40.60
GP | KL | 1.8351 | 0.1315 | 0.0432 | 0.0126 | -
GP | PSNR | 28.45 | 37.08 | 37.72 | 37.74 | 38.33
IP | KL | 1.8562 | 0.0581 | 0.0410 | 0.0475 | -
IP | PSNR | 28.01 | 39.12 | 39.46 | 39.53 | 39.45
N6 | KL | 2.1465 | 0.3524 | 0.0206 | 0.0063 | -
N6 | PSNR | 26.31 | 33.59 | 35.54 | 34.84 | 35.56
S6 | KL | 0.4517 | 0.4517 | 0.0302 | 0.0902 | -
S6 | PSNR | 27.22 | 33.18 | 35.56 | 35.99 | 36.85
Average | KL | 2.0062 | 0.2129 | 0.0342 | 0.0414 | -
Average | PSNR | 27.90 | 36.37 | 37.58 | 37.59 | 38.27
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.272, + 0.892, + 0.313 + ], + "angle": 0, + "content": "Table 1. Quantitative results of synthetic noise. The results are computed on the validation sets of five SIDD cameras with KL divergence and PSNR (dB). The best results are highlighted in bold." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.348, + 0.892, + 0.634 + ], + "angle": 0, + "content": "cated for training the noise synthesizing framework, while the rest \\(20\\%\\) are reserved for validation. The quality of the synthesized noise was evaluated using two metrics: KL divergence and PSNR. We calculate the KL divergence between the histograms of ground truth noise in the validation set and the noise synthesized by NeCA with clean images and corresponding gain factors from the validation set. Notably, the gain factors used for evaluation are estimated by GENet from the noisy images paired with the clean images, as they cannot be set to random values for evaluation. Besides, we also use the PSNR to further evaluate the quality of synthesized noisy images. We train the DnCNN with the synthesized noisy-clean image pairs on the training set and apply it to denoise the noisy images from the validation set. We calculate the PSNR between the denoised images and corresponding clean images to evaluate the denoising performance. In order to maintain consistency between the training and validation sets, we ensure that both sets contain the same set of ISO levels." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.644, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Noise Synthesis Results. Table 1 shows the KL divergence and PSNR results computed on validation sets of five devices. For the results of average KL divergence over all five cameras, our method exhibits the best performance among all noise models. Additionally, our method lags slightly behind NLF-ISP by \\(0.01\\mathrm{dB}\\) on the average PSNR. It is worth noting that noise samples generated by NLF-ISP are first synthesized in the raw-RGB domain and then rendered to sRGB using the same ISP pipelines as in SIDD, suggesting the minimal discrepancies between noise samples from NLF-ISP and real data. The similar results on each camera between NLF-ISP and our NeCA model demonstrate the promising performance of the proposed model. Figure 4 shows generated noise maps from compared methods. Remarkable visual similarities observed between generated noise maps and real noise maps indicate that our framework is capable to synthesize realistic noise." + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "1688" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.088, + 0.47, + 0.598 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.615, + 0.471, + 0.673 + ], + "angle": 0, + "content": "Figure 4. Visualization of synthetic noise samples under different ISO-lighting conditions on SIDD [2]. The displayed images, from left to right, correspond to clean image, C2N, Our method, NLF-ISP and real noisy image." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.691, + 0.406, + 0.709 + ], + "angle": 0, + "content": "4.3. Applications on Real Image Denoising" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.717, + 0.469, + 0.822 + ], + "angle": 0, + "content": "Compared Baselines. 
Various noise generation methods are evaluated to demonstrate the effectiveness of these baselines performed on the downstream real image denoising task, including GCBD [8], C2N [16], Flow-sRGB [20], NeCA-S and NeCA-W. When assessing denoising performance, classical denoisers such as BM3D [9] and WNNM [12] are also included in the experiments." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.47, + 0.9 + ], + "angle": 0, + "content": "Preparation. We establish the synthetic SIDD where clean images are from the original SIDD and noisy images are synthesized by using NeCA-W and NeCA-S. Specifically, the proposed framework is trained on the entire SIDD for each camera and the whole framework (NeCA-W) is used to" + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.088, + 0.895, + 0.228 + ], + "angle": 0, + "content": "
<table><tr><td>Method</td><td>SIDD PSNR(dB)</td><td>SIDD SSIM</td><td>DND PSNR(dB)</td><td>DND SSIM</td></tr>
<tr><td>BM3D [9]</td><td>25.65</td><td>0.685</td><td>34.51</td><td>0.851</td></tr>
<tr><td>WNNM [12]</td><td>25.78</td><td>0.809</td><td>34.67</td><td>0.865</td></tr>
<tr><td>GCBD [8]</td><td>-</td><td>-</td><td>35.58</td><td>0.922</td></tr>
<tr><td>C2N* [16]</td><td>33.76</td><td>0.901</td><td>36.08</td><td>0.903</td></tr>
<tr><td>Flow-sRGB* [20]</td><td>34.74</td><td>0.912</td><td>-</td><td>-</td></tr>
<tr><td>NeCA-S*</td><td>36.10</td><td>0.927</td><td>36.96</td><td>0.938</td></tr>
<tr><td>NeCA-W*</td><td>36.82</td><td>0.932</td><td>37.53</td><td>0.940</td></tr>
<tr><td>Real*</td><td>37.12</td><td>0.934</td><td>37.89</td><td>0.942</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.233, + 0.892, + 0.289 + ], + "angle": 0, + "content": "Table 2. Quantitative evaluation of denoising performance on SIDD and DND benchmark. * denotes the DnCNN denoiser is trained on either the synthetic or real image pairs with the SIDD. (red: the best result, blue: the second best)" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.309, + 0.893, + 0.581 + ], + "angle": 0, + "content": "generate noise for each clean image from the SIDD, where the gain factor is estimated from its paired noisy image. On the other hand, We train NeCA with a few paired images, e.g., three image pairs with varying ISO levels (800, 1600, 3200) from camera N6 and use only NCNet (NeCA-S) to generate signal-independent neighboring correlated (SINC) noise for clean images from SIDD, as seen in Figure 3. The synthesized SINC noise is added to the clean image. For each clean image, the noise level of AWGN is randomly selected from a range of [0, 75]. Our experiments with NeCA-S aim to demonstrate the advantages of explicitly modeling the neighboring correlation of real noise. Other sRGB real noise generation baselines, including C2N [16] and Flow-sRGB [20], also follow the same experimental settings with NeCA-W. With the synthetic noisy-clean image pairs, we train the DnCNN on either synthetic or real pairs of SIDD. Then the denoising performances are evaluated on both the SIDD and DND [23] benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.582, + 0.893, + 0.884 + ], + "angle": 0, + "content": "Results and Discussions. Table 2 shows the denoising results of the compared denoisers. Obviously, DnCNN trained on the synthetic samples from NeCA-W, achieves the best results among all compared methods in terms of both PSNR and SSIM. Specifically, NeCA-W gets 2.08 dB gains from Flow-sRGB on the SIDD benchmark, where Flow-sRGB is an end-to-end flow model which implicitly synthesizes real noise. The improvement of denoising performance obtained by NeCA-W indicates the accuracy of our noise model. Moreover, even though the denoising performance of NeCA-W still does not surpass the denoiser trained on the real data, the slight PSNR and SSIM discrepancies between them suggest our model does shrink this gap. Furthermore, the most impressive thing is that NeCA-S still achieves comparable denoising results on both the SIDD and DND benchmarks, outperforming the Flow-sRGB by a large margin. Note that the synthetic noise from NeCA-S is signal-independent. The superior performance of NeCA-S further verifies explicitly modeling neighboring correlation benefits the sRGB real noise synthesis." 
+ }, + { + "type": "text", + "bbox": [ + 0.518, + 0.886, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Figure 6 and 7 show the denoised images from the SIDD" + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "1689" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.132, + 0.089, + 0.249, + 0.18 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.165, + 0.183, + 0.216, + 0.194 + ], + "angle": 0, + "content": "(a) Clean" + }, + { + "type": "image", + "bbox": [ + 0.25, + 0.089, + 0.366, + 0.179 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.272, + 0.181, + 0.345, + 0.194 + ], + "angle": 0, + "content": "(b) \\(\\hat{\\beta} = 0.02\\)" + }, + { + "type": "image", + "bbox": [ + 0.368, + 0.089, + 0.482, + 0.179 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.389, + 0.181, + 0.462, + 0.194 + ], + "angle": 0, + "content": "(c) \\(\\hat{\\beta} = 0.06\\)" + }, + { + "type": "image", + "bbox": [ + 0.482, + 0.089, + 0.601, + 0.179 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.507, + 0.181, + 0.579, + 0.194 + ], + "angle": 0, + "content": "(d) \\(\\hat{\\beta} = 0.10\\)" + }, + { + "type": "image", + "bbox": [ + 0.603, + 0.089, + 0.718, + 0.179 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.625, + 0.181, + 0.696, + 0.195 + ], + "angle": 0, + "content": "(e) \\(\\hat{\\beta} = 0.14\\)" + }, + { + "type": "image", + "bbox": [ + 0.72, + 0.089, + 0.835, + 0.179 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.743, + 0.181, + 0.812, + 0.194 + ], + "angle": 0, + "content": "(f) \\(\\hat{\\beta} = 0.18\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.152, + 0.2, + 0.816, + 0.214 + ], + "angle": 0, + "content": "Figure 5. Results of controllable noise synthesis. The gain factor ranges from 0.02 to 0.18 with intervals of 0.04." 
+ }, + { + "type": "image", + "bbox": [ + 0.095, + 0.227, + 0.213, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.123, + 0.324, + 0.186, + 0.337 + ], + "angle": 0, + "content": "(a) Noisy" + }, + { + "type": "image", + "bbox": [ + 0.217, + 0.228, + 0.334, + 0.32 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.231, + 0.323, + 0.317, + 0.337 + ], + "angle": 0, + "content": "(b) C2N [16]" + }, + { + "type": "image", + "bbox": [ + 0.335, + 0.228, + 0.453, + 0.32 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.354, + 0.323, + 0.434, + 0.337 + ], + "angle": 0, + "content": "(c) NeCA-S" + }, + { + "type": "image", + "bbox": [ + 0.094, + 0.338, + 0.211, + 0.428 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.108, + 0.432, + 0.196, + 0.445 + ], + "angle": 0, + "content": "(d) NeCA-W" + }, + { + "type": "image", + "bbox": [ + 0.214, + 0.338, + 0.331, + 0.428 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.246, + 0.432, + 0.299, + 0.445 + ], + "angle": 0, + "content": "(e) Real" + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.338, + 0.451, + 0.428 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.362, + 0.432, + 0.422, + 0.445 + ], + "angle": 0, + "content": "(f) Clean" + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.449, + 0.47, + 0.491 + ], + "angle": 0, + "content": "Figure 6. Denoising results on the SIDD dataset. DnCNN denoisers are trained on the noisy images from (b) C2N, (c, d) our models, and (e) real noisy images of the SIDD." + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.511, + 0.213, + 0.605 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.123, + 0.608, + 0.185, + 0.622 + ], + "angle": 0, + "content": "(a) Noisy" + }, + { + "type": "image", + "bbox": [ + 0.216, + 0.512, + 0.334, + 0.605 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.228, + 0.607, + 0.321, + 0.621 + ], + "angle": 0, + "content": "(b) BM3D [9]" + }, + { + "type": "image", + "bbox": [ + 0.334, + 0.512, + 0.453, + 0.605 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.608, + 0.436, + 0.622 + ], + "angle": 0, + "content": "(c) C2N [16]" + }, + { + "type": "image", + "bbox": [ + 0.094, + 0.622, + 0.212, + 0.713 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.716, + 0.192, + 0.73 + ], + "angle": 0, + "content": "(d) NeCA-S" + }, + { + "type": "image", + "bbox": [ + 0.214, + 0.622, + 0.332, + 0.713 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.229, + 0.716, + 0.315, + 0.73 + ], + "angle": 0, + "content": "(e) NeCA-W" + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.622, + 0.452, + 0.713 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.366, + 0.716, + 0.418, + 0.73 + ], + "angle": 0, + "content": "(f) Real" + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.734, + 0.47, + 0.776 + ], + "angle": 0, + "content": "Figure 7. Denoising results on the DND dataset. DnCNN denoisers are trained on the noisy images from (c) C2N, (d, e) our models, and (f) real noisy images of the SIDD." 
+ }, + { + "type": "text", + "bbox": [ + 0.077, + 0.795, + 0.47, + 0.901 + ], + "angle": 0, + "content": "and DND datasets. The results indicate that the denoisers trained on the synthetic image pairs from NeCA-W and NeCA-S achieve similar denoising results compared to the denoiser trained on real image pairs. In contrast, the denoiser trained on noisy samples from C2N, which employs an unpaired training scheme, fails to suppress the noise effectively, partly due to its unpaired train scheme." + }, + { + "type": "table", + "bbox": [ + 0.54, + 0.227, + 0.85, + 0.268 + ], + "angle": 0, + "content": "
<table><tr><td>Loss</td><td>w/o Lstd2</td><td>w/o Ladv2</td><td>w/o Lreg</td><td>all</td></tr>
<tr><td>KL</td><td>0.052</td><td>0.048</td><td>0.108</td><td>0.041</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.278, + 0.892, + 0.32 + ], + "angle": 0, + "content": "Table 3. Ablation study on the effectiveness of different loss functions. We train the framework on the training set of camera IP and calculate KL divergence on its validation set." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.338, + 0.719, + 0.352 + ], + "angle": 0, + "content": "4.4.Customized Generation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.362, + 0.892, + 0.484 + ], + "angle": 0, + "content": "Our proposed noise synthesizing framework allows for controlling the generated noise with multiple noise levels by manipulating the gain factors. Figure 5 illustrates the controllable synthesizing results, which are generated by varying the gain factor within the range of 0.02 to 0.18 with intervals of 0.04. The results demonstrate that an increase in the gain factor value leads to a proportional increase in the magnitude of the generated noise." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.497, + 0.653, + 0.513 + ], + "angle": 0, + "content": "4.5. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.521, + 0.892, + 0.732 + ], + "angle": 0, + "content": "In this section, we conduct ablation studies to verify the effectiveness of individual loss functions in our framework, including \\(\\mathcal{L}_{std2}\\), \\(\\mathcal{L}_{adv2}\\) and \\(\\mathcal{L}_{reg}\\). We exclude \\(\\mathcal{L}_{std1}\\) and \\(\\mathcal{L}_{adv1}\\) from evaluation since they are indispensable for framework training. As indicated in Table 3, the model achieves optimal performance in KL divergence with complete loss functions, suggesting all the components contribute to the final synthetic noise. However, removing \\(\\mathcal{L}_{reg}\\) significantly reduces the KL divergence, suggesting the importance of stabilizing the training process. Moreover, both \\(\\mathcal{L}_{adv2}\\) and \\(\\mathcal{L}_{std2}\\) improve the quality of synthetic noise, supporting our claim that \\(\\mathcal{L}_{adv2}\\) serves as a complementary loss for \\(\\mathcal{L}_{std2}\\), enabling the NPNet to predict more accurate noise levels." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.75, + 0.619, + 0.765 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.775, + 0.892, + 0.9 + ], + "angle": 0, + "content": "In this paper, we propose a neighboring correlation-aware noise model for sRGB real noise generation. Our proposed method effectively bridges the gap between synthetic noise and real noise by explicitly modeling the signal dependency and neighboring correlation of real noise. The experimental results demonstrate the proposed noise model achieves superior performance on both real noise synthesis and downstream real image denoising tasks." + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1690" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.116, + 0.468, + 0.157 + ], + "angle": 0, + "content": "[1] Abdelrahman Abdelhamed, Marcus A Brubaker, and Michael S Brown. Noise flow: Noise modeling with conditional normalizing flows. In ICCV, 2019. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.159, + 0.468, + 0.2 + ], + "angle": 0, + "content": "[2] Abdelrahman Abdelhamed, Stephen Lin, and Michael S Brown. 
A high-quality denoising dataset for smartphone cameras. In CVPR, 2018. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.202, + 0.468, + 0.228 + ], + "angle": 0, + "content": "[3] Saeed Anwar and Nick Barnes. Real image denoising with feature attention. In ICCV, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.231, + 0.468, + 0.271 + ], + "angle": 0, + "content": "[4] Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.273, + 0.468, + 0.315 + ], + "angle": 0, + "content": "[5] Tim Brooks, Ben Mildenhall, Tianfan Xue, Jiawen Chen, Dillon Sharlet, and Jonathan T Barron. Unprocessing images for learned raw denoising. In CVPR, 2019. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.317, + 0.468, + 0.358 + ], + "angle": 0, + "content": "[6] Ke-Chi Chang, Ren Wang, Hung-Jin Lin, Yu-Lun Liu, Chia-Ping Chen, Yu-Lin Chang, and Hwann-Tzong Chen. Learning camera-aware noise models. In ECCV, 2020. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.36, + 0.468, + 0.4 + ], + "angle": 0, + "content": "[7] Guangyong Chen, Fengyuan Zhu, and Pheng Ann Heng. An efficient statistical method for image noise level estimation. In ICCV, 2015. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.402, + 0.468, + 0.444 + ], + "angle": 0, + "content": "[8] Jingwen Chen, Jiawei Chen, Hongyang Chao, and Ming Yang. Image blind denoising with generative adversarial network based noise modeling. In CVPR, 2018. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.446, + 0.468, + 0.487 + ], + "angle": 0, + "content": "[9] Kostadin Dabov, Alessandro Foi, Vladimir Katkovnik, and Karen Egiazarian. Image denoising by sparse 3-d transform-domain collaborative filtering. TIP, 2007. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.489, + 0.468, + 0.528 + ], + "angle": 0, + "content": "[10] Alessandro Foi. Clipped noisy images: Heteroskedastic modeling and practical denoising. Signal Processing, 2009. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.532, + 0.468, + 0.585 + ], + "angle": 0, + "content": "[11] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial networks. Communications of the ACM, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.588, + 0.468, + 0.629 + ], + "angle": 0, + "content": "[12] Shuhang Gu, Lei Zhang, Wangmeng Zuo, and Xiangchu Feng. Weighted nuclear norm minimization with application to image denoising. In CVPR, 2014. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.631, + 0.468, + 0.672 + ], + "angle": 0, + "content": "[13] Ishaan Gulrajani, Faruk Ahmed, Martin Arjovsky, Vincent Dumoulin, and Aaron C Courville. Improved training of wasserstein gans. NIPS, 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.674, + 0.468, + 0.715 + ], + "angle": 0, + "content": "[14] Lanqing Guo, Siyu Huang, Haosen Liu, and Bihan Wen. Fino: Flow-based joint image and noise model. arXiv preprint arXiv:2111.06031, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.717, + 0.468, + 0.758 + ], + "angle": 0, + "content": "[15] Shi Guo, Zifei Yan, Kai Zhang, Wangmeng Zuo, and Lei Zhang. Toward convolutional blind denoising of real photographs. In CVPR, 2019. 
1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.761, + 0.468, + 0.801 + ], + "angle": 0, + "content": "[16] Geonwoon Jang, Wooseok Lee, Sanghyun Son, and Kyoung Mu Lee. C2n: Practical generative noise modeling for real-world denoising. In ICCV, 2021. 1, 2, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.803, + 0.468, + 0.857 + ], + "angle": 0, + "content": "[17] Dong-Wook Kim, Jae Ryun Chung, and Seung-Won Jung. Grdn: Grouped residual dense network for real image denoising and gan-based real-world noise modeling. In CVPRW, 2019. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.86, + 0.468, + 0.899 + ], + "angle": 0, + "content": "[18] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 6" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.116, + 0.468, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "[19] Durk P Kingma and Prafulla Dhariwal. Glow: Generative flow with invertible 1x1 convolutions. NIPS, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.122, + 0.892, + 0.163 + ], + "angle": 0, + "content": "[20] Shayan Kousha, Ali Maleky, Michael S Brown, and Marcus A Brubaker. Modeling srgb camera noise with normalizing flows. In CVPR, 2022. 1, 2, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.165, + 0.892, + 0.205 + ], + "angle": 0, + "content": "[21] Ce Liu, William T Freeman, Richard Szeliski, and Sing Bing Kang. Noise estimation from a single image. In CVPR, 2006. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.208, + 0.892, + 0.261 + ], + "angle": 0, + "content": "[22] Seonghyeon Nam, Youngbae Hwang, Yasuyuki Matsushita, and Seon Joo Kim. A holistic approach to cross-channel image noise modeling and its application to image denoising. In CVPR, 2016. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.264, + 0.892, + 0.291 + ], + "angle": 0, + "content": "[23] Tobias Plotz and Stefan Roth. Benchmarking denoising algorithms with real photographs. In CVPR, 2017. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.294, + 0.892, + 0.334 + ], + "angle": 0, + "content": "[24] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. TIP, 2004. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.336, + 0.892, + 0.377 + ], + "angle": 0, + "content": "[25] Kaixuan Wei, Ying Fu, Jiaolong Yang, and Hua Huang. A physics-based noise formation model for extreme low-light raw denoising. In CVPR, 2020. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.379, + 0.892, + 0.42 + ], + "angle": 0, + "content": "[26] Zongsheng Yue, Qian Zhao, Lei Zhang, and Deyu Meng. Dual adversarial network: Toward real-world noise removal and noise generation. In ECCV, 2020. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.422, + 0.892, + 0.463 + ], + "angle": 0, + "content": "[27] Kai Zhang, Wangmeng Zuo, Yunjin Chen, Deyu Meng, and Lei Zhang. Beyond a gaussian denoiser: Residual learning of deep cnn for image denoising. TIP, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.465, + 0.892, + 0.505 + ], + "angle": 0, + "content": "[28] Yi Zhang, Hongwei Qin, Xiaogang Wang, and Hongsheng Li. Rethinking noise synthesis and modeling in raw denoising. In ICCV, 2021. 
1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.507, + 0.892, + 0.547 + ], + "angle": 0, + "content": "[29] Yuqian Zhou, Jianbo Jiao, Haibin Huang, Yang Wang, Jue Wang, Honghui Shi, and Thomas Huang. When awgn-based denoiser meets real noises. In AAAI, 2020. 4, 5" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.547 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "1691" + } + ] +] \ No newline at end of file diff --git a/2023/sRGB Real Noise Synthesizing With Neighboring Correlation-Aware Noise Model/4d3b606f-ee7b-460f-b2bf-d38a08fa5304_origin.pdf b/2023/sRGB Real Noise Synthesizing With Neighboring Correlation-Aware Noise Model/4d3b606f-ee7b-460f-b2bf-d38a08fa5304_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..83150ff9cb227bfa443ed1c2e03117cdc8e8c6f7 --- /dev/null +++ b/2023/sRGB Real Noise Synthesizing With Neighboring Correlation-Aware Noise Model/4d3b606f-ee7b-460f-b2bf-d38a08fa5304_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53044cb42da2307fd296f2a779d8ff2f9f762e91ad26811006697ffbb87ef0a9 +size 3080191 diff --git a/2023/sRGB Real Noise Synthesizing With Neighboring Correlation-Aware Noise Model/full.md b/2023/sRGB Real Noise Synthesizing With Neighboring Correlation-Aware Noise Model/full.md new file mode 100644 index 0000000000000000000000000000000000000000..cd955dc13877d616dcb882c0cb0f562e6dd11bab --- /dev/null +++ b/2023/sRGB Real Noise Synthesizing With Neighboring Correlation-Aware Noise Model/full.md @@ -0,0 +1,363 @@ +# sRGB Real Noise Synthesizing with Neighboring Correlation-Aware Noise Model + +Zixuan Fu $^{1*}$ , Lanqing Guo $^{1*}$ , Bihan Wen $^{1\dagger}$ $^{1}$ Nanyang Technological University, Singapore +{zixuan.fu, lanqing001, bihan.wen}@ntu.edu.sg + +# Abstract + +Modeling and synthesizing real noise in the standard RGB (sRGB) domain is challenging due to the complicated noise distribution. While most of the deep noise generators proposed to synthesize sRGB real noise using an end-to-end trained model, the lack of explicit noise modeling degrades the quality of their synthesized noise. In this work, we propose to model the real noise as not only dependent on the underlying clean image pixel intensity, but also highly correlated to its neighboring noise realization within the local region. Correspondingly, we propose a novel noise synthesizing framework by explicitly learning its neighboring correlation on top of the signal dependency. With the proposed noise model, our framework greatly bridges the distribution gap between synthetic noise and real noise. We show that our generated "real" sRGB noisy images can be used for training supervised deep denoisers, thus to improve their real denoising results with a large margin, comparing to the popular classic denoisers or the deep denoisers that are trained on other sRGB noise generators. The code will be available at https://github.com/xuan611/sRGB-Real-NoiseSynthesizing. + +# 1. Introduction + +Real image denoising is one of the most challenging tasks in low-level vision. Deep denoisers that are trained using synthetic noise, e.g., Additive White Gaussian Noise (AWGN), perform poorly on real photography [3, 15], which motivates more realistic noise models, e.g., [1, 5, 14-16]. In general, there are two approaches towards real noise modeling, i.e., modeling in the raw-RGB and standard RGB (sRGB) domains. 
Popular modeling methods including the physical-based [25, 28] and data-driven methods [1, 6] exploit sophisticated noise models in the raw-RGB domain, which demonstrated promising perfor + +mance as noise in raw-RGB is largely simplified comparing to noise in sRGB [20, 22]. However, raw-RGB images are not usually utilized by common users due to their large sizes. In contrast, most commercial cameras generate sRGB images by default, which are more popular in practice. Unfortunately, the noise generation methods in the raw-RGB domain cannot be directly applied to sRGB images, as the real noise distribution in sRGB is more complicated than raw-RGB noise, caused by the in-camera signal processing (ISP) pipeline [22]. + +Recent works [5, 15] proposed to generate noise on raw-RGB images and convert them into sRGB images by the ISP pipeline including demosaicing, white balancing, gamma correction, etc. While these methods synthesized realistic noise, the requirement of raw-RGB images as well as manually defined ISP pipelines limits their applications. An alternative solution for sRGB real noise modeling is to train the generative models with sRGB noisy-clean images and directly synthesize real noise on sRGB images [16,17,20,26]. However, these models synthesize noise without explicitly modeling the characteristics of sRGB real noise, resulting in degradation of the quality of the synthesized noise. + +In this paper, we propose a novel real noise generation network, based on Neighboring Correlation-Aware noise model, dubbed as NeCA, to directly synthesize real noise in the sRGB domain. The proposed real noise synthesis assumes that the sRGB real noise is not only signal-dependent, i.e., noise level partially depends on its underlying clean pixel, but also highly correlated with its neighboring noise realization. Such a real noise model greatly bridges the gap between the synthetic and real noise in sRGB. Furthermore, the synthesized "real" images by the proposed NeCA can be used for training supervised deep denoisers, thus tackling the real image denoising challenges, subjective to only a few real training data. The trained deep denoiser using our synthetic noisy images achieves state-of-the-art denoising performance, compared to the popular classic denoisers as well as deep denoisers that are trained on synthetic pairs from other noise models. To sum up, our main contributions can be concluded as follows: + +- We introduce a neighboring correlation-aware noise model for sRGB real noise synthesis by explicitly modeling the neighboring correlation of real noise, to bridge the gap between the synthetic and real noise distribution in sRGB. +- Our proposed framework shows a well-generalized ability, which is still capable to improve the real image denoising performance even with limited training data. +- With the synthetic image pairs generated by NeCA, the trained denoisers achieve state-of-the-art denoising performance compared with the deep denoisers trained with other real noise models. + +# 2. Related Work + +# 2.1. Raw-RGB Image Noise Synthesis + +Modeling real noise in raw-RGB is challenging as it cannot be simply assumed as Additive White Gaussian Noise (AWGN). Typically, raw-RGB noise models can be classified into two categories: physical-based models and learning-based models. 
One of the most commonly used physical-based models is the heteroscedastic Gaussian noise [10], which posits noise value, located at pixel $i$ , is dependent on its underlying clean pixel intensity: + +$$ +\boldsymbol {n} _ {i} \sim \mathcal {N} \left(0, \sigma_ {s} ^ {2} \cdot \boldsymbol {x} _ {i} + \sigma_ {c} ^ {2}\right), \tag {1} +$$ + +where $n$ and $x$ are noise and clean image in the raw-RGB domain, while $\sigma_{s}$ and $\sigma_{c}$ denote the noise variance term for signal-dependent and signal-independent components. Such a noise model is also known as the noise level function (NLF) as it describes the relationship between the pixelwise noise level and image intensity. To better model the camera sensor noise, recent works [25, 28] have proposed that real noise is a sophisticated combination of shot noise, read noise and row noise, etc. + +Compared to statistical modeling of noise, learning-based models learn the real noise distribution with generative models such as the generative adversarial nets (GANs) [6] and normalization flows [1] from paired noisy-clean images. Although these methods perform well in raw-RGB, they cannot be directly applied to model sRGB real noise since their assumptions are based on the characteristics of raw-RGB noise. For instance, these noise generators synthesize raw-RGB noise from an initialized heteroscedastic Gaussian noise (as described in Equation (1)), which fails to provide an accurate representation of real noise in the sRGB domain [21, 22]. + +# 2.2. sRGB Image Noise Synthesis + +The camera ISP pipeline, including demosaicing, tone mapping, white balancing, gamma mapping, etc., makes + +real noise in the sRGB domain to be more complicated than it is in the raw-RGB domain. To synthesize sRGB real noise, two approaches have been proposed: (1) synthesizing noisy samples in the raw-RGB domain and rendering them into sRGB images by applying the manually defined ISP pipeline [5, 15], and (2) directly synthesizing real noise in the sRGB domain [8, 16, 17, 20, 26]. + +In CBDNet [15], heteroscedastic Gaussian noise is added on raw-RGB clean images, and images are converted into sRGB using demosaicing and camera response functions. However, CBDNet requires raw-RGB images, which are not commonly used. To address this issue, unprocessing image (UPI) [5] proposes to de-render sRGB images into raw-RGB images using several predefined unprocessing pipelines. Similar procedures used in CBDNet are then applied to the unprocessed raw-RGB images to obtain their sRGB versions. + +Despite CBDNet and UPI effectively synthesize sRGB real noise, they still require predefined ISP pipelines, which may not match real ones used in different camera sensors. Therefore, generating real noise directly in the sRGB domain with deep generative models [11, 19] is considered an alternative solution. GCBD [8] proposes a GAN-based model that learns noise distributions by training on noise patches that have been cropped from noisy images. However, the synthesized noise is signal-independent as it is generated from random noise. DANet [26] and GRDN [17] use conditional generative networks to synthesize signal-dependent noise, however, few experiments are conducted to demonstrate the effectiveness of the proposed noise generators. C2N [16] attempts to synthesize the real noise with unpaired clean-noisy images, but the generated noise contains artifacts and color-shift problems due to the unpaired training mode. Recently, Kousha et al. 
[20] propose a conditional flow-based model for sRGB image noise generation that takes clean images, camera types, and ISO levels as input. However, the denoiser, trained with synthetic data, improves marginally compared to the unpaired noise generation method C2N. Unlike previous attempts that model noise with an end-to-end generator, our proposed method explicitly decomposes signal dependency and neighboring correlation of real noise and learns them with separate networks. + +# 3. Method + +# 3.1. Neighboring Correlation-Aware Noise Model + +In this section, we present our proposed noise model for sRGB real noise. We begin by introducing the basic noise model, which defines the signal dependency of pixel-wise noise level and its underlying clean pixels. We then discuss discrepancies between noise synthesized by the basic noise model and sRGB real noise and propose to bridge this gap + +by explicitly modeling noise neighboring correlation on top of the signal dependency. + +Basic Noise Model. Both raw-RGB and sRGB real noise are dependent on the image signal. In raw-RGB, the noise level can be approximated as a simple function of its underlying clean pixel intensity, i.e., heteroscedastic Gaussian noise described in Equation (1). However sRGB real noise is more complex due to camera settings and signal transformations in the ISP pipeline [20-22]. To address this challenge, we propose a noise model that characterizes the signal dependency of sRGB real noise. Specifically, for an sRGB clean image $\mathbf{x} = (x_{1},\dots,x_{N})$ and its paired noisy version $\mathbf{y} = (y_{1},\dots,y_{N})$ , we define noise level at pixel $i$ as a function of the clean image patch $\Omega_{\mathbf{x}}$ , centered at clean pixel $x_{i}$ , and camera ISO level $\gamma$ : + +$$ +\boldsymbol {\sigma} _ {i} = f \left(\Omega_ {\boldsymbol {x}}, \gamma\right), \tag {2} +$$ + +where $f(\cdot)$ represents the non-linear relationship of $\Omega_{\pmb{x}},\gamma$ and the pixel-wise noise level $\sigma_{i} = (\sigma_{i,r},\sigma_{i,g},\sigma_{i,b})$ for three color channels. For the sake of clarity, we omit the location index $i$ in the expression for the local region $\Omega_{\pmb{x}}$ . Then the distribution of noise $\pmb{v}$ at each pixel is modeled as a Gaussian distribution: + +$$ +\boldsymbol {v} _ {i, c} \sim \mathcal {N} (0, \sigma_ {i, c} ^ {2}), \tag {3} +$$ + +where $c$ is the index of RGB channels. We further define the noise level map $\pmb{m}$ , which has the same size as the clean image and the value at pixel $i$ refers to the noise level $\sigma_{i}$ . Finally, we can simulate signal-dependent noise as follows: + +$$ +\boldsymbol {v} = \boldsymbol {\epsilon} \odot \boldsymbol {m}, \quad \epsilon_ {i, c} \sim \mathcal {N} (0, 1 ^ {2}). \tag {4} +$$ + +Neighboring Correlation Noise Model. The noise synthesized by the basic noise model still exhibits discrepancies with real noise, as shown in Figure 1(b) and (d). We attribute this gap to the improper noise realization defined in Equation (4), where noise is sampled spatially independently from the basic noise model. Specifically, the most commonly used noise models, including the AWGN, heteroscedastic Gaussian noise, and our basic noise model, assume that the noise distribution is independent at each pixel, and the noise is sampled from the noise distribution without considering its neighboring synthesized noise. 
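
For illustration, this spatially independent realization in Equation (4) can be sketched in a few lines of NumPy. This is only a minimal sketch, not the implementation used in our framework; `sample_sdnu_noise` is an illustrative name, and the constant map in the usage example is a hypothetical stand-in for the noise level map $\pmb{m}$ defined above.

```python
import numpy as np

def sample_sdnu_noise(noise_level_map, seed=None):
    """Pixel-wise independent realization of Eq. (4): v = eps * m.

    noise_level_map: array of shape (H, W, 3) playing the role of the map m,
    i.e. the per-pixel, per-channel standard deviation of the noise.
    Every entry of eps is drawn independently of its neighbors, so the
    resulting noise is signal-dependent but neighboring-uncorrelated (SDNU).
    """
    rng = np.random.default_rng(seed)
    eps = rng.standard_normal(noise_level_map.shape)  # eps_{i,c} ~ N(0, 1)
    return eps * noise_level_map                      # v_{i,c} ~ N(0, sigma_{i,c}^2)

# Toy usage with a constant (hypothetical) noise level of 0.05 everywhere.
v = sample_sdnu_noise(np.full((96, 96, 3), 0.05), seed=0)
print(v.shape, round(float(v.std()), 4))  # (96, 96, 3), std close to 0.05
```
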
However, this noise realization method is inadequate to synthesize RGB real noise as the noise value is assumed to be highly correlated with its neighboring noise values due to the influence of the ISP pipeline such as demosaicing, which introduces neighboring operations. We refer to this characteristic of noise as neighboring correlation and define a neighboring correlation operator $g(\cdot)$ that maps such the correlation onto the synthesized signal-dependent noise $v$ : + +$$ +\boldsymbol {n} _ {i} = g \left(\Omega_ {\boldsymbol {v}}\right), \tag {5} +$$ + +![](images/2b5db907cea09a82760c07c93a4c77d9c48573121172ee500c1fb92c0698dd7c.jpg) +(a) Clean + +![](images/9a3eb8d5e8d9a04de9fb6a392ea24c537d2b7145ddad57be520b76b8c6b79296.jpg) +(b) SDNU Noise + +![](images/5fccf73333b805713134bd0baec7e835e7b71f49e853a79f7fd20b1507e305a9.jpg) +(c) SDNC Noise + +![](images/a8807c5a9d0283918fd60de1f86bd60930c0c63cb2e3ada6e796305c537c640b.jpg) +(d) Real +Figure 1. The visualization of modeling signal dependency and neighboring correlation of sRGB real noise. (a) Clean image. (b) Synthetic signal-dependent and neighboring uncorrelated (SDNU) noise. (c) Synthetic signal-dependent and neighboring correlated (SDNC) noise. (d) Real noise. We add a constant value to the noise maps for better visualizing the signal dependency. + +where $n$ is the neighboring correlated noise and $\Omega_{\mathbf{v}}$ is the local patch of $\mathbf{v}$ , centered at pixel $i$ . By processing the neighboring uncorrelated noise $\mathbf{v}$ with the neighboring correlation operator, which is learned by our proposed noise synthesizing framework in Section 3.2, the final generated noise performs similar characteristics to real noise, as demonstrated in Figure 1(c) and (d). For the purpose of clarity, we use SDNU noise to refer to the intermediate synthesized signal-dependent and neighboring uncorrelated noise $\mathbf{v}$ , and SDNC noise to refer to the final generated signal-dependent and neighboring correlated noise $\mathbf{n}$ . In the following sections, we will introduce the proposed noise synthesizing framework to explicitly learn the neighboring correlation and signal dependency of noise. + +# 3.2. Noise Synthesizing Framework + +Given paired sRGB real-world noisy and clean images $(\pmb{y},\pmb{x})$ , where $\pmb{y} = \pmb{x} + \pmb{n}$ , our proposed framework aims to learn the neighboring correlation-aware noise model using paired data. Our proposed framework, as illustrated in Figure 2, comprises three networks: a gain estimation network (GENet), a noise-level prediction network (NPNet), and a neighboring correlation network (NCNet). GENet estimates the gain factor from a noisy image, which serves to amplify the synthesized noise, similar to the ISO level. NPNet synthesizes the SDNU noise by incorporating the estimated gain factor and the clean image as inputs. Finally, NCNet explicitly models the neighboring correlation of sRGB real noise and generates the SDNC noise. + +Gain Estimation Network. The gain estimation network (GENet) is designed to estimate the gain factor from a noisy image $\pmb{y}$ , which serves as guidance to control the overall magnitude of the synthesized noise. The gain factor is de + +![](images/483648faafa1e16ca21cecab943ac122dbf1a884d756eea629e328fddeb735d3.jpg) +Noise-level prediction network +Figure 2. The proposed noise synthesizing framework. 
Our NeCA contains three networks including the gain estimation network (GENet), noise-level prediction network (NPNet), and neighboring correlation network (NCNet). PD denotes the Pixel-shuffle Down-sampling scheme introduced in [29]. Local noise level estimation and global noise level estimation operations are formulated in Equation (13) and (6). The details of the network architecture and PD scheme are described in the supplementary material. + +fined as the global noise level of the noisy image, which is the standard deviation calculated by every noise value in its noise $\pmb{n}$ : + +$$ +\beta = \sqrt {\frac {1}{N} \sum_ {i , c} \left(\boldsymbol {n} _ {i , c} - \bar {n}\right) ^ {2}}, \tag {6} +$$ + +where $\beta$ is the defined global noise level of the noisy image $\pmb{y}$ , $\bar{n}$ is the mean of the noise $\pmb{n}$ , and $N$ is the total number of pixels in the noisy image. However, during testing, the calculated gain factor is unavailable. To solve this, we aim to estimate the gain factor from the noisy image using GENet: + +$$ +\hat {\beta} = E (\boldsymbol {y}), \tag {7} +$$ + +where $E$ represents the GENet, and $\hat{\beta}$ is the estimated gain factor by GENet, which is expected to be as close as the global noise level of the noisy image. The main reason to use the gain factor estimated from the noisy image rather than the ISO level is driven by a crucial factor. ISO levels are typically saved in the metadata of images. The requirement of metadata will limit the application of our noise synthesizing framework. + +Noise-level Prediction Network. The noise-level prediction network (NPNet) learns a parametric model for the noise distribution defined in Equation (3). To achieve this, NPNet predicts the pixel-wise noise level $\hat{\sigma}_i$ using the clean local patch $\Omega_{\pmb{x}}$ and estimated gain factor $\hat{\beta}$ : + +$$ +\hat {\boldsymbol {\sigma}} _ {i} = G _ {1} \left(\Omega_ {\boldsymbol {x}}, \hat {\beta}\right), \tag {8} +$$ + +where $G_{1}$ denotes the NPNet, which has three output channels to predict noise levels for each pixel. To effectively + +incorporate the gain factor into the NPNet, we first apply the layer normalization [4] to the feature map of convolution and then multiply the normalized feature map by the gain factor. In practice, NPNet directly outputs the predicted noise level map $\hat{m}$ by utilizing a clean image and gain factor: + +$$ +\hat {\boldsymbol {m}} = G _ {1} (\boldsymbol {x}, \hat {\beta}). \tag {9} +$$ + +Once the noise level map $\hat{m}$ is obtained, the SDNU noise $\hat{v}$ can be synthesized by using the sampling trick defined in Equation (4). + +Neighboring Correlation Network. The neighboring correlation network (NCNet) performs as the neighboring correlation operator, described in Equation (5). By taking the noise value and its neighboring noise realization as input, NCNet generates the SDNC noise $\hat{n}$ : + +$$ +\hat {\boldsymbol {n}} _ {i} = G _ {2} \left(\Omega_ {\hat {\boldsymbol {v}}}\right), \tag {10} +$$ + +where $\Omega_{\hat{v}}$ is the noise patch of $\hat{v}$ located at pixel $i$ and $G_{2}$ denotes the NCNet. The SDNC noise can be directly generated by taking the SDNU noise into the network: + +$$ +\hat {\boldsymbol {n}} = G _ {2} (\hat {\boldsymbol {v}}). \tag {11} +$$ + +# 3.3. 
Loss Functions + +To jointly train the proposed networks, five loss functions are introduced: (1) standard deviation losses $\mathcal{L}_{std1}$ and $\mathcal{L}_{std2}$ , (2) adversarial losses $\mathcal{L}_{adv1}$ and $\mathcal{L}_{adv2}$ , (3) the regularization loss $\mathcal{L}_{reg}$ . The details of these loss functions will be introduced later. + +Standard Deviation Loss. We introduce $\mathcal{L}_{std1}$ to enforce the estimated gain factor $\hat{\beta}$ by GENet to be close to the + +global noise level $\beta$ of the noisy image, which is defined as follows: + +$$ +\mathcal {L} _ {s t d 1} = \mathbb {E} _ {\boldsymbol {y}} \left[ (\hat {\beta} - \beta) ^ {2} \right], \tag {12} +$$ + +where $\beta$ and $\hat{\beta}$ are obtained by Equation (6) and (7). + +The objective of NPNet is to predict the noise level map $\hat{m}$ by taking the clean image and gain factor as input. However, since the groundtruth noise level map is not available, we propose to use a simple local noise level estimation method to approximate the noise level map $m$ from the noise, which is calculated as follows: + +$$ +\boldsymbol {m} _ {i, c} = \sqrt {\mathcal {M F} \left(\Omega_ {n} ^ {2}\right) - \mathcal {M F} ^ {2} \left(\Omega_ {n}\right)}, \tag {13} +$$ + +where $\Omega_{n}$ denotes the $7\times 7$ noise patch located at pixel $i$ , channel $c$ of noise map $\pmb{n}$ , and $\mathcal{MF}(\cdot)$ represents the mean filter. Then the $\mathcal{L}_{std2}$ is defined as follows: + +$$ +\mathcal {L} _ {s t d 2} = \mathbb {E} _ {\boldsymbol {x}, \boldsymbol {y}} \left[ \left| | \hat {\boldsymbol {m}} - \boldsymbol {m} \right| \right| _ {2} ^ {2} ]. \tag {14} +$$ + +Adversarial Loss. In order to guarantee that the generated noise shares the similar distribution with real noise, we introduce two adversarial losses. Our first adversarial loss $\mathcal{L}_{adv1}$ is imposed between the final synthetic SDNC noise and real noise $(\hat{n}, n)$ to enforce the highly neighboring correlation in the generated noise, similar to that of the real noise. Our second adversarial loss $\mathcal{L}_{adv2}$ is calculated by using Pixel-shuffle Down-sampling [29] versions of synthesized intermediate noise $\hat{v}$ and real noise $n$ . Specifically, $\mathcal{L}_{adv2}$ servers as a complementary loss for $\mathcal{L}_{std2}$ because estimating the approximate noise level map using Equation (13) may not be reliable, as this method struggles to differentiate between noise originating from different intensities. However, directly calculating the adversarial loss between noise $\hat{v}$ and $n$ is unreasonable since $\hat{v}$ is neighboring uncorrelated. To address this problem, we utilize the Pixel-shuffle Down-sampling (PD) scheme proposed in [29] to obtain down-sampled versions $((\hat{v})_{\downarrow s}, (n)_{\downarrow s})$ of both synthetic noise $\hat{v}$ and real noise $n$ . Here $\downarrow_s$ denotes the PD operation with a stride of $s$ (in this paper, $s$ is set to 3). According to [29], the neighboring correlation in the PD real noise $(n)_{\downarrow s}$ will be greatly attenuated. This allows us to calculate the adversarial loss between the two down-sampled versions. We utilize WGAN-GP [13] to compute adversarial losses, while $\mathcal{L}_{adv1}$ is defined as follows: + +$$ +\mathcal {L} _ {a d v 1} = - \mathbb {E} _ {\hat {\boldsymbol {n}}} \left[ D _ {1} (\hat {\boldsymbol {n}}) \right], \tag {15} +$$ + +where $D_{1}$ is the discriminator for NCNet, which scores the realness of synthesized noise. 
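
As a concrete illustration, the generator-side objective in Equation (15) simply maximizes the critic score of the synthesized noise. The PyTorch sketch below is only illustrative: the critic `d1` is a hypothetical stand-in rather than the discriminator architecture used in our experiments, and the WGAN-GP gradient penalty [13], which belongs to the critic update, is omitted.

```python
import torch
import torch.nn as nn

# Illustrative stand-in critic; the actual D_1 architecture is not shown here.
d1 = nn.Sequential(
    nn.Conv2d(3, 16, 3, padding=1), nn.LeakyReLU(0.2),
    nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(16, 1),
)

def generator_adv_loss(critic, fake_noise):
    """Generator-side WGAN loss of Eq. (15): L_adv1 = -E[D_1(n_hat)]."""
    return -critic(fake_noise).mean()

# Toy usage on a random batch of "synthesized" noise maps of shape (N, C, H, W).
loss = generator_adv_loss(d1, torch.randn(4, 3, 96, 96))
```
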
Similarly, $\mathcal{L}_{adv2}$ is computed as follows: + +$$ +\mathcal {L} _ {a d v 2} = - \mathbb {E} _ {(\hat {\boldsymbol {v}}) \downarrow_ {s}} \left[ D _ {2} \left((\hat {\boldsymbol {v}}) \downarrow_ {s}\right) \right], \tag {16} +$$ + +where $D_{2}$ is the discriminator for NPNet. More detail about the PD scheme and the discriminator losses will be discussed in the supplementary material. + +![](images/4f05c02adcff079c93c835184027659623560bf60716b415777050469c80ba15.jpg) +Figure 3. The designed two inference versions. NeCA-W utilizes the whole framework to synthesize SDNC noise. NeCA-S only adopts NCNet to synthesize signal-independent neighboring correlated (SINC) noise by taking the AWGN as input. + +Regularization Loss. Besides the losses mentioned above, a regularization loss $\mathcal{L}_{reg}$ is utilized to stabilize training. It is imposed between the estimated gain factor $\hat{\beta}$ and the predicted noise level map $\hat{m}$ : + +$$ +\mathcal {L} _ {r e g} = \mathbb {E} _ {\boldsymbol {x}, \boldsymbol {y}} \left[ \| \boldsymbol {w} \| _ {2} ^ {2} \right], \tag {17} +$$ + +where $\pmb{w}_{i,c} = \hat{\pmb{m}}_{i,c} - \hat{\beta}$ + +Finally, the full loss functions of the framework are described as follows: + +$$ +\mathcal {L} = \mathcal {L} _ {\text {r e g}} + \lambda_ {1} \mathcal {L} _ {\text {a d v 1}} + \lambda_ {2} \mathcal {L} _ {\text {a d v 2}} + \lambda_ {3} \mathcal {L} _ {\text {s t d 1}} + \lambda_ {4} \mathcal {L} _ {\text {s t d 2}}, \tag {18} +$$ + +where $\lambda_1, \lambda_2, \lambda_3$ and $\lambda_4$ are hyperparameters to balance the importance between different losses. + +# 3.4. Inference Stage + +We provide two inference versions to generate noise, as illustrated in Figure 3: (1) NeCA-W applies the entire framework to synthesize "real" noise. It first estimates the gain factor from an arbitrary noisy image and synthesizes noise by conditioning on a clean image and the estimated gain factor. (2) NeCA-S is the simplified version of NeCA-W which uses only NCNet for inference. In this method, AWGN is synthesized and then NCNet maps it with the neighboring correlation. We refer to this synthetic noise as signal-independent neighboring correlated (SINC) noise. Notably, NeCA-S still enhances the performance of deep denoiser on real noise, even though the denoiser is trained using the synthesized SINC noise. In the meantime, this inference version only requires minimal data to train the NCNet, which we will demonstrate in our experiments. + +# 4. Experiments + +# 4.1. Experimental Setup + +To assess the effectiveness of our proposed noise synthesizing framework, we conduct experiments in two parts: + +First, we assess the quality of the generated noise. Second, we examine the performance of NeCA on the downstream image denoising task. The details of the experiments will be discussed in the following subsections. + +Dataset. We evaluate our NeCA on the medium version of Smartphone Image Denoising Dataset (SIDD) [2], which comprises 320 noisy-clean image pairs captured by five different smartphone cameras, including Samsung Galaxy S6 Edge (S6), iPhone 7 (IP), Google Pixel (GP), Motorola Nexus 6 (N6), and LG G4 (G4). These images are collected in ten different scenes with varying ISO levels and lighting conditions. The SIDD provides both raw-RGB and sRGB images, with the sRGB version obtained by rendering the captured raw-RGB images through the manually defined ISP pipeline provided in [2]. 
In our experiments, we use the sRGB version to evaluate the proposed method. + +Metrics. We evaluate the performance of NeCA using three metrics: Discrete Kullback-Leibler (KL) divergence, Signal-to-Noise Ratio (PSNR) and Structural Similarity (SSIM) [24]. The KL divergence is used to measure the similarity of histograms between real noise and generated noise. The histogram range is set from $-0.1$ to 0.1 with 64 intervals. The PSNR and SSIM are used to evaluate the performance of deep denoisers. A higher PSNR and SSIM reflect better denoising performance, while a smaller KL divergence represents better noise synthesizing quality. + +Implementation Details. All the networks are optimized using Adam optimizer [18] with a batch size of 32. Images are cropped to a size of $96 \times 96$ pixels for training. For noise generation, we train individual networks for 300 epochs with the learning rate of $10^{-4}$ . For denoising, we select the DnCNN [27] as the default deep denoiser for comparison and train it for 300 epochs with the learning rate of $10^{-3}$ . The $\lambda_{1}, \lambda_{2}, \lambda_{3}$ and $\lambda_{4}$ in the loss functions are set to 0.1, 0.1, 50, 10 respectively. + +# 4.2. Noise Synthesis on SIDD + +Compared Baselines. We compare NeCA with several noise models, including Additive White Gaussian Noise (AWGN), C2N [16], and the NLF (described in Equation (1)). To synthesize AWGN, we estimate the noise level from each noisy image by applying a noise estimation method introduced in [7] and add it to its corresponding clean image. To synthesize noise using the C2N, we directly utilize the pretrained model provided by the authors. For the NLF, we synthesize heteroscedastic Gaussian noise on the raw-RGB clean images from SIDD, where the signal-dependent term $\sigma_s^2$ and signal-independent term $\sigma_c^2$ are obtained from the metadata provided by SIDD. We then apply the same ISP pipeline as used in the SIDD to render them to sRGB. We refer to this model as NLF-ISP for simplicity. + +Preparation. We evaluate the results of generated noise on each camera in SIDD, where $80\%$ of image pairs are allo + +
| Camera | Metrics | AWGN | C2N [16] | NeCA | NLF-ISP | Real |
| --- | --- | --- | --- | --- | --- | --- |
| G4 | KL | 1.9755 | 0.1660 | 0.0242 | 0.0102 | - |
| G4 | PSNR | 28.15 | 37.81 | 38.85 | 38.51 | 40.60 |
| GP | KL | 1.8351 | 0.1315 | 0.0432 | 0.0126 | - |
| GP | PSNR | 28.45 | 37.08 | 37.72 | 37.74 | 38.33 |
| IP | KL | 1.8562 | 0.0581 | 0.0410 | 0.0475 | - |
| IP | PSNR | 28.01 | 39.12 | 39.46 | 39.53 | 39.45 |
| N6 | KL | 2.1465 | 0.3524 | 0.0206 | 0.0063 | - |
| N6 | PSNR | 26.31 | 33.59 | 35.54 | 34.84 | 35.56 |
| S6 | KL | 0.4517 | 0.4517 | 0.0302 | 0.0902 | - |
| S6 | PSNR | 27.22 | 33.18 | 35.56 | 35.99 | 36.85 |
| Average | KL | 2.0062 | 0.2129 | 0.0342 | 0.0414 | - |
| Average | PSNR | 27.90 | 36.37 | 37.58 | 37.59 | 38.27 |
+ +Table 1. Quantitative results of synthetic noise. The results are computed on the validation sets of five SIDD cameras with KL divergence and PSNR (dB). The best results are highlighted in bold. + +cated for training the noise synthesizing framework, while the rest $20\%$ are reserved for validation. The quality of the synthesized noise was evaluated using two metrics: KL divergence and PSNR. We calculate the KL divergence between the histograms of ground truth noise in the validation set and the noise synthesized by NeCA with clean images and corresponding gain factors from the validation set. Notably, the gain factors used for evaluation are estimated by GENet from the noisy images paired with the clean images, as they cannot be set to random values for evaluation. Besides, we also use the PSNR to further evaluate the quality of synthesized noisy images. We train the DnCNN with the synthesized noisy-clean image pairs on the training set and apply it to denoise the noisy images from the validation set. We calculate the PSNR between the denoised images and corresponding clean images to evaluate the denoising performance. In order to maintain consistency between the training and validation sets, we ensure that both sets contain the same set of ISO levels. + +Noise Synthesis Results. Table 1 shows the KL divergence and PSNR results computed on validation sets of five devices. For the results of average KL divergence over all five cameras, our method exhibits the best performance among all noise models. Additionally, our method lags slightly behind NLF-ISP by $0.01\mathrm{dB}$ on the average PSNR. It is worth noting that noise samples generated by NLF-ISP are first synthesized in the raw-RGB domain and then rendered to sRGB using the same ISP pipelines as in SIDD, suggesting the minimal discrepancies between noise samples from NLF-ISP and real data. The similar results on each camera between NLF-ISP and our NeCA model demonstrate the promising performance of the proposed model. Figure 4 shows generated noise maps from compared methods. Remarkable visual similarities observed between generated noise maps and real noise maps indicate that our framework is capable to synthesize realistic noise. + +![](images/8c9671b6b6592f1848853eeac1bfc697e649c4ce13775fddf19081826b9bf377.jpg) +Figure 4. Visualization of synthetic noise samples under different ISO-lighting conditions on SIDD [2]. The displayed images, from left to right, correspond to clean image, C2N, Our method, NLF-ISP and real noisy image. + +# 4.3. Applications on Real Image Denoising + +Compared Baselines. Various noise generation methods are evaluated to demonstrate the effectiveness of these baselines performed on the downstream real image denoising task, including GCBD [8], C2N [16], Flow-sRGB [20], NeCA-S and NeCA-W. When assessing denoising performance, classical denoisers such as BM3D [9] and WNNM [12] are also included in the experiments. + +Preparation. We establish the synthetic SIDD where clean images are from the original SIDD and noisy images are synthesized by using NeCA-W and NeCA-S. Specifically, the proposed framework is trained on the entire SIDD for each camera and the whole framework (NeCA-W) is used to + +
| Method | SIDD PSNR(dB) | SIDD SSIM | DND PSNR(dB) | DND SSIM |
| --- | --- | --- | --- | --- |
| BM3D [9] | 25.65 | 0.685 | 34.51 | 0.851 |
| WNNM [12] | 25.78 | 0.809 | 34.67 | 0.865 |
| GCBD [8] | - | - | 35.58 | 0.922 |
| C2N* [16] | 33.76 | 0.901 | 36.08 | 0.903 |
| Flow-sRGB* [20] | 34.74 | 0.912 | - | - |
| NeCA-S* | 36.10 | 0.927 | 36.96 | 0.938 |
| NeCA-W* | 36.82 | 0.932 | 37.53 | 0.940 |
| Real* | 37.12 | 0.934 | 37.89 | 0.942 |
+ +Table 2. Quantitative evaluation of denoising performance on SIDD and DND benchmark. * denotes the DnCNN denoiser is trained on either the synthetic or real image pairs with the SIDD. (red: the best result, blue: the second best) + +generate noise for each clean image from the SIDD, where the gain factor is estimated from its paired noisy image. On the other hand, We train NeCA with a few paired images, e.g., three image pairs with varying ISO levels (800, 1600, 3200) from camera N6 and use only NCNet (NeCA-S) to generate signal-independent neighboring correlated (SINC) noise for clean images from SIDD, as seen in Figure 3. The synthesized SINC noise is added to the clean image. For each clean image, the noise level of AWGN is randomly selected from a range of [0, 75]. Our experiments with NeCA-S aim to demonstrate the advantages of explicitly modeling the neighboring correlation of real noise. Other sRGB real noise generation baselines, including C2N [16] and Flow-sRGB [20], also follow the same experimental settings with NeCA-W. With the synthetic noisy-clean image pairs, we train the DnCNN on either synthetic or real pairs of SIDD. Then the denoising performances are evaluated on both the SIDD and DND [23] benchmarks. + +Results and Discussions. Table 2 shows the denoising results of the compared denoisers. Obviously, DnCNN trained on the synthetic samples from NeCA-W, achieves the best results among all compared methods in terms of both PSNR and SSIM. Specifically, NeCA-W gets 2.08 dB gains from Flow-sRGB on the SIDD benchmark, where Flow-sRGB is an end-to-end flow model which implicitly synthesizes real noise. The improvement of denoising performance obtained by NeCA-W indicates the accuracy of our noise model. Moreover, even though the denoising performance of NeCA-W still does not surpass the denoiser trained on the real data, the slight PSNR and SSIM discrepancies between them suggest our model does shrink this gap. Furthermore, the most impressive thing is that NeCA-S still achieves comparable denoising results on both the SIDD and DND benchmarks, outperforming the Flow-sRGB by a large margin. Note that the synthetic noise from NeCA-S is signal-independent. The superior performance of NeCA-S further verifies explicitly modeling neighboring correlation benefits the sRGB real noise synthesis. + +Figure 6 and 7 show the denoised images from the SIDD + +![](images/6d68097a8c3c6eb4f4bea8fea82bcfca1935609ca28c61b4a94b06f9b258630e.jpg) +(a) Clean + +![](images/ef022acb930b4a1c30e1c88d9e9216049df18d68f066d87dc583f88bf776d432.jpg) +(b) $\hat{\beta} = 0.02$ + +![](images/4739bb193f2c932858b2f22493569e60fdcd48759bdf30c48bc919404879c02f.jpg) +(c) $\hat{\beta} = 0.06$ + +![](images/f45d3d6be20a3c36a584d69ecb19eb7d7c5fc047b1e283311d8c8140c3f1a9c1.jpg) +(d) $\hat{\beta} = 0.10$ + +![](images/516ec8201b7b54e84f809303b973dd23781f225f6e0b96343afa85a9d8f861d0.jpg) +(e) $\hat{\beta} = 0.14$ + +![](images/5e2617478418d058fbb049cac1aeefec9b17fe721f91ce0e60702bc087e96e91.jpg) +(f) $\hat{\beta} = 0.18$ + +![](images/fa67a832ba461681aa6a61cccc56f71e3221a747a2fbddd6be88b08e53ddd654.jpg) +Figure 5. Results of controllable noise synthesis. The gain factor ranges from 0.02 to 0.18 with intervals of 0.04. 
+
+![](images/9c7c9f87e5ad8ed1f80c19f1b9e660e28340038f249e2e227313542ecd9f045f9.jpg)
+(b) C2N [16]
+
+![](images/ff1f381eba36e6da51584c2580c47cacf2b4663ec63269c1d346aeba13b0aa95.jpg)
+(c) NeCA-S
+
+![](images/fc607dac803c972cc879a21ba7b44774f6765dae48a0f9325f15b88ce4aa5c09.jpg)
+(a) Noisy
+(d) NeCA-W
+
+![](images/6c8aa11b4bc9fc744a83fadc5dc423828ba5fc1adeef17fda07ba3640fb3d1fb.jpg)
+(e) Real
+
+![](images/ce2c034828c1d63c9e5c67690dbd8e354f2b7dc6f7b9ba3f07ad6df88ae5142f.jpg)
+(f) Clean
+
+![](images/baf7ebbe63e91d30aca03f798a85425fa8b68b3ac7d02a9c436d07795d21e863.jpg)
+Figure 6. Denoising results on the SIDD dataset. DnCNN denoisers are trained on the noisy images from (b) C2N, (c, d) our models, and (e) real noisy images of the SIDD.
+
+![](images/577c34bc1dec9e4b436b76e0a1491243a99cd25377208c4b2be0e650bb31445d.jpg)
+(b) BM3D [9]
+
+![](images/62e07428b834f79a4ad27c96cdb99a1aed99296e4fab164e30f267674c1fa872.jpg)
+
+![](images/58d081b82dada00d053c63984181cb25b957486f15ac2262a88eb15362771af2.jpg)
+(a) Noisy
+(d) NeCA-S
+Figure 7. Denoising results on the DND dataset. DnCNN denoisers are trained on the noisy images from (c) C2N, (d, e) our models, and (f) real noisy images of the SIDD.
+
+![](images/d1c13e763586c1a69a63b058e60ada9357f40f635041fc934f4abcf5bead0f79.jpg)
+(e) NeCA-W
+
+![](images/93b7aa1f1696eb8d6c4b2fe2410bb65e822829a37b82b2bc7285b638a8dc0924.jpg)
+(c) C2N [16]
+(f) Real
+
+and DND datasets. The results indicate that the denoisers trained on the synthetic image pairs from NeCA-W and NeCA-S achieve denoising results similar to those of the denoiser trained on real image pairs. In contrast, the denoiser trained on noisy samples from C2N fails to suppress the noise effectively, partly due to its unpaired training scheme.
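
The quantitative comparisons above rely on KL divergence between noise histograms (Tables 1 and 3) and on PSNR/SSIM of the downstream denoiser (Table 2). The sketch below shows how the two scalar metrics can be computed; the histogram bin count and value range in `noise_kl_divergence` are illustrative assumptions, since the text only states that the KL divergence is taken between the histograms of real and synthesized noise.

```python
import numpy as np


def noise_kl_divergence(real_noise, fake_noise, bins=256, value_range=(-128, 128), eps=1e-12):
    """KL divergence between histograms of real and synthetic noise values.

    The bin count and value range are assumed choices for illustration only.
    """
    p, _ = np.histogram(real_noise, bins=bins, range=value_range)
    q, _ = np.histogram(fake_noise, bins=bins, range=value_range)
    p = p / max(p.sum(), 1)  # normalize counts to probabilities
    q = q / max(q.sum(), 1)
    mask = p > 0
    return float(np.sum(p[mask] * np.log(p[mask] / (q[mask] + eps))))


def psnr(denoised, clean, peak=255.0):
    """PSNR in dB between a denoised image and its clean reference."""
    diff = np.asarray(denoised, dtype=np.float64) - np.asarray(clean, dtype=np.float64)
    mse = max(float(np.mean(diff ** 2)), 1e-12)  # guard against a zero MSE
    return float(10.0 * np.log10(peak ** 2 / mse))
```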
| Loss | w/o $\mathcal{L}_{std2}$ | w/o $\mathcal{L}_{adv2}$ | w/o $\mathcal{L}_{reg}$ | all |
| --- | --- | --- | --- | --- |
| KL | 0.052 | 0.048 | 0.108 | 0.041 |
+
+Table 3. Ablation study on the effectiveness of different loss functions. We train the framework on the training set of camera IP and calculate the KL divergence on its validation set.
+
+# 4.4. Customized Generation
+
+Our proposed noise synthesizing framework allows the magnitude of the generated noise to be controlled by manipulating the gain factor. Figure 5 illustrates the controllable synthesis results, generated by varying the gain factor from 0.02 to 0.18 in intervals of 0.04. The results demonstrate that an increase in the gain factor leads to a proportional increase in the magnitude of the generated noise.
+
+# 4.5. Ablation Study
+
+In this section, we conduct ablation studies to verify the effectiveness of the individual loss functions in our framework, including $\mathcal{L}_{std2}$, $\mathcal{L}_{adv2}$ and $\mathcal{L}_{reg}$. We exclude $\mathcal{L}_{std1}$ and $\mathcal{L}_{adv1}$ from the evaluation since they are indispensable for framework training. As indicated in Table 3, the model achieves the best KL divergence with the complete set of loss functions, suggesting that all components contribute to the quality of the final synthetic noise. Removing $\mathcal{L}_{reg}$ significantly increases the KL divergence, underscoring its importance in stabilizing the training process. Moreover, both $\mathcal{L}_{adv2}$ and $\mathcal{L}_{std2}$ improve the quality of the synthetic noise, supporting our claim that $\mathcal{L}_{adv2}$ serves as a complementary loss for $\mathcal{L}_{std2}$, enabling the NPNet to predict more accurate noise levels.
+
+# 5. Conclusion
+
+In this paper, we propose a neighboring correlation-aware noise model for sRGB real noise generation. Our method effectively bridges the gap between synthetic noise and real noise by explicitly modeling the signal dependency and neighboring correlation of real noise. The experimental results demonstrate that the proposed noise model achieves superior performance on both real noise synthesis and downstream real image denoising tasks.
+
+# References
+
+[1] Abdelrahman Abdelhamed, Marcus A Brubaker, and Michael S Brown. Noise flow: Noise modeling with conditional normalizing flows. In ICCV, 2019. 1, 2
+[2] Abdelrahman Abdelhamed, Stephen Lin, and Michael S Brown. A high-quality denoising dataset for smartphone cameras. In CVPR, 2018. 6, 7
+[3] Saeed Anwar and Nick Barnes. Real image denoising with feature attention. In ICCV, 2019. 1
+[4] Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016. 4
+[5] Tim Brooks, Ben Mildenhall, Tianfan Xue, Jiawen Chen, Dillon Sharlet, and Jonathan T Barron. Unprocessing images for learned raw denoising. In CVPR, 2019. 1, 2
+[6] Ke-Chi Chang, Ren Wang, Hung-Jin Lin, Yu-Lun Liu, Chia-Ping Chen, Yu-Lin Chang, and Hwann-Tzong Chen. Learning camera-aware noise models. In ECCV, 2020. 1, 2
+[7] Guangyong Chen, Fengyuan Zhu, and Pheng Ann Heng. An efficient statistical method for image noise level estimation. In ICCV, 2015. 6
+[8] Jingwen Chen, Jiawei Chen, Hongyang Chao, and Ming Yang. Image blind denoising with generative adversarial network based noise modeling. In CVPR, 2018. 2, 7
+[9] Kostadin Dabov, Alessandro Foi, Vladimir Katkovnik, and Karen Egiazarian. Image denoising by sparse 3-d transform-domain collaborative filtering. TIP, 2007. 7, 8
+[10] Alessandro Foi. Clipped noisy images: Heteroskedastic modeling and practical denoising. Signal Processing, 2009.
2 +[11] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial networks. Communications of the ACM, 2020. 2 +[12] Shuhang Gu, Lei Zhang, Wangmeng Zuo, and Xiangchu Feng. Weighted nuclear norm minimization with application to image denoising. In CVPR, 2014. 7 +[13] Ishaan Gulrajani, Faruk Ahmed, Martin Arjovsky, Vincent Dumoulin, and Aaron C Courville. Improved training of wasserstein gans. NIPS, 2017. 5 +[14] Lanqing Guo, Siyu Huang, Haosen Liu, and Bihan Wen. Fino: Flow-based joint image and noise model. arXiv preprint arXiv:2111.06031, 2021. 1 +[15] Shi Guo, Zifei Yan, Kai Zhang, Wangmeng Zuo, and Lei Zhang. Toward convolutional blind denoising of real photographs. In CVPR, 2019. 1, 2 +[16] Geonwoon Jang, Wooseok Lee, Sanghyun Son, and Kyoung Mu Lee. C2n: Practical generative noise modeling for real-world denoising. In ICCV, 2021. 1, 2, 6, 7, 8 +[17] Dong-Wook Kim, Jae Ryun Chung, and Seung-Won Jung. Grdn: Grouped residual dense network for real image denoising and gan-based real-world noise modeling. In CVPRW, 2019. 1, 2 +[18] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 6 + +[19] Durk P Kingma and Prafulla Dhariwal. Glow: Generative flow with invertible 1x1 convolutions. NIPS, 2018. 2 +[20] Shayan Kousha, Ali Maleky, Michael S Brown, and Marcus A Brubaker. Modeling srgb camera noise with normalizing flows. In CVPR, 2022. 1, 2, 3, 7 +[21] Ce Liu, William T Freeman, Richard Szeliski, and Sing Bing Kang. Noise estimation from a single image. In CVPR, 2006. 2, 3 +[22] Seonghyeon Nam, Youngbae Hwang, Yasuyuki Matsushita, and Seon Joo Kim. A holistic approach to cross-channel image noise modeling and its application to image denoising. In CVPR, 2016. 1, 2, 3 +[23] Tobias Plotz and Stefan Roth. Benchmarking denoising algorithms with real photographs. In CVPR, 2017. 7 +[24] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. TIP, 2004. 6 +[25] Kaixuan Wei, Ying Fu, Jiaolong Yang, and Hua Huang. A physics-based noise formation model for extreme low-light raw denoising. In CVPR, 2020. 1, 2 +[26] Zongsheng Yue, Qian Zhao, Lei Zhang, and Deyu Meng. Dual adversarial network: Toward real-world noise removal and noise generation. In ECCV, 2020. 1, 2 +[27] Kai Zhang, Wangmeng Zuo, Yunjin Chen, Deyu Meng, and Lei Zhang. Beyond a gaussian denoiser: Residual learning of deep cnn for image denoising. TIP, 2017. 6 +[28] Yi Zhang, Hongwei Qin, Xiaogang Wang, and Hongsheng Li. Rethinking noise synthesis and modeling in raw denoising. In ICCV, 2021. 1, 2 +[29] Yuqian Zhou, Jianbo Jiao, Haibin Huang, Yang Wang, Jue Wang, Honghui Shi, and Thomas Huang. When awgn-based denoiser meets real noises. In AAAI, 2020. 
4, 5 \ No newline at end of file diff --git a/2023/sRGB Real Noise Synthesizing With Neighboring Correlation-Aware Noise Model/images.zip b/2023/sRGB Real Noise Synthesizing With Neighboring Correlation-Aware Noise Model/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..d8b6f6bc9b3e7ba5d409fe5fe2b52e6e19fc5240 --- /dev/null +++ b/2023/sRGB Real Noise Synthesizing With Neighboring Correlation-Aware Noise Model/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c111d7b7fa4ef1385b7f0f5e2c6e9861c98900544558b0ed831040f1670b726 +size 692551 diff --git a/2023/sRGB Real Noise Synthesizing With Neighboring Correlation-Aware Noise Model/layout.json b/2023/sRGB Real Noise Synthesizing With Neighboring Correlation-Aware Noise Model/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..0507a74abc0ec9d72d051c519908568c5773b72b --- /dev/null +++ b/2023/sRGB Real Noise Synthesizing With Neighboring Correlation-Aware Noise Model/layout.json @@ -0,0 +1,9845 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 47, + 103, + 547, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 103, + 547, + 121 + ], + "spans": [ + { + "bbox": [ + 47, + 103, + 547, + 121 + ], + "type": "text", + "content": "sRGB Real Noise Synthesizing with Neighboring Correlation-Aware Noise Model" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 175, + 142, + 419, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 142, + 419, + 185 + ], + "spans": [ + { + "bbox": [ + 175, + 142, + 419, + 185 + ], + "type": "text", + "content": "Zixuan Fu" + }, + { + "bbox": [ + 175, + 142, + 419, + 185 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 175, + 142, + 419, + 185 + ], + "type": "text", + "content": ", Lanqing Guo" + }, + { + "bbox": [ + 175, + 142, + 419, + 185 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 175, + 142, + 419, + 185 + ], + "type": "text", + "content": ", Bihan Wen" + }, + { + "bbox": [ + 175, + 142, + 419, + 185 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + }, + { + "bbox": [ + 175, + 142, + 419, + 185 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 175, + 142, + 419, + 185 + ], + "type": "text", + "content": "Nanyang Technological University, Singapore \n{zixuan.fu, lanqing001, bihan.wen}@ntu.edu.sg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 213, + 192, + 226 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 213, + 192, + 226 + ], + "spans": [ + { + "bbox": [ + 143, + 213, + 192, + 226 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 238, + 290, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 238, + 290, + 490 + ], + "spans": [ + { + "bbox": [ + 47, + 238, + 290, + 490 + ], + "type": "text", + "content": "Modeling and synthesizing real noise in the standard RGB (sRGB) domain is challenging due to the complicated noise distribution. While most of the deep noise generators proposed to synthesize sRGB real noise using an end-to-end trained model, the lack of explicit noise modeling degrades the quality of their synthesized noise. In this work, we propose to model the real noise as not only dependent on the underlying clean image pixel intensity, but also highly correlated to its neighboring noise realization within the local region. 
Correspondingly, we propose a novel noise synthesizing framework by explicitly learning its neighboring correlation on top of the signal dependency. With the proposed noise model, our framework greatly bridges the distribution gap between synthetic noise and real noise. We show that our generated \"real\" sRGB noisy images can be used for training supervised deep denoisers, thus to improve their real denoising results with a large margin, comparing to the popular classic denoisers or the deep denoisers that are trained on other sRGB noise generators. The code will be available at https://github.com/xuan611/sRGB-Real-NoiseSynthesizing." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 513, + 128, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 513, + 128, + 525 + ], + "spans": [ + { + "bbox": [ + 47, + 513, + 128, + 525 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 533, + 288, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 533, + 288, + 666 + ], + "spans": [ + { + "bbox": [ + 46, + 533, + 288, + 666 + ], + "type": "text", + "content": "Real image denoising is one of the most challenging tasks in low-level vision. Deep denoisers that are trained using synthetic noise, e.g., Additive White Gaussian Noise (AWGN), perform poorly on real photography [3, 15], which motivates more realistic noise models, e.g., [1, 5, 14-16]. In general, there are two approaches towards real noise modeling, i.e., modeling in the raw-RGB and standard RGB (sRGB) domains. Popular modeling methods including the physical-based [25, 28] and data-driven methods [1, 6] exploit sophisticated noise models in the raw-RGB domain, which demonstrated promising perfor" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 214, + 547, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 214, + 547, + 334 + ], + "spans": [ + { + "bbox": [ + 306, + 214, + 547, + 334 + ], + "type": "text", + "content": "mance as noise in raw-RGB is largely simplified comparing to noise in sRGB [20, 22]. However, raw-RGB images are not usually utilized by common users due to their large sizes. In contrast, most commercial cameras generate sRGB images by default, which are more popular in practice. Unfortunately, the noise generation methods in the raw-RGB domain cannot be directly applied to sRGB images, as the real noise distribution in sRGB is more complicated than raw-RGB noise, caused by the in-camera signal processing (ISP) pipeline [22]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 338, + 547, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 338, + 547, + 482 + ], + "spans": [ + { + "bbox": [ + 305, + 338, + 547, + 482 + ], + "type": "text", + "content": "Recent works [5, 15] proposed to generate noise on raw-RGB images and convert them into sRGB images by the ISP pipeline including demosaicing, white balancing, gamma correction, etc. While these methods synthesized realistic noise, the requirement of raw-RGB images as well as manually defined ISP pipelines limits their applications. An alternative solution for sRGB real noise modeling is to train the generative models with sRGB noisy-clean images and directly synthesize real noise on sRGB images [16,17,20,26]. 
However, these models synthesize noise without explicitly modeling the characteristics of sRGB real noise, resulting in degradation of the quality of the synthesized noise." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 486, + 547, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 486, + 547, + 712 + ], + "spans": [ + { + "bbox": [ + 305, + 486, + 547, + 712 + ], + "type": "text", + "content": "In this paper, we propose a novel real noise generation network, based on Neighboring Correlation-Aware noise model, dubbed as NeCA, to directly synthesize real noise in the sRGB domain. The proposed real noise synthesis assumes that the sRGB real noise is not only signal-dependent, i.e., noise level partially depends on its underlying clean pixel, but also highly correlated with its neighboring noise realization. Such a real noise model greatly bridges the gap between the synthetic and real noise in sRGB. Furthermore, the synthesized \"real\" images by the proposed NeCA can be used for training supervised deep denoisers, thus tackling the real image denoising challenges, subjective to only a few real training data. The trained deep denoiser using our synthetic noisy images achieves state-of-the-art denoising performance, compared to the popular classic denoisers as well as deep denoisers that are trained on synthetic pairs from other noise models. To sum up, our main contributions can be concluded as follows:" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 673, + 179, + 684 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 673, + 179, + 684 + ], + "spans": [ + { + "bbox": [ + 58, + 673, + 179, + 684 + ], + "type": "text", + "content": "*Co-first authors contributed equally." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 59, + 684, + 176, + 693 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 684, + 176, + 693 + ], + "spans": [ + { + "bbox": [ + 59, + 684, + 176, + 693 + ], + "type": "text", + "content": "†Corresponding author: Bihan Wen." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 693, + 287, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 693, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 693, + 287, + 713 + ], + "type": "text", + "content": "This work was supported in part by the MOE AcRF Tier 1 (RG61/22) and Start-Up Grant." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1683" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 59, + 72, + 286, + 234 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 59, + 72, + 286, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 72, + 286, + 131 + ], + "spans": [ + { + "bbox": [ + 59, + 72, + 286, + 131 + ], + "type": "text", + "content": "- We introduce a neighboring correlation-aware noise model for sRGB real noise synthesis by explicitly modeling the neighboring correlation of real noise, to bridge the gap between the synthetic and real noise distribution in sRGB." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 59, + 136, + 286, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 136, + 286, + 183 + ], + "spans": [ + { + "bbox": [ + 59, + 136, + 286, + 183 + ], + "type": "text", + "content": "- Our proposed framework shows a well-generalized ability, which is still capable to improve the real image denoising performance even with limited training data." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 59, + 187, + 286, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 187, + 286, + 234 + ], + "spans": [ + { + "bbox": [ + 59, + 187, + 286, + 234 + ], + "type": "text", + "content": "- With the synthetic image pairs generated by NeCA, the trained denoisers achieve state-of-the-art denoising performance compared with the deep denoisers trained with other real noise models." + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 47, + 247, + 133, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 247, + 133, + 258 + ], + "spans": [ + { + "bbox": [ + 47, + 247, + 133, + 258 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 266, + 225, + 278 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 266, + 225, + 278 + ], + "spans": [ + { + "bbox": [ + 47, + 266, + 225, + 278 + ], + "type": "text", + "content": "2.1. Raw-RGB Image Noise Synthesis" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 285, + 286, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 285, + 286, + 380 + ], + "spans": [ + { + "bbox": [ + 46, + 285, + 286, + 380 + ], + "type": "text", + "content": "Modeling real noise in raw-RGB is challenging as it cannot be simply assumed as Additive White Gaussian Noise (AWGN). Typically, raw-RGB noise models can be classified into two categories: physical-based models and learning-based models. 
One of the most commonly used physical-based models is the heteroscedastic Gaussian noise [10], which posits noise value, located at pixel " + }, + { + "bbox": [ + 46, + 285, + 286, + 380 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 285, + 286, + 380 + ], + "type": "text", + "content": ", is dependent on its underlying clean pixel intensity:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 113, + 392, + 286, + 406 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 392, + 286, + 406 + ], + "spans": [ + { + "bbox": [ + 113, + 392, + 286, + 406 + ], + "type": "interline_equation", + "content": "\\boldsymbol {n} _ {i} \\sim \\mathcal {N} \\left(0, \\sigma_ {s} ^ {2} \\cdot \\boldsymbol {x} _ {i} + \\sigma_ {c} ^ {2}\\right), \\tag {1}", + "image_path": "9df10aada56fc9d7e146c36b4cdbf10b4be49f1bb313e8c2f602674d136dbb5f.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 411, + 286, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 411, + 286, + 517 + ], + "spans": [ + { + "bbox": [ + 46, + 411, + 286, + 517 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 411, + 286, + 517 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 46, + 411, + 286, + 517 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 411, + 286, + 517 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 411, + 286, + 517 + ], + "type": "text", + "content": " are noise and clean image in the raw-RGB domain, while " + }, + { + "bbox": [ + 46, + 411, + 286, + 517 + ], + "type": "inline_equation", + "content": "\\sigma_{s}" + }, + { + "bbox": [ + 46, + 411, + 286, + 517 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 411, + 286, + 517 + ], + "type": "inline_equation", + "content": "\\sigma_{c}" + }, + { + "bbox": [ + 46, + 411, + 286, + 517 + ], + "type": "text", + "content": " denote the noise variance term for signal-dependent and signal-independent components. Such a noise model is also known as the noise level function (NLF) as it describes the relationship between the pixelwise noise level and image intensity. To better model the camera sensor noise, recent works [25, 28] have proposed that real noise is a sophisticated combination of shot noise, read noise and row noise, etc." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 519, + 286, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 519, + 286, + 662 + ], + "spans": [ + { + "bbox": [ + 46, + 519, + 286, + 662 + ], + "type": "text", + "content": "Compared to statistical modeling of noise, learning-based models learn the real noise distribution with generative models such as the generative adversarial nets (GANs) [6] and normalization flows [1] from paired noisy-clean images. Although these methods perform well in raw-RGB, they cannot be directly applied to model sRGB real noise since their assumptions are based on the characteristics of raw-RGB noise. For instance, these noise generators synthesize raw-RGB noise from an initialized heteroscedastic Gaussian noise (as described in Equation (1)), which fails to provide an accurate representation of real noise in the sRGB domain [21, 22]." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 670, + 204, + 682 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 670, + 204, + 682 + ], + "spans": [ + { + "bbox": [ + 47, + 670, + 204, + 682 + ], + "type": "text", + "content": "2.2. sRGB Image Noise Synthesis" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 689, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 286, + 713 + ], + "type": "text", + "content": "The camera ISP pipeline, including demosaicing, tone mapping, white balancing, gamma mapping, etc., makes" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": "real noise in the sRGB domain to be more complicated than it is in the raw-RGB domain. To synthesize sRGB real noise, two approaches have been proposed: (1) synthesizing noisy samples in the raw-RGB domain and rendering them into sRGB images by applying the manually defined ISP pipeline [5, 15], and (2) directly synthesizing real noise in the sRGB domain [8, 16, 17, 20, 26]." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 157, + 545, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 157, + 545, + 275 + ], + "spans": [ + { + "bbox": [ + 304, + 157, + 545, + 275 + ], + "type": "text", + "content": "In CBDNet [15], heteroscedastic Gaussian noise is added on raw-RGB clean images, and images are converted into sRGB using demosaicing and camera response functions. However, CBDNet requires raw-RGB images, which are not commonly used. To address this issue, unprocessing image (UPI) [5] proposes to de-render sRGB images into raw-RGB images using several predefined unprocessing pipelines. Similar procedures used in CBDNet are then applied to the unprocessed raw-RGB images to obtain their sRGB versions." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 277, + 545, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 277, + 545, + 587 + ], + "spans": [ + { + "bbox": [ + 304, + 277, + 545, + 587 + ], + "type": "text", + "content": "Despite CBDNet and UPI effectively synthesize sRGB real noise, they still require predefined ISP pipelines, which may not match real ones used in different camera sensors. Therefore, generating real noise directly in the sRGB domain with deep generative models [11, 19] is considered an alternative solution. GCBD [8] proposes a GAN-based model that learns noise distributions by training on noise patches that have been cropped from noisy images. However, the synthesized noise is signal-independent as it is generated from random noise. DANet [26] and GRDN [17] use conditional generative networks to synthesize signal-dependent noise, however, few experiments are conducted to demonstrate the effectiveness of the proposed noise generators. C2N [16] attempts to synthesize the real noise with unpaired clean-noisy images, but the generated noise contains artifacts and color-shift problems due to the unpaired training mode. Recently, Kousha et al. [20] propose a conditional flow-based model for sRGB image noise generation that takes clean images, camera types, and ISO levels as input. 
However, the denoiser, trained with synthetic data, improves marginally compared to the unpaired noise generation method C2N. Unlike previous attempts that model noise with an end-to-end generator, our proposed method explicitly decomposes signal dependency and neighboring correlation of real noise and learns them with separate networks." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 602, + 361, + 613 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 602, + 361, + 613 + ], + "spans": [ + { + "bbox": [ + 306, + 602, + 361, + 613 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 622, + 538, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 622, + 538, + 635 + ], + "spans": [ + { + "bbox": [ + 306, + 622, + 538, + 635 + ], + "type": "text", + "content": "3.1. Neighboring Correlation-Aware Noise Model" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": "In this section, we present our proposed noise model for sRGB real noise. We begin by introducing the basic noise model, which defines the signal dependency of pixel-wise noise level and its underlying clean pixels. We then discuss discrepancies between noise synthesized by the basic noise model and sRGB real noise and propose to bridge this gap" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 749, + 315, + 757 + ], + "type": "text", + "content": "1684" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "type": "text", + "content": "by explicitly modeling noise neighboring correlation on top of the signal dependency." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 96, + 288, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 96, + 288, + 251 + ], + "spans": [ + { + "bbox": [ + 46, + 96, + 288, + 251 + ], + "type": "text", + "content": "Basic Noise Model. Both raw-RGB and sRGB real noise are dependent on the image signal. In raw-RGB, the noise level can be approximated as a simple function of its underlying clean pixel intensity, i.e., heteroscedastic Gaussian noise described in Equation (1). However sRGB real noise is more complex due to camera settings and signal transformations in the ISP pipeline [20-22]. To address this challenge, we propose a noise model that characterizes the signal dependency of sRGB real noise. 
Specifically, for an sRGB clean image " + }, + { + "bbox": [ + 46, + 96, + 288, + 251 + ], + "type": "inline_equation", + "content": "\\mathbf{x} = (x_{1},\\dots,x_{N})" + }, + { + "bbox": [ + 46, + 96, + 288, + 251 + ], + "type": "text", + "content": " and its paired noisy version " + }, + { + "bbox": [ + 46, + 96, + 288, + 251 + ], + "type": "inline_equation", + "content": "\\mathbf{y} = (y_{1},\\dots,y_{N})" + }, + { + "bbox": [ + 46, + 96, + 288, + 251 + ], + "type": "text", + "content": ", we define noise level at pixel " + }, + { + "bbox": [ + 46, + 96, + 288, + 251 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 96, + 288, + 251 + ], + "type": "text", + "content": " as a function of the clean image patch " + }, + { + "bbox": [ + 46, + 96, + 288, + 251 + ], + "type": "inline_equation", + "content": "\\Omega_{\\mathbf{x}}" + }, + { + "bbox": [ + 46, + 96, + 288, + 251 + ], + "type": "text", + "content": ", centered at clean pixel " + }, + { + "bbox": [ + 46, + 96, + 288, + 251 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 46, + 96, + 288, + 251 + ], + "type": "text", + "content": ", and camera ISO level " + }, + { + "bbox": [ + 46, + 96, + 288, + 251 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 46, + 96, + 288, + 251 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 133, + 261, + 287, + 274 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 261, + 287, + 274 + ], + "spans": [ + { + "bbox": [ + 133, + 261, + 287, + 274 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\sigma} _ {i} = f \\left(\\Omega_ {\\boldsymbol {x}}, \\gamma\\right), \\tag {2}", + "image_path": "ae5f44726093dafef91a549dc4aa5b4f3764b72df106bac51819a877a8ae9174.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 283, + 287, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 283, + 287, + 354 + ], + "spans": [ + { + "bbox": [ + 46, + 283, + 287, + 354 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 283, + 287, + 354 + ], + "type": "inline_equation", + "content": "f(\\cdot)" + }, + { + "bbox": [ + 46, + 283, + 287, + 354 + ], + "type": "text", + "content": " represents the non-linear relationship of " + }, + { + "bbox": [ + 46, + 283, + 287, + 354 + ], + "type": "inline_equation", + "content": "\\Omega_{\\pmb{x}},\\gamma" + }, + { + "bbox": [ + 46, + 283, + 287, + 354 + ], + "type": "text", + "content": " and the pixel-wise noise level " + }, + { + "bbox": [ + 46, + 283, + 287, + 354 + ], + "type": "inline_equation", + "content": "\\sigma_{i} = (\\sigma_{i,r},\\sigma_{i,g},\\sigma_{i,b})" + }, + { + "bbox": [ + 46, + 283, + 287, + 354 + ], + "type": "text", + "content": " for three color channels. For the sake of clarity, we omit the location index " + }, + { + "bbox": [ + 46, + 283, + 287, + 354 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 283, + 287, + 354 + ], + "type": "text", + "content": " in the expression for the local region " + }, + { + "bbox": [ + 46, + 283, + 287, + 354 + ], + "type": "inline_equation", + "content": "\\Omega_{\\pmb{x}}" + }, + { + "bbox": [ + 46, + 283, + 287, + 354 + ], + "type": "text", + "content": ". 
Then the distribution of noise " + }, + { + "bbox": [ + 46, + 283, + 287, + 354 + ], + "type": "inline_equation", + "content": "\\pmb{v}" + }, + { + "bbox": [ + 46, + 283, + 287, + 354 + ], + "type": "text", + "content": " at each pixel is modeled as a Gaussian distribution:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 127, + 364, + 287, + 378 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 364, + 287, + 378 + ], + "spans": [ + { + "bbox": [ + 127, + 364, + 287, + 378 + ], + "type": "interline_equation", + "content": "\\boldsymbol {v} _ {i, c} \\sim \\mathcal {N} (0, \\sigma_ {i, c} ^ {2}), \\tag {3}", + "image_path": "b995c3bffcc331f2d6203106e4af399f425c49c427a3772f35d84c75f58945ef.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 386, + 287, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 386, + 287, + 436 + ], + "spans": [ + { + "bbox": [ + 46, + 386, + 287, + 436 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 386, + 287, + 436 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 46, + 386, + 287, + 436 + ], + "type": "text", + "content": " is the index of RGB channels. We further define the noise level map " + }, + { + "bbox": [ + 46, + 386, + 287, + 436 + ], + "type": "inline_equation", + "content": "\\pmb{m}" + }, + { + "bbox": [ + 46, + 386, + 287, + 436 + ], + "type": "text", + "content": ", which has the same size as the clean image and the value at pixel " + }, + { + "bbox": [ + 46, + 386, + 287, + 436 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 386, + 287, + 436 + ], + "type": "text", + "content": " refers to the noise level " + }, + { + "bbox": [ + 46, + 386, + 287, + 436 + ], + "type": "inline_equation", + "content": "\\sigma_{i}" + }, + { + "bbox": [ + 46, + 386, + 287, + 436 + ], + "type": "text", + "content": ". Finally, we can simulate signal-dependent noise as follows:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 101, + 445, + 287, + 459 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 101, + 445, + 287, + 459 + ], + "spans": [ + { + "bbox": [ + 101, + 445, + 287, + 459 + ], + "type": "interline_equation", + "content": "\\boldsymbol {v} = \\boldsymbol {\\epsilon} \\odot \\boldsymbol {m}, \\quad \\epsilon_ {i, c} \\sim \\mathcal {N} (0, 1 ^ {2}). \\tag {4}", + "image_path": "854e73b7be7945f0a3560d5ddc3663eb09d196b9ab41e48392cef4a83a4de991.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 464, + 288, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 464, + 288, + 693 + ], + "spans": [ + { + "bbox": [ + 46, + 464, + 288, + 693 + ], + "type": "text", + "content": "Neighboring Correlation Noise Model. The noise synthesized by the basic noise model still exhibits discrepancies with real noise, as shown in Figure 1(b) and (d). We attribute this gap to the improper noise realization defined in Equation (4), where noise is sampled spatially independently from the basic noise model. Specifically, the most commonly used noise models, including the AWGN, heteroscedastic Gaussian noise, and our basic noise model, assume that the noise distribution is independent at each pixel, and the noise is sampled from the noise distribution without considering its neighboring synthesized noise. 
However, this noise realization method is inadequate to synthesize RGB real noise as the noise value is assumed to be highly correlated with its neighboring noise values due to the influence of the ISP pipeline such as demosaicing, which introduces neighboring operations. We refer to this characteristic of noise as neighboring correlation and define a neighboring correlation operator " + }, + { + "bbox": [ + 46, + 464, + 288, + 693 + ], + "type": "inline_equation", + "content": "g(\\cdot)" + }, + { + "bbox": [ + 46, + 464, + 288, + 693 + ], + "type": "text", + "content": " that maps such the correlation onto the synthesized signal-dependent noise " + }, + { + "bbox": [ + 46, + 464, + 288, + 693 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 46, + 464, + 288, + 693 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 139, + 701, + 287, + 714 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 701, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 139, + 701, + 287, + 714 + ], + "type": "interline_equation", + "content": "\\boldsymbol {n} _ {i} = g \\left(\\Omega_ {\\boldsymbol {v}}\\right), \\tag {5}", + "image_path": "987cd52885d00ed6dde294fcc9058c134f8a09389f8551445517502a4d5f8dda.jpg" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 314, + 72, + 420, + 135 + ], + "blocks": [ + { + "bbox": [ + 314, + 72, + 420, + 135 + ], + "lines": [ + { + "bbox": [ + 314, + 72, + 420, + 135 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 420, + 135 + ], + "type": "image", + "image_path": "2b5db907cea09a82760c07c93a4c77d9c48573121172ee500c1fb92c0698dd7c.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 348, + 139, + 386, + 150 + ], + "lines": [ + { + "bbox": [ + 348, + 139, + 386, + 150 + ], + "spans": [ + { + "bbox": [ + 348, + 139, + 386, + 150 + ], + "type": "text", + "content": "(a) Clean" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 423, + 72, + 529, + 135 + ], + "blocks": [ + { + "bbox": [ + 423, + 72, + 529, + 135 + ], + "lines": [ + { + "bbox": [ + 423, + 72, + 529, + 135 + ], + "spans": [ + { + "bbox": [ + 423, + 72, + 529, + 135 + ], + "type": "image", + "image_path": "9a3eb8d5e8d9a04de9fb6a392ea24c537d2b7145ddad57be520b76b8c6b79296.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 442, + 139, + 510, + 150 + ], + "lines": [ + { + "bbox": [ + 442, + 139, + 510, + 150 + ], + "spans": [ + { + "bbox": [ + 442, + 139, + 510, + 150 + ], + "type": "text", + "content": "(b) SDNU Noise" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 313, + 152, + 420, + 216 + ], + "blocks": [ + { + "bbox": [ + 313, + 152, + 420, + 216 + ], + "lines": [ + { + "bbox": [ + 313, + 152, + 420, + 216 + ], + "spans": [ + { + "bbox": [ + 313, + 152, + 420, + 216 + ], + "type": "image", + "image_path": "5fccf73333b805713134bd0baec7e835e7b71f49e853a79f7fd20b1507e305a9.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 333, + 219, + 401, + 230 + ], + "lines": [ + { + "bbox": [ + 333, + 219, + 401, + 230 + ], + "spans": [ + { + "bbox": [ + 333, + 219, + 401, + 230 + ], + "type": "text", + "content": "(c) SDNC Noise" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + 
], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 423, + 152, + 529, + 216 + ], + "blocks": [ + { + "bbox": [ + 423, + 152, + 529, + 216 + ], + "lines": [ + { + "bbox": [ + 423, + 152, + 529, + 216 + ], + "spans": [ + { + "bbox": [ + 423, + 152, + 529, + 216 + ], + "type": "image", + "image_path": "a8807c5a9d0283918fd60de1f86bd60930c0c63cb2e3ada6e796305c537c640b.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 459, + 219, + 493, + 230 + ], + "lines": [ + { + "bbox": [ + 459, + 219, + 493, + 230 + ], + "spans": [ + { + "bbox": [ + 459, + 219, + 493, + 230 + ], + "type": "text", + "content": "(d) Real" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 305, + 232, + 545, + 299 + ], + "lines": [ + { + "bbox": [ + 305, + 232, + 545, + 299 + ], + "spans": [ + { + "bbox": [ + 305, + 232, + 545, + 299 + ], + "type": "text", + "content": "Figure 1. The visualization of modeling signal dependency and neighboring correlation of sRGB real noise. (a) Clean image. (b) Synthetic signal-dependent and neighboring uncorrelated (SDNU) noise. (c) Synthetic signal-dependent and neighboring correlated (SDNC) noise. (d) Real noise. We add a constant value to the noise maps for better visualizing the signal dependency." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 310, + 546, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 310, + 546, + 479 + ], + "spans": [ + { + "bbox": [ + 304, + 310, + 546, + 479 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 310, + 546, + 479 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 304, + 310, + 546, + 479 + ], + "type": "text", + "content": " is the neighboring correlated noise and " + }, + { + "bbox": [ + 304, + 310, + 546, + 479 + ], + "type": "inline_equation", + "content": "\\Omega_{\\mathbf{v}}" + }, + { + "bbox": [ + 304, + 310, + 546, + 479 + ], + "type": "text", + "content": " is the local patch of " + }, + { + "bbox": [ + 304, + 310, + 546, + 479 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 304, + 310, + 546, + 479 + ], + "type": "text", + "content": ", centered at pixel " + }, + { + "bbox": [ + 304, + 310, + 546, + 479 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 310, + 546, + 479 + ], + "type": "text", + "content": ". By processing the neighboring uncorrelated noise " + }, + { + "bbox": [ + 304, + 310, + 546, + 479 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 304, + 310, + 546, + 479 + ], + "type": "text", + "content": " with the neighboring correlation operator, which is learned by our proposed noise synthesizing framework in Section 3.2, the final generated noise performs similar characteristics to real noise, as demonstrated in Figure 1(c) and (d). 
For the purpose of clarity, we use SDNU noise to refer to the intermediate synthesized signal-dependent and neighboring uncorrelated noise " + }, + { + "bbox": [ + 304, + 310, + 546, + 479 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 304, + 310, + 546, + 479 + ], + "type": "text", + "content": ", and SDNC noise to refer to the final generated signal-dependent and neighboring correlated noise " + }, + { + "bbox": [ + 304, + 310, + 546, + 479 + ], + "type": "inline_equation", + "content": "\\mathbf{n}" + }, + { + "bbox": [ + 304, + 310, + 546, + 479 + ], + "type": "text", + "content": ". In the following sections, we will introduce the proposed noise synthesizing framework to explicitly learn the neighboring correlation and signal dependency of noise." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 489, + 474, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 489, + 474, + 502 + ], + "spans": [ + { + "bbox": [ + 306, + 489, + 474, + 502 + ], + "type": "text", + "content": "3.2. Noise Synthesizing Framework" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 509, + 545, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 509, + 545, + 664 + ], + "spans": [ + { + "bbox": [ + 304, + 509, + 545, + 664 + ], + "type": "text", + "content": "Given paired sRGB real-world noisy and clean images " + }, + { + "bbox": [ + 304, + 509, + 545, + 664 + ], + "type": "inline_equation", + "content": "(\\pmb{y},\\pmb{x})" + }, + { + "bbox": [ + 304, + 509, + 545, + 664 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 509, + 545, + 664 + ], + "type": "inline_equation", + "content": "\\pmb{y} = \\pmb{x} + \\pmb{n}" + }, + { + "bbox": [ + 304, + 509, + 545, + 664 + ], + "type": "text", + "content": ", our proposed framework aims to learn the neighboring correlation-aware noise model using paired data. Our proposed framework, as illustrated in Figure 2, comprises three networks: a gain estimation network (GENet), a noise-level prediction network (NPNet), and a neighboring correlation network (NCNet). GENet estimates the gain factor from a noisy image, which serves to amplify the synthesized noise, similar to the ISO level. NPNet synthesizes the SDNU noise by incorporating the estimated gain factor and the clean image as inputs. Finally, NCNet explicitly models the neighboring correlation of sRGB real noise and generates the SDNC noise." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 305, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 665, + 545, + 713 + ], + "type": "text", + "content": "Gain Estimation Network. The gain estimation network (GENet) is designed to estimate the gain factor from a noisy image " + }, + { + "bbox": [ + 305, + 665, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\pmb{y}" + }, + { + "bbox": [ + 305, + 665, + 545, + 713 + ], + "type": "text", + "content": ", which serves as guidance to control the overall magnitude of the synthesized noise. 
The gain factor is de" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1685" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 87, + 545, + 270 + ], + "blocks": [ + { + "bbox": [ + 50, + 73, + 157, + 84 + ], + "lines": [ + { + "bbox": [ + 50, + 73, + 157, + 84 + ], + "spans": [ + { + "bbox": [ + 50, + 73, + 157, + 84 + ], + "type": "text", + "content": "Noise-level prediction network" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 50, + 87, + 545, + 270 + ], + "lines": [ + { + "bbox": [ + 50, + 87, + 545, + 270 + ], + "spans": [ + { + "bbox": [ + 50, + 87, + 545, + 270 + ], + "type": "image", + "image_path": "483648faafa1e16ca21cecab943ac122dbf1a884d756eea629e328fddeb735d3.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 283, + 546, + 328 + ], + "lines": [ + { + "bbox": [ + 46, + 283, + 546, + 328 + ], + "spans": [ + { + "bbox": [ + 46, + 283, + 546, + 328 + ], + "type": "text", + "content": "Figure 2. The proposed noise synthesizing framework. Our NeCA contains three networks including the gain estimation network (GENet), noise-level prediction network (NPNet), and neighboring correlation network (NCNet). PD denotes the Pixel-shuffle Down-sampling scheme introduced in [29]. Local noise level estimation and global noise level estimation operations are formulated in Equation (13) and (6). The details of the network architecture and PD scheme are described in the supplementary material." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 340, + 287, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 340, + 287, + 374 + ], + "spans": [ + { + "bbox": [ + 46, + 340, + 287, + 374 + ], + "type": "text", + "content": "fined as the global noise level of the noisy image, which is the standard deviation calculated by every noise value in its noise " + }, + { + "bbox": [ + 46, + 340, + 287, + 374 + ], + "type": "inline_equation", + "content": "\\pmb{n}" + }, + { + "bbox": [ + 46, + 340, + 287, + 374 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 110, + 374, + 287, + 406 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 374, + 287, + 406 + ], + "spans": [ + { + "bbox": [ + 110, + 374, + 287, + 406 + ], + "type": "interline_equation", + "content": "\\beta = \\sqrt {\\frac {1}{N} \\sum_ {i , c} \\left(\\boldsymbol {n} _ {i , c} - \\bar {n}\\right) ^ {2}}, \\tag {6}", + "image_path": "7425eba4b3164a1fe557d5de1bf2a7d5ca2b74116f9c1f2f997a05ce66e3f23c.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 410, + 287, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 410, + 287, + 479 + ], + "spans": [ + { + "bbox": [ + 46, + 410, + 287, + 479 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 410, + 287, + 479 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 46, + 410, + 287, + 479 + ], + "type": "text", + "content": " is the defined global noise level of the noisy image " + }, + { + "bbox": [ + 46, + 410, + 287, + 479 + ], + "type": "inline_equation", + "content": "\\pmb{y}" + }, + { + "bbox": [ + 46, + 410, + 287, + 479 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 410, + 287, + 479 + ], + "type": "inline_equation", + "content": "\\bar{n}" + }, + { + "bbox": [ + 46, + 410, + 287, + 479 + ], + "type": "text", + "content": " is the mean of the noise " + }, + { + "bbox": [ + 46, + 410, + 287, + 479 + ], + "type": "inline_equation", + "content": "\\pmb{n}" + }, + { + "bbox": [ + 46, + 410, + 287, + 479 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 410, + 287, + 479 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 410, + 287, + 479 + ], + "type": "text", + "content": " is the total number of pixels in the noisy image. However, during testing, the calculated gain factor is unavailable. 
To solve this, we aim to estimate the gain factor from the noisy image using GENet:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 143, + 480, + 287, + 495 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 480, + 287, + 495 + ], + "spans": [ + { + "bbox": [ + 143, + 480, + 287, + 495 + ], + "type": "interline_equation", + "content": "\\hat {\\beta} = E (\\boldsymbol {y}), \\tag {7}", + "image_path": "e55b7bf851292ae78c842f1c953c8c4818b2d06aca24738b05aae939583947c9.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 501, + 287, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 501, + 287, + 596 + ], + "spans": [ + { + "bbox": [ + 46, + 501, + 287, + 596 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 501, + 287, + 596 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 46, + 501, + 287, + 596 + ], + "type": "text", + "content": " represents the GENet, and " + }, + { + "bbox": [ + 46, + 501, + 287, + 596 + ], + "type": "inline_equation", + "content": "\\hat{\\beta}" + }, + { + "bbox": [ + 46, + 501, + 287, + 596 + ], + "type": "text", + "content": " is the estimated gain factor by GENet, which is expected to be as close as the global noise level of the noisy image. The main reason to use the gain factor estimated from the noisy image rather than the ISO level is driven by a crucial factor. ISO levels are typically saved in the metadata of images. The requirement of metadata will limit the application of our noise synthesizing framework." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 597, + 287, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 597, + 287, + 657 + ], + "spans": [ + { + "bbox": [ + 46, + 597, + 287, + 657 + ], + "type": "text", + "content": "Noise-level Prediction Network. The noise-level prediction network (NPNet) learns a parametric model for the noise distribution defined in Equation (3). 
To achieve this, NPNet predicts the pixel-wise noise level " + }, + { + "bbox": [ + 46, + 597, + 287, + 657 + ], + "type": "inline_equation", + "content": "\\hat{\\sigma}_i" + }, + { + "bbox": [ + 46, + 597, + 287, + 657 + ], + "type": "text", + "content": " using the clean local patch " + }, + { + "bbox": [ + 46, + 597, + 287, + 657 + ], + "type": "inline_equation", + "content": "\\Omega_{\\pmb{x}}" + }, + { + "bbox": [ + 46, + 597, + 287, + 657 + ], + "type": "text", + "content": " and estimated gain factor " + }, + { + "bbox": [ + 46, + 597, + 287, + 657 + ], + "type": "inline_equation", + "content": "\\hat{\\beta}" + }, + { + "bbox": [ + 46, + 597, + 287, + 657 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 666, + 287, + 680 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 666, + 287, + 680 + ], + "spans": [ + { + "bbox": [ + 130, + 666, + 287, + 680 + ], + "type": "interline_equation", + "content": "\\hat {\\boldsymbol {\\sigma}} _ {i} = G _ {1} \\left(\\Omega_ {\\boldsymbol {x}}, \\hat {\\beta}\\right), \\tag {8}", + "image_path": "4633cfb5b2ab63a35c6a174f2d74999e57077422472ce8e65238a2416c2fa4a4.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "type": "inline_equation", + "content": "G_{1}" + }, + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "type": "text", + "content": " denotes the NPNet, which has three output channels to predict noise levels for each pixel. To effectively" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 340, + 545, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 340, + 545, + 411 + ], + "spans": [ + { + "bbox": [ + 305, + 340, + 545, + 411 + ], + "type": "text", + "content": "incorporate the gain factor into the NPNet, we first apply the layer normalization [4] to the feature map of convolution and then multiply the normalized feature map by the gain factor. In practice, NPNet directly outputs the predicted noise level map " + }, + { + "bbox": [ + 305, + 340, + 545, + 411 + ], + "type": "inline_equation", + "content": "\\hat{m}" + }, + { + "bbox": [ + 305, + 340, + 545, + 411 + ], + "type": "text", + "content": " by utilizing a clean image and gain factor:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 392, + 411, + 545, + 424 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 392, + 411, + 545, + 424 + ], + "spans": [ + { + "bbox": [ + 392, + 411, + 545, + 424 + ], + "type": "interline_equation", + "content": "\\hat {\\boldsymbol {m}} = G _ {1} (\\boldsymbol {x}, \\hat {\\beta}). 
\\tag {9}", + "image_path": "3bbb330c65c33a79e3c7e896ce048d4cbc30e2ec4be3465519c758ec412ffc7f.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 428, + 545, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 428, + 545, + 464 + ], + "spans": [ + { + "bbox": [ + 305, + 428, + 545, + 464 + ], + "type": "text", + "content": "Once the noise level map " + }, + { + "bbox": [ + 305, + 428, + 545, + 464 + ], + "type": "inline_equation", + "content": "\\hat{m}" + }, + { + "bbox": [ + 305, + 428, + 545, + 464 + ], + "type": "text", + "content": " is obtained, the SDNU noise " + }, + { + "bbox": [ + 305, + 428, + 545, + 464 + ], + "type": "inline_equation", + "content": "\\hat{v}" + }, + { + "bbox": [ + 305, + 428, + 545, + 464 + ], + "type": "text", + "content": " can be synthesized by using the sampling trick defined in Equation (4)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 464, + 545, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 464, + 545, + 523 + ], + "spans": [ + { + "bbox": [ + 305, + 464, + 545, + 523 + ], + "type": "text", + "content": "Neighboring Correlation Network. The neighboring correlation network (NCNet) performs as the neighboring correlation operator, described in Equation (5). By taking the noise value and its neighboring noise realization as input, NCNet generates the SDNC noise " + }, + { + "bbox": [ + 305, + 464, + 545, + 523 + ], + "type": "inline_equation", + "content": "\\hat{n}" + }, + { + "bbox": [ + 305, + 464, + 545, + 523 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 394, + 530, + 545, + 544 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 394, + 530, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 394, + 530, + 545, + 544 + ], + "type": "interline_equation", + "content": "\\hat {\\boldsymbol {n}} _ {i} = G _ {2} \\left(\\Omega_ {\\hat {\\boldsymbol {v}}}\\right), \\tag {10}", + "image_path": "1fbb9e4134e51a361aa7d3d60f01056e9cf069cf4805bf9734cc7e3c2064655a.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 550, + 545, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 550, + 545, + 586 + ], + "spans": [ + { + "bbox": [ + 305, + 550, + 545, + 586 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 550, + 545, + 586 + ], + "type": "inline_equation", + "content": "\\Omega_{\\hat{v}}" + }, + { + "bbox": [ + 305, + 550, + 545, + 586 + ], + "type": "text", + "content": " is the noise patch of " + }, + { + "bbox": [ + 305, + 550, + 545, + 586 + ], + "type": "inline_equation", + "content": "\\hat{v}" + }, + { + "bbox": [ + 305, + 550, + 545, + 586 + ], + "type": "text", + "content": " located at pixel " + }, + { + "bbox": [ + 305, + 550, + 545, + 586 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 305, + 550, + 545, + 586 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 550, + 545, + 586 + ], + "type": "inline_equation", + "content": "G_{2}" + }, + { + "bbox": [ + 305, + 550, + 545, + 586 + ], + "type": "text", + "content": " denotes the NCNet. 
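To make the interplay of Eqs. (8)-(10) concrete, the sketch below composes the three steps for a whole image; the reparameterized sampling step (noise level map multiplied by a unit-Gaussian sample) is an assumed reading of the sampling trick of Eq. (4), which is not restated in this section, and the network objects are placeholders.

```python
# Illustrative composition of the noise synthesis pipeline (Eqs. (8)-(10)).
import torch

def synthesize_noisy(npnet, ncnet, clean, beta_hat):
    """clean: (B, 3, H, W) sRGB image; beta_hat: (B,) estimated gain factors."""
    m_hat = npnet(clean, beta_hat)   # per-pixel noise level map (Eq. (9))
    eps = torch.randn_like(m_hat)    # unit Gaussian sample
    v_hat = m_hat * eps              # SDNU noise (assumed form of Eq. (4))
    n_hat = ncnet(v_hat)             # SDNC noise via NCNet (Eq. (10), applied convolutionally)
    return clean + n_hat             # synthetic noisy image
```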
The SDNC noise can be directly generated by taking the SDNU noise into the network:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 399, + 593, + 545, + 605 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 593, + 545, + 605 + ], + "spans": [ + { + "bbox": [ + 399, + 593, + 545, + 605 + ], + "type": "interline_equation", + "content": "\\hat {\\boldsymbol {n}} = G _ {2} (\\hat {\\boldsymbol {v}}). \\tag {11}", + "image_path": "384af025974489e07b243746f0d7e6621e184d5f522f2a6522392e6e34a2992b.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 611, + 399, + 623 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 611, + 399, + 623 + ], + "spans": [ + { + "bbox": [ + 306, + 611, + 399, + 623 + ], + "type": "text", + "content": "3.3. Loss Functions" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 629, + 545, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 629, + 545, + 688 + ], + "spans": [ + { + "bbox": [ + 305, + 629, + 545, + 688 + ], + "type": "text", + "content": "To jointly train the proposed networks, five loss functions are introduced: (1) standard deviation losses " + }, + { + "bbox": [ + 305, + 629, + 545, + 688 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{std1}" + }, + { + "bbox": [ + 305, + 629, + 545, + 688 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 629, + 545, + 688 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{std2}" + }, + { + "bbox": [ + 305, + 629, + 545, + 688 + ], + "type": "text", + "content": ", (2) adversarial losses " + }, + { + "bbox": [ + 305, + 629, + 545, + 688 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{adv1}" + }, + { + "bbox": [ + 305, + 629, + 545, + 688 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 629, + 545, + 688 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{adv2}" + }, + { + "bbox": [ + 305, + 629, + 545, + 688 + ], + "type": "text", + "content": ", (3) the regularization loss " + }, + { + "bbox": [ + 305, + 629, + 545, + 688 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{reg}" + }, + { + "bbox": [ + 305, + 629, + 545, + 688 + ], + "type": "text", + "content": ". The details of these loss functions will be introduced later." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": "Standard Deviation Loss. 
We introduce " + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{std1}" + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": " to enforce the estimated gain factor " + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\hat{\\beta}" + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": " by GENet to be close to the" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1686" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "type": "text", + "content": "global noise level " + }, + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "type": "text", + "content": " of the noisy image, which is defined as follows:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 95, + 287, + 110 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 95, + 287, + 110 + ], + "spans": [ + { + "bbox": [ + 116, + 95, + 287, + 110 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {s t d 1} = \\mathbb {E} _ {\\boldsymbol {y}} \\left[ (\\hat {\\beta} - \\beta) ^ {2} \\right], \\tag {12}", + "image_path": "35917567fe25604dc5b79edc04fd734ff7b24bd250a0c57980c333831553684a.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 114, + 258, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 114, + 258, + 126 + ], + "spans": [ + { + "bbox": [ + 47, + 114, + 258, + 126 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 114, + 258, + 126 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 47, + 114, + 258, + 126 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 114, + 258, + 126 + ], + "type": "inline_equation", + "content": "\\hat{\\beta}" + }, + { + "bbox": [ + 47, + 114, + 258, + 126 + ], + "type": "text", + "content": " are obtained by Equation (6) and (7)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 126, + 287, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 126, + 287, + 198 + ], + "spans": [ + { + "bbox": [ + 46, + 126, + 287, + 198 + ], + "type": "text", + "content": "The objective of NPNet is to predict the noise level map " + }, + { + "bbox": [ + 46, + 126, + 287, + 198 + ], + "type": "inline_equation", + "content": "\\hat{m}" + }, + { + "bbox": [ + 46, + 126, + 287, + 198 + ], + "type": "text", + "content": " by taking the clean image and gain factor as input. 
However, since the groundtruth noise level map is not available, we propose to use a simple local noise level estimation method to approximate the noise level map " + }, + { + "bbox": [ + 46, + 126, + 287, + 198 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 46, + 126, + 287, + 198 + ], + "type": "text", + "content": " from the noise, which is calculated as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 92, + 204, + 287, + 224 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 204, + 287, + 224 + ], + "spans": [ + { + "bbox": [ + 92, + 204, + 287, + 224 + ], + "type": "interline_equation", + "content": "\\boldsymbol {m} _ {i, c} = \\sqrt {\\mathcal {M F} \\left(\\Omega_ {n} ^ {2}\\right) - \\mathcal {M F} ^ {2} \\left(\\Omega_ {n}\\right)}, \\tag {13}", + "image_path": "e2262ac9f1565d47ac1d823f7c062af31b23f5f7434c511d8956a86973301ea2.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "spans": [ + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "type": "inline_equation", + "content": "\\Omega_{n}" + }, + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "type": "text", + "content": " denotes the " + }, + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "type": "inline_equation", + "content": "7\\times 7" + }, + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "type": "text", + "content": " noise patch located at pixel " + }, + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "type": "text", + "content": ", channel " + }, + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "type": "text", + "content": " of noise map " + }, + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "type": "inline_equation", + "content": "\\pmb{n}" + }, + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "type": "inline_equation", + "content": "\\mathcal{MF}(\\cdot)" + }, + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "type": "text", + "content": " represents the mean filter. Then the " + }, + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{std2}" + }, + { + "bbox": [ + 47, + 228, + 287, + 264 + ], + "type": "text", + "content": " is defined as follows:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 269, + 287, + 285 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 269, + 287, + 285 + ], + "spans": [ + { + "bbox": [ + 107, + 269, + 287, + 285 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {s t d 2} = \\mathbb {E} _ {\\boldsymbol {x}, \\boldsymbol {y}} \\left[ \\left| | \\hat {\\boldsymbol {m}} - \\boldsymbol {m} \\right| \\right| _ {2} ^ {2} ]. \\tag {14}", + "image_path": "f42e1790ceda0d20a204379071b3fc23cac4e1e4e35c2100c19c034760fdb4e9.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "spans": [ + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "text", + "content": "Adversarial Loss. 
In order to guarantee that the generated noise shares the similar distribution with real noise, we introduce two adversarial losses. Our first adversarial loss " + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{adv1}" + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "text", + "content": " is imposed between the final synthetic SDNC noise and real noise " + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "inline_equation", + "content": "(\\hat{n}, n)" + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "text", + "content": " to enforce the highly neighboring correlation in the generated noise, similar to that of the real noise. Our second adversarial loss " + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{adv2}" + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "text", + "content": " is calculated by using Pixel-shuffle Down-sampling [29] versions of synthesized intermediate noise " + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\hat{v}" + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "text", + "content": " and real noise " + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "text", + "content": ". Specifically, " + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{adv2}" + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "text", + "content": " servers as a complementary loss for " + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{std2}" + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "text", + "content": " because estimating the approximate noise level map using Equation (13) may not be reliable, as this method struggles to differentiate between noise originating from different intensities. However, directly calculating the adversarial loss between noise " + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\hat{v}" + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "text", + "content": " is unreasonable since " + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\hat{v}" + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "text", + "content": " is neighboring uncorrelated. 
To address this problem, we utilize the Pixel-shuffle Down-sampling (PD) scheme proposed in [29] to obtain down-sampled versions " + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "inline_equation", + "content": "((\\hat{v})_{\\downarrow s}, (n)_{\\downarrow s})" + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "text", + "content": " of both synthetic noise " + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\hat{v}" + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "text", + "content": " and real noise " + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "text", + "content": ". Here " + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\downarrow_s" + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "text", + "content": " denotes the PD operation with a stride of " + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "text", + "content": " (in this paper, " + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "text", + "content": " is set to 3). According to [29], the neighboring correlation in the PD real noise " + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "inline_equation", + "content": "(n)_{\\downarrow s}" + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "text", + "content": " will be greatly attenuated. This allows us to calculate the adversarial loss between the two down-sampled versions. We utilize WGAN-GP [13] to compute adversarial losses, while " + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{adv1}" + }, + { + "bbox": [ + 46, + 290, + 287, + 589 + ], + "type": "text", + "content": " is defined as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 116, + 596, + 287, + 609 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 596, + 287, + 609 + ], + "spans": [ + { + "bbox": [ + 116, + 596, + 287, + 609 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {a d v 1} = - \\mathbb {E} _ {\\hat {\\boldsymbol {n}}} \\left[ D _ {1} (\\hat {\\boldsymbol {n}}) \\right], \\tag {15}", + "image_path": "12e1f9acf76d8d24220b3d7f936c554b192adec59ad3e5a7f1d39193ce711758.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 615, + 287, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 615, + 287, + 651 + ], + "spans": [ + { + "bbox": [ + 46, + 615, + 287, + 651 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 615, + 287, + 651 + ], + "type": "inline_equation", + "content": "D_{1}" + }, + { + "bbox": [ + 46, + 615, + 287, + 651 + ], + "type": "text", + "content": " is the discriminator for NCNet, which scores the realness of synthesized noise. 
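A compact sketch of the PD operation and of the generator-side adversarial terms follows; PD with stride s is written here in the common "sub-sample into s^2 mosaics" form attributed to [29], and the discriminators are placeholders, so the details should be read as assumptions rather than the exact implementation.

```python
# Sketch of Pixel-shuffle Down-sampling (PD) and the generator-side terms of
# Eqs. (15)-(16).
import torch

def pixel_shuffle_down(x, s=3):
    """Split (B, C, H, W) into (B*s*s, C, H//s, W//s) mosaics so that adjacent
    pixels of the input land in different mosaics, attenuating the spatial
    correlation of real noise."""
    b, c, h, w = x.shape
    h, w = h - h % s, w - w % s                      # crop so H, W divide by s
    x = x[:, :, :h, :w].reshape(b, c, h // s, s, w // s, s)
    x = x.permute(0, 3, 5, 1, 2, 4)                  # (B, s, s, C, H//s, W//s)
    return x.reshape(b * s * s, c, h // s, w // s)

def generator_adv_losses(d1, d2, n_hat, v_hat, s=3):
    l_adv1 = -d1(n_hat).mean()                           # Eq. (15)
    l_adv2 = -d2(pixel_shuffle_down(v_hat, s)).mean()    # Eq. (16)
    return l_adv1, l_adv2
```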
Similarly, " + }, + { + "bbox": [ + 46, + 615, + 287, + 651 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{adv2}" + }, + { + "bbox": [ + 46, + 615, + 287, + 651 + ], + "type": "text", + "content": " is computed as follows:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 100, + 658, + 287, + 672 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 658, + 287, + 672 + ], + "spans": [ + { + "bbox": [ + 100, + 658, + 287, + 672 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {a d v 2} = - \\mathbb {E} _ {(\\hat {\\boldsymbol {v}}) \\downarrow_ {s}} \\left[ D _ {2} \\left((\\hat {\\boldsymbol {v}}) \\downarrow_ {s}\\right) \\right], \\tag {16}", + "image_path": "db810f0d9f84ecc942934436f259a9a65b9f319af41d6a2e96efb7525bf1e796.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "inline_equation", + "content": "D_{2}" + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": " is the discriminator for NPNet. More detail about the PD scheme and the discriminator losses will be discussed in the supplementary material." + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 309, + 71, + 545, + 196 + ], + "blocks": [ + { + "bbox": [ + 309, + 71, + 545, + 196 + ], + "lines": [ + { + "bbox": [ + 309, + 71, + 545, + 196 + ], + "spans": [ + { + "bbox": [ + 309, + 71, + 545, + 196 + ], + "type": "image", + "image_path": "4f05c02adcff079c93c835184027659623560bf60716b415777050469c80ba15.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 211, + 545, + 255 + ], + "lines": [ + { + "bbox": [ + 305, + 211, + 545, + 255 + ], + "spans": [ + { + "bbox": [ + 305, + 211, + 545, + 255 + ], + "type": "text", + "content": "Figure 3. The designed two inference versions. NeCA-W utilizes the whole framework to synthesize SDNC noise. NeCA-S only adopts NCNet to synthesize signal-independent neighboring correlated (SINC) noise by taking the AWGN as input." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 266, + 545, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 266, + 545, + 316 + ], + "spans": [ + { + "bbox": [ + 305, + 266, + 545, + 316 + ], + "type": "text", + "content": "Regularization Loss. Besides the losses mentioned above, a regularization loss " + }, + { + "bbox": [ + 305, + 266, + 545, + 316 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{reg}" + }, + { + "bbox": [ + 305, + 266, + 545, + 316 + ], + "type": "text", + "content": " is utilized to stabilize training. 
It is imposed between the estimated gain factor " + }, + { + "bbox": [ + 305, + 266, + 545, + 316 + ], + "type": "inline_equation", + "content": "\\hat{\\beta}" + }, + { + "bbox": [ + 305, + 266, + 545, + 316 + ], + "type": "text", + "content": " and the predicted noise level map " + }, + { + "bbox": [ + 305, + 266, + 545, + 316 + ], + "type": "inline_equation", + "content": "\\hat{m}" + }, + { + "bbox": [ + 305, + 266, + 545, + 316 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 380, + 323, + 545, + 338 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 380, + 323, + 545, + 338 + ], + "spans": [ + { + "bbox": [ + 380, + 323, + 545, + 338 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {r e g} = \\mathbb {E} _ {\\boldsymbol {x}, \\boldsymbol {y}} \\left[ \\| \\boldsymbol {w} \\| _ {2} ^ {2} \\right], \\tag {17}", + "image_path": "efd783e0006809a6a5b3869bdc220dee15f5f9843274734d97e9c16304a985b8.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 346, + 408, + 359 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 346, + 408, + 359 + ], + "spans": [ + { + "bbox": [ + 306, + 346, + 408, + 359 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 306, + 346, + 408, + 359 + ], + "type": "inline_equation", + "content": "\\pmb{w}_{i,c} = \\hat{\\pmb{m}}_{i,c} - \\hat{\\beta}" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 359, + 545, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 359, + 545, + 382 + ], + "spans": [ + { + "bbox": [ + 305, + 359, + 545, + 382 + ], + "type": "text", + "content": "Finally, the full loss functions of the framework are described as follows:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 310, + 392, + 545, + 405 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 392, + 545, + 405 + ], + "spans": [ + { + "bbox": [ + 310, + 392, + 545, + 405 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\mathcal {L} _ {\\text {r e g}} + \\lambda_ {1} \\mathcal {L} _ {\\text {a d v 1}} + \\lambda_ {2} \\mathcal {L} _ {\\text {a d v 2}} + \\lambda_ {3} \\mathcal {L} _ {\\text {s t d 1}} + \\lambda_ {4} \\mathcal {L} _ {\\text {s t d 2}}, \\tag {18}", + "image_path": "f406826873443f6eae37feac12804e91f9fffb7bc62ffc86b8b3be33310e4f0b.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 412, + 545, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 412, + 545, + 436 + ], + "spans": [ + { + "bbox": [ + 305, + 412, + 545, + 436 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 412, + 545, + 436 + ], + "type": "inline_equation", + "content": "\\lambda_1, \\lambda_2, \\lambda_3" + }, + { + "bbox": [ + 305, + 412, + 545, + 436 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 412, + 545, + 436 + ], + "type": "inline_equation", + "content": "\\lambda_4" + }, + { + "bbox": [ + 305, + 412, + 545, + 436 + ], + "type": "text", + "content": " are hyperparameters to balance the importance between different losses." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 444, + 401, + 456 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 444, + 401, + 456 + ], + "spans": [ + { + "bbox": [ + 306, + 444, + 401, + 456 + ], + "type": "text", + "content": "3.4. 
Inference Stage" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 305, + 462, + 545, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 462, + 545, + 641 + ], + "spans": [ + { + "bbox": [ + 305, + 462, + 545, + 641 + ], + "type": "text", + "content": "We provide two inference versions to generate noise, as illustrated in Figure 3: (1) NeCA-W applies the entire framework to synthesize \"real\" noise. It first estimates the gain factor from an arbitrary noisy image and synthesizes noise by conditioning on a clean image and the estimated gain factor. (2) NeCA-S is the simplified version of NeCA-W which uses only NCNet for inference. In this method, AWGN is synthesized and then NCNet maps it with the neighboring correlation. We refer to this synthetic noise as signal-independent neighboring correlated (SINC) noise. Notably, NeCA-S still enhances the performance of deep denoiser on real noise, even though the denoiser is trained using the synthesized SINC noise. In the meantime, this inference version only requires minimal data to train the NCNet, which we will demonstrate in our experiments." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 306, + 651, + 387, + 664 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 651, + 387, + 664 + ], + "spans": [ + { + "bbox": [ + 306, + 651, + 387, + 664 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 306, + 671, + 422, + 684 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 671, + 422, + 684 + ], + "spans": [ + { + "bbox": [ + 306, + 671, + 422, + 684 + ], + "type": "text", + "content": "4.1. Experimental Setup" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": "To assess the effectiveness of our proposed noise synthesizing framework, we conduct experiments in two parts:" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1687" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 286, + 119 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 286, + 119 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 286, + 119 + ], + "type": "text", + "content": "First, we assess the quality of the generated noise. Second, we examine the performance of NeCA on the downstream image denoising task. The details of the experiments will be discussed in the following subsections." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 121, + 286, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 121, + 286, + 263 + ], + "spans": [ + { + "bbox": [ + 46, + 121, + 286, + 263 + ], + "type": "text", + "content": "Dataset. We evaluate our NeCA on the medium version of Smartphone Image Denoising Dataset (SIDD) [2], which comprises 320 noisy-clean image pairs captured by five different smartphone cameras, including Samsung Galaxy S6 Edge (S6), iPhone 7 (IP), Google Pixel (GP), Motorola Nexus 6 (N6), and LG G4 (G4). 
These images are collected in ten different scenes with varying ISO levels and lighting conditions. The SIDD provides both raw-RGB and sRGB images, with the sRGB version obtained by rendering the captured raw-RGB images through the manually defined ISP pipeline provided in [2]. In our experiments, we use the sRGB version to evaluate the proposed method." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 265, + 286, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 265, + 286, + 384 + ], + "spans": [ + { + "bbox": [ + 46, + 265, + 286, + 384 + ], + "type": "text", + "content": "Metrics. We evaluate the performance of NeCA using three metrics: discrete Kullback-Leibler (KL) divergence, Peak Signal-to-Noise Ratio (PSNR), and Structural Similarity (SSIM) [24]. The KL divergence is used to measure the similarity of histograms between real noise and generated noise. The histogram range is set from " + }, + { + "bbox": [ + 46, + 265, + 286, + 384 + ], + "type": "inline_equation", + "content": "-0.1" + }, + { + "bbox": [ + 46, + 265, + 286, + 384 + ], + "type": "text", + "content": " to 0.1 with 64 intervals. The PSNR and SSIM are used to evaluate the performance of deep denoisers. Higher PSNR and SSIM values reflect better denoising performance, while a smaller KL divergence represents better noise synthesizing quality." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 385, + 286, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 385, + 286, + 492 + ], + "spans": [ + { + "bbox": [ + 46, + 385, + 286, + 492 + ], + "type": "text", + "content": "Implementation Details. All the networks are optimized using the Adam optimizer [18] with a batch size of 32. Images are cropped to a size of " + }, + { + "bbox": [ + 46, + 385, + 286, + 492 + ], + "type": "inline_equation", + "content": "96 \times 96" + }, + { + "bbox": [ + 46, + 385, + 286, + 492 + ], + "type": "text", + "content": " pixels for training. For noise generation, we train individual networks for 300 epochs with a learning rate of " + }, + { + "bbox": [ + 46, + 385, + 286, + 492 + ], + "type": "inline_equation", + "content": "10^{-4}" + }, + { + "bbox": [ + 46, + 385, + 286, + 492 + ], + "type": "text", + "content": ". For denoising, we select the DnCNN [27] as the default deep denoiser for comparison and train it for 300 epochs with a learning rate of " + }, + { + "bbox": [ + 46, + 385, + 286, + 492 + ], + "type": "inline_equation", + "content": "10^{-3}" + }, + { + "bbox": [ + 46, + 385, + 286, + 492 + ], + "type": "text", + "content": ". The " + }, + { + "bbox": [ + 46, + 385, + 286, + 492 + ], + "type": "inline_equation", + "content": "\lambda_{1}, \lambda_{2}, \lambda_{3}" + }, + { + "bbox": [ + 46, + 385, + 286, + 492 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 385, + 286, + 492 + ], + "type": "inline_equation", + "content": "\lambda_{4}" + }, + { + "bbox": [ + 46, + 385, + 286, + 492 + ], + "type": "text", + "content": " in the loss functions are set to 0.1, 0.1, 50, and 10, respectively." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 502, + 185, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 502, + 185, + 514 + ], + "spans": [ + { + "bbox": [ + 47, + 502, + 185, + 514 + ], + "type": "text", + "content": "4.2. 
Noise Synthesis on SIDD" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 521, + 286, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 521, + 286, + 688 + ], + "spans": [ + { + "bbox": [ + 46, + 521, + 286, + 688 + ], + "type": "text", + "content": "Compared Baselines. We compare NeCA with several noise models, including Additive White Gaussian Noise (AWGN), C2N [16], and the NLF (described in Equation (1)). To synthesize AWGN, we estimate the noise level from each noisy image by applying a noise estimation method introduced in [7] and add it to its corresponding clean image. To synthesize noise using the C2N, we directly utilize the pretrained model provided by the authors. For the NLF, we synthesize heteroscedastic Gaussian noise on the raw-RGB clean images from SIDD, where the signal-dependent term " + }, + { + "bbox": [ + 46, + 521, + 286, + 688 + ], + "type": "inline_equation", + "content": "\\sigma_s^2" + }, + { + "bbox": [ + 46, + 521, + 286, + 688 + ], + "type": "text", + "content": " and signal-independent term " + }, + { + "bbox": [ + 46, + 521, + 286, + 688 + ], + "type": "inline_equation", + "content": "\\sigma_c^2" + }, + { + "bbox": [ + 46, + 521, + 286, + 688 + ], + "type": "text", + "content": " are obtained from the metadata provided by SIDD. We then apply the same ISP pipeline as used in the SIDD to render them to sRGB. We refer to this model as NLF-ISP for simplicity." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 689, + 286, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 286, + 712 + ], + "type": "text", + "content": "Preparation. We evaluate the results of generated noise on each camera in SIDD, where " + }, + { + "bbox": [ + 46, + 689, + 286, + 712 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 46, + 689, + 286, + 712 + ], + "type": "text", + "content": " of image pairs are allo" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 307, + 70, + 545, + 206 + ], + "blocks": [ + { + "bbox": [ + 307, + 70, + 545, + 206 + ], + "lines": [ + { + "bbox": [ + 307, + 70, + 545, + 206 + ], + "spans": [ + { + "bbox": [ + 307, + 70, + 545, + 206 + ], + "type": "table", + "html": "
CameraMetricsAWGNC2N [16]NeCANLF-ISPReal
G4KL1.97550.16600.02420.0102-
PSNR28.1537.8138.8538.5140.60
GPKL1.83510.13150.04320.0126-
PSNR28.4537.0837.7237.7438.33
IPKL1.85620.05810.04100.0475-
PSNR28.0139.1239.4639.5339.45
N6KL2.14650.35240.02060.0063-
PSNR26.3133.5935.5434.8435.56
S6KL0.45170.45170.03020.0902-
PSNR27.2233.1835.5635.9936.85
AverageKL2.00620.21290.03420.0414-
PSNR27.9036.3737.5837.5938.27
", + "image_path": "bc2a396d6882e1a32e2f1ff93c4ed58fee080fdee2b2fbba7eb630e92807cf32.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 215, + 545, + 247 + ], + "lines": [ + { + "bbox": [ + 305, + 215, + 545, + 247 + ], + "spans": [ + { + "bbox": [ + 305, + 215, + 545, + 247 + ], + "type": "text", + "content": "Table 1. Quantitative results of synthetic noise. The results are computed on the validation sets of five SIDD cameras with KL divergence and PSNR (dB). The best results are highlighted in bold." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 275, + 545, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 275, + 545, + 502 + ], + "spans": [ + { + "bbox": [ + 304, + 275, + 545, + 502 + ], + "type": "text", + "content": "cated for training the noise synthesizing framework, while the rest " + }, + { + "bbox": [ + 304, + 275, + 545, + 502 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 304, + 275, + 545, + 502 + ], + "type": "text", + "content": " are reserved for validation. The quality of the synthesized noise was evaluated using two metrics: KL divergence and PSNR. We calculate the KL divergence between the histograms of ground truth noise in the validation set and the noise synthesized by NeCA with clean images and corresponding gain factors from the validation set. Notably, the gain factors used for evaluation are estimated by GENet from the noisy images paired with the clean images, as they cannot be set to random values for evaluation. Besides, we also use the PSNR to further evaluate the quality of synthesized noisy images. We train the DnCNN with the synthesized noisy-clean image pairs on the training set and apply it to denoise the noisy images from the validation set. We calculate the PSNR between the denoised images and corresponding clean images to evaluate the denoising performance. In order to maintain consistency between the training and validation sets, we ensure that both sets contain the same set of ISO levels." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "content": "Noise Synthesis Results. Table 1 shows the KL divergence and PSNR results computed on validation sets of five devices. For the results of average KL divergence over all five cameras, our method exhibits the best performance among all noise models. Additionally, our method lags slightly behind NLF-ISP by " + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "inline_equation", + "content": "0.01\\mathrm{dB}" + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "content": " on the average PSNR. It is worth noting that noise samples generated by NLF-ISP are first synthesized in the raw-RGB domain and then rendered to sRGB using the same ISP pipelines as in SIDD, suggesting the minimal discrepancies between noise samples from NLF-ISP and real data. The similar results on each camera between NLF-ISP and our NeCA model demonstrate the promising performance of the proposed model. Figure 4 shows generated noise maps from compared methods. Remarkable visual similarities observed between generated noise maps and real noise maps indicate that our framework is capable to synthesize realistic noise." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1688" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 69, + 287, + 473 + ], + "blocks": [ + { + "bbox": [ + 49, + 69, + 287, + 473 + ], + "lines": [ + { + "bbox": [ + 49, + 69, + 287, + 473 + ], + "spans": [ + { + "bbox": [ + 49, + 69, + 287, + 473 + ], + "type": "image", + "image_path": "8c9671b6b6592f1848853eeac1bfc697e649c4ce13775fddf19081826b9bf377.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 487, + 288, + 533 + ], + "lines": [ + { + "bbox": [ + 46, + 487, + 288, + 533 + ], + "spans": [ + { + "bbox": [ + 46, + 487, + 288, + 533 + ], + "type": "text", + "content": "Figure 4. Visualization of synthetic noise samples under different ISO-lighting conditions on SIDD [2]. The displayed images, from left to right, correspond to clean image, C2N, Our method, NLF-ISP and real noisy image." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 547, + 248, + 561 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 547, + 248, + 561 + ], + "spans": [ + { + "bbox": [ + 47, + 547, + 248, + 561 + ], + "type": "text", + "content": "4.3. Applications on Real Image Denoising" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 567, + 287, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 567, + 287, + 651 + ], + "spans": [ + { + "bbox": [ + 46, + 567, + 287, + 651 + ], + "type": "text", + "content": "Compared Baselines. Various noise generation methods are evaluated to demonstrate the effectiveness of these baselines performed on the downstream real image denoising task, including GCBD [8], C2N [16], Flow-sRGB [20], NeCA-S and NeCA-W. When assessing denoising performance, classical denoisers such as BM3D [9] and WNNM [12] are also included in the experiments." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 653, + 287, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 287, + 712 + ], + "type": "text", + "content": "Preparation. We establish the synthetic SIDD where clean images are from the original SIDD and noisy images are synthesized by using NeCA-W and NeCA-S. Specifically, the proposed framework is trained on the entire SIDD for each camera and the whole framework (NeCA-W) is used to" + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 307, + 69, + 547, + 180 + ], + "blocks": [ + { + "bbox": [ + 307, + 69, + 547, + 180 + ], + "lines": [ + { + "bbox": [ + 307, + 69, + 547, + 180 + ], + "spans": [ + { + "bbox": [ + 307, + 69, + 547, + 180 + ], + "type": "table", + "html": "
MethodSIDDDND
PSNR(dB)SSIMPSNR(dB)SSIM
BM3D [9]25.650.68534.510.851
WNNM [12]25.780.80934.670.865
GCBD [8]--35.580.922
C2N* [16]33.760.90136.080.903
Flow-sRGB* [20]34.740.912--
NeCA-S*36.100.92736.960.938
NeCA-W*36.820.93237.530.940
Real*37.120.93437.890.942
", + "image_path": "c6f33c507302525de1289666ba278ab512610d6fd5630cf7205adb7f1b428470.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 184, + 545, + 228 + ], + "lines": [ + { + "bbox": [ + 305, + 184, + 545, + 228 + ], + "spans": [ + { + "bbox": [ + 305, + 184, + 545, + 228 + ], + "type": "text", + "content": "Table 2. Quantitative evaluation of denoising performance on SIDD and DND benchmark. * denotes the DnCNN denoiser is trained on either the synthetic or real image pairs with the SIDD. (red: the best result, blue: the second best)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 244, + 546, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 244, + 546, + 460 + ], + "spans": [ + { + "bbox": [ + 304, + 244, + 546, + 460 + ], + "type": "text", + "content": "generate noise for each clean image from the SIDD, where the gain factor is estimated from its paired noisy image. On the other hand, We train NeCA with a few paired images, e.g., three image pairs with varying ISO levels (800, 1600, 3200) from camera N6 and use only NCNet (NeCA-S) to generate signal-independent neighboring correlated (SINC) noise for clean images from SIDD, as seen in Figure 3. The synthesized SINC noise is added to the clean image. For each clean image, the noise level of AWGN is randomly selected from a range of [0, 75]. Our experiments with NeCA-S aim to demonstrate the advantages of explicitly modeling the neighboring correlation of real noise. Other sRGB real noise generation baselines, including C2N [16] and Flow-sRGB [20], also follow the same experimental settings with NeCA-W. With the synthetic noisy-clean image pairs, we train the DnCNN on either synthetic or real pairs of SIDD. Then the denoising performances are evaluated on both the SIDD and DND [23] benchmarks." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 460, + 546, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 460, + 546, + 700 + ], + "spans": [ + { + "bbox": [ + 304, + 460, + 546, + 700 + ], + "type": "text", + "content": "Results and Discussions. Table 2 shows the denoising results of the compared denoisers. Obviously, DnCNN trained on the synthetic samples from NeCA-W, achieves the best results among all compared methods in terms of both PSNR and SSIM. Specifically, NeCA-W gets 2.08 dB gains from Flow-sRGB on the SIDD benchmark, where Flow-sRGB is an end-to-end flow model which implicitly synthesizes real noise. The improvement of denoising performance obtained by NeCA-W indicates the accuracy of our noise model. Moreover, even though the denoising performance of NeCA-W still does not surpass the denoiser trained on the real data, the slight PSNR and SSIM discrepancies between them suggest our model does shrink this gap. Furthermore, the most impressive thing is that NeCA-S still achieves comparable denoising results on both the SIDD and DND benchmarks, outperforming the Flow-sRGB by a large margin. Note that the synthetic noise from NeCA-S is signal-independent. The superior performance of NeCA-S further verifies explicitly modeling neighboring correlation benefits the sRGB real noise synthesis." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 317, + 701, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 701, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 317, + 701, + 545, + 713 + ], + "type": "text", + "content": "Figure 6 and 7 show the denoised images from the SIDD" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1689" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 80, + 70, + 152, + 142 + ], + "blocks": [ + { + "bbox": [ + 80, + 70, + 152, + 142 + ], + "lines": [ + { + "bbox": [ + 80, + 70, + 152, + 142 + ], + "spans": [ + { + "bbox": [ + 80, + 70, + 152, + 142 + ], + "type": "image", + "image_path": "6d68097a8c3c6eb4f4bea8fea82bcfca1935609ca28c61b4a94b06f9b258630e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 100, + 144, + 132, + 153 + ], + "lines": [ + { + "bbox": [ + 100, + 144, + 132, + 153 + ], + "spans": [ + { + "bbox": [ + 100, + 144, + 132, + 153 + ], + "type": "text", + "content": "(a) Clean" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 153, + 70, + 223, + 141 + ], + "blocks": [ + { + "bbox": [ + 153, + 70, + 223, + 141 + ], + "lines": [ + { + "bbox": [ + 153, + 70, + 223, + 141 + ], + "spans": [ + { + "bbox": [ + 153, + 70, + 223, + 141 + ], + "type": "image", + "image_path": "ef022acb930b4a1c30e1c88d9e9216049df18d68f066d87dc583f88bf776d432.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 166, + 143, + 211, + 153 + ], + "lines": [ + { + "bbox": [ + 166, + 143, + 211, + 153 + ], + "spans": [ + { + "bbox": [ + 166, + 143, + 211, + 153 + ], + "type": "text", + "content": "(b) " + }, + { + "bbox": [ + 166, + 143, + 211, + 153 + ], + "type": "inline_equation", + "content": "\\hat{\\beta} = 0.02" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 225, + 70, + 294, + 141 + ], + "blocks": [ + { + "bbox": [ + 225, + 70, + 294, + 141 + ], + "lines": [ + { + "bbox": [ + 225, + 70, + 294, + 141 + ], + "spans": [ + { + "bbox": [ + 225, + 70, + 294, + 141 + ], + "type": "image", + "image_path": "4739bb193f2c932858b2f22493569e60fdcd48759bdf30c48bc919404879c02f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 238, + 143, + 282, + 153 + ], + "lines": [ + { + "bbox": [ + 238, + 143, + 282, + 153 + ], + "spans": [ + { + "bbox": [ + 238, + 143, + 282, + 153 + ], + "type": "text", + "content": "(c) " + }, + { + "bbox": [ + 238, + 143, + 282, + 153 + ], + "type": "inline_equation", + "content": "\\hat{\\beta} = 0.06" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 294, + 70, + 367, + 141 + ], + "blocks": [ + { + "bbox": [ + 294, + 70, + 367, + 141 + ], + "lines": [ + { + "bbox": [ + 294, + 70, + 367, + 141 + ], + "spans": [ + { + "bbox": [ + 294, + 70, + 367, + 141 + ], + "type": "image", + "image_path": "f45d3d6be20a3c36a584d69ecb19eb7d7c5fc047b1e283311d8c8140c3f1a9c1.jpg" + } + ] + } + 
], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 310, + 143, + 354, + 153 + ], + "lines": [ + { + "bbox": [ + 310, + 143, + 354, + 153 + ], + "spans": [ + { + "bbox": [ + 310, + 143, + 354, + 153 + ], + "type": "text", + "content": "(d) " + }, + { + "bbox": [ + 310, + 143, + 354, + 153 + ], + "type": "inline_equation", + "content": "\\hat{\\beta} = 0.10" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 369, + 70, + 439, + 141 + ], + "blocks": [ + { + "bbox": [ + 369, + 70, + 439, + 141 + ], + "lines": [ + { + "bbox": [ + 369, + 70, + 439, + 141 + ], + "spans": [ + { + "bbox": [ + 369, + 70, + 439, + 141 + ], + "type": "image", + "image_path": "516ec8201b7b54e84f809303b973dd23781f225f6e0b96343afa85a9d8f861d0.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 382, + 143, + 425, + 154 + ], + "lines": [ + { + "bbox": [ + 382, + 143, + 425, + 154 + ], + "spans": [ + { + "bbox": [ + 382, + 143, + 425, + 154 + ], + "type": "text", + "content": "(e) " + }, + { + "bbox": [ + 382, + 143, + 425, + 154 + ], + "type": "inline_equation", + "content": "\\hat{\\beta} = 0.14" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 440, + 70, + 511, + 141 + ], + "blocks": [ + { + "bbox": [ + 440, + 70, + 511, + 141 + ], + "lines": [ + { + "bbox": [ + 440, + 70, + 511, + 141 + ], + "spans": [ + { + "bbox": [ + 440, + 70, + 511, + 141 + ], + "type": "image", + "image_path": "5e2617478418d058fbb049cac1aeefec9b17fe721f91ce0e60702bc087e96e91.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 454, + 143, + 496, + 153 + ], + "lines": [ + { + "bbox": [ + 454, + 143, + 496, + 153 + ], + "spans": [ + { + "bbox": [ + 454, + 143, + 496, + 153 + ], + "type": "text", + "content": "(f) " + }, + { + "bbox": [ + 454, + 143, + 496, + 153 + ], + "type": "inline_equation", + "content": "\\hat{\\beta} = 0.18" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 58, + 179, + 130, + 252 + ], + "blocks": [ + { + "bbox": [ + 93, + 158, + 499, + 169 + ], + "lines": [ + { + "bbox": [ + 93, + 158, + 499, + 169 + ], + "spans": [ + { + "bbox": [ + 93, + 158, + 499, + 169 + ], + "type": "text", + "content": "Figure 5. Results of controllable noise synthesis. The gain factor ranges from 0.02 to 0.18 with intervals of 0.04." 
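The sweep shown in Figure 5 can be mimicked, under the same assumptions as the earlier synthesis sketch, by overriding the estimated gain factor with fixed values from 0.02 to 0.18; `synthesize_noisy` below refers to that earlier illustrative helper, not to released code.

```python
# Controllable synthesis as in Figure 5: re-noise one clean image with the
# gain factor fixed to 0.02, 0.06, ..., 0.18 instead of a GENet estimate.
import torch

def gain_factor_sweep(npnet, ncnet, clean, betas=(0.02, 0.06, 0.10, 0.14, 0.18)):
    results = []
    for b in betas:
        beta_hat = torch.full((clean.shape[0],), b, device=clean.device)
        results.append(synthesize_noisy(npnet, ncnet, clean, beta_hat))
    return results   # one synthetic noisy image per gain factor
```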
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 58, + 179, + 130, + 252 + ], + "lines": [ + { + "bbox": [ + 58, + 179, + 130, + 252 + ], + "spans": [ + { + "bbox": [ + 58, + 179, + 130, + 252 + ], + "type": "image", + "image_path": "fa67a832ba461681aa6a61cccc56f71e3221a747a2fbddd6be88b08e53ddd654.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 132, + 180, + 204, + 253 + ], + "blocks": [ + { + "bbox": [ + 132, + 180, + 204, + 253 + ], + "lines": [ + { + "bbox": [ + 132, + 180, + 204, + 253 + ], + "spans": [ + { + "bbox": [ + 132, + 180, + 204, + 253 + ], + "type": "image", + "image_path": "9c7c9f87e5ad8ed1f80c19f1b9e660e28340038f249e2e227313542ecd9f045f.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 141, + 255, + 194, + 266 + ], + "lines": [ + { + "bbox": [ + 141, + 255, + 194, + 266 + ], + "spans": [ + { + "bbox": [ + 141, + 255, + 194, + 266 + ], + "type": "text", + "content": "(b) C2N [16]" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 205, + 180, + 277, + 253 + ], + "blocks": [ + { + "bbox": [ + 205, + 180, + 277, + 253 + ], + "lines": [ + { + "bbox": [ + 205, + 180, + 277, + 253 + ], + "spans": [ + { + "bbox": [ + 205, + 180, + 277, + 253 + ], + "type": "image", + "image_path": "ff1f381eba36e6da51584c2580c47cacf2b4663ec63269c1d346aeba13b0aa95.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 216, + 255, + 265, + 266 + ], + "lines": [ + { + "bbox": [ + 216, + 255, + 265, + 266 + ], + "spans": [ + { + "bbox": [ + 216, + 255, + 265, + 266 + ], + "type": "text", + "content": "(c) NeCA-S" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 57, + 267, + 129, + 338 + ], + "blocks": [ + { + "bbox": [ + 75, + 256, + 113, + 266 + ], + "lines": [ + { + "bbox": [ + 75, + 256, + 113, + 266 + ], + "spans": [ + { + "bbox": [ + 75, + 256, + 113, + 266 + ], + "type": "text", + "content": "(a) Noisy" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 57, + 267, + 129, + 338 + ], + "lines": [ + { + "bbox": [ + 57, + 267, + 129, + 338 + ], + "spans": [ + { + "bbox": [ + 57, + 267, + 129, + 338 + ], + "type": "image", + "image_path": "fc607dac803c972cc879a21ba7b44774f6765dae48a0f9325f15b88ce4aa5c09.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 66, + 342, + 119, + 352 + ], + "lines": [ + { + "bbox": [ + 66, + 342, + 119, + 352 + ], + "spans": [ + { + "bbox": [ + 66, + 342, + 119, + 352 + ], + "type": "text", + "content": "(d) NeCA-W" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 130, + 267, + 202, + 338 + ], + "blocks": [ + { + "bbox": [ + 130, + 267, + 202, + 338 + ], + "lines": [ + { + "bbox": [ + 130, + 267, + 202, + 338 + ], + "spans": [ + { + "bbox": [ + 130, + 267, + 202, + 338 + ], + "type": "image", + "image_path": "6c8aa11b4bc9fc744a83fadc5dc423828ba5fc1adeef17fda07ba3640fb3d1fb.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 150, + 342, + 182, + 352 + ], + "lines": [ + { + "bbox": [ + 150, + 342, + 182, + 352 + ], + "spans": [ + { + "bbox": [ + 150, + 342, + 182, 
+ 352 + ], + "type": "text", + "content": "(e) Real" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 203, + 267, + 276, + 338 + ], + "blocks": [ + { + "bbox": [ + 203, + 267, + 276, + 338 + ], + "lines": [ + { + "bbox": [ + 203, + 267, + 276, + 338 + ], + "spans": [ + { + "bbox": [ + 203, + 267, + 276, + 338 + ], + "type": "image", + "image_path": "ce2c034828c1d63c9e5c67690dbd8e354f2b7dc6f7b9ba3f07ad6df88ae5142f.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 221, + 342, + 258, + 352 + ], + "lines": [ + { + "bbox": [ + 221, + 342, + 258, + 352 + ], + "spans": [ + { + "bbox": [ + 221, + 342, + 258, + 352 + ], + "type": "text", + "content": "(f) Clean" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 58, + 404, + 130, + 479 + ], + "blocks": [ + { + "bbox": [ + 47, + 355, + 287, + 388 + ], + "lines": [ + { + "bbox": [ + 47, + 355, + 287, + 388 + ], + "spans": [ + { + "bbox": [ + 47, + 355, + 287, + 388 + ], + "type": "text", + "content": "Figure 6. Denoising results on the SIDD dataset. DnCNN denoisers are trained on the noisy images from (b) C2N, (c, d) our models, and (e) real noisy images of the SIDD." + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 58, + 404, + 130, + 479 + ], + "lines": [ + { + "bbox": [ + 58, + 404, + 130, + 479 + ], + "spans": [ + { + "bbox": [ + 58, + 404, + 130, + 479 + ], + "type": "image", + "image_path": "baf7ebbe63e91d30aca03f798a85425fa8b68b3ac7d02a9c436d07795d21e863.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 132, + 405, + 204, + 479 + ], + "blocks": [ + { + "bbox": [ + 132, + 405, + 204, + 479 + ], + "lines": [ + { + "bbox": [ + 132, + 405, + 204, + 479 + ], + "spans": [ + { + "bbox": [ + 132, + 405, + 204, + 479 + ], + "type": "image", + "image_path": "577c34bc1dec9e4b436b76e0a1491243a99cd25377208c4b2be0e650bb31445d.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 139, + 480, + 196, + 491 + ], + "lines": [ + { + "bbox": [ + 139, + 480, + 196, + 491 + ], + "spans": [ + { + "bbox": [ + 139, + 480, + 196, + 491 + ], + "type": "text", + "content": "(b) BM3D [9]" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 204, + 405, + 277, + 479 + ], + "blocks": [ + { + "bbox": [ + 204, + 405, + 277, + 479 + ], + "lines": [ + { + "bbox": [ + 204, + 405, + 277, + 479 + ], + "spans": [ + { + "bbox": [ + 204, + 405, + 277, + 479 + ], + "type": "image", + "image_path": "62e07428b834f79a4ad27c96cdb99a1aed99296e4fab164e30f267674c1fa872.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 57, + 492, + 129, + 564 + ], + "blocks": [ + { + "bbox": [ + 75, + 481, + 113, + 492 + ], + "lines": [ + { + "bbox": [ + 75, + 481, + 113, + 492 + ], + "spans": [ + { + "bbox": [ + 75, + 481, + 113, + 492 + ], + "type": "text", + "content": "(a) Noisy" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 57, + 492, + 129, + 564 + ], + "lines": [ + { + "bbox": [ + 57, + 492, + 129, + 564 + ], + "spans": [ + { + "bbox": [ + 57, + 492, + 129, + 564 + ], + "type": "image", + 
"image_path": "58d081b82dada00d053c63984181cb25b957486f15ac2262a88eb15362771af2.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 567, + 117, + 578 + ], + "lines": [ + { + "bbox": [ + 68, + 567, + 117, + 578 + ], + "spans": [ + { + "bbox": [ + 68, + 567, + 117, + 578 + ], + "type": "text", + "content": "(d) NeCA-S" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 47, + 581, + 287, + 614 + ], + "lines": [ + { + "bbox": [ + 47, + 581, + 287, + 614 + ], + "spans": [ + { + "bbox": [ + 47, + 581, + 287, + 614 + ], + "type": "text", + "content": "Figure 7. Denoising results on the DND dataset. DnCNN denoisers are trained on the noisy images from (c) C2N, (d, e) our models, and (f) real noisy images of the SIDD." + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_caption" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 130, + 492, + 203, + 564 + ], + "blocks": [ + { + "bbox": [ + 130, + 492, + 203, + 564 + ], + "lines": [ + { + "bbox": [ + 130, + 492, + 203, + 564 + ], + "spans": [ + { + "bbox": [ + 130, + 492, + 203, + 564 + ], + "type": "image", + "image_path": "d1c13e763586c1a69a63b058e60ada9357f40f635041fc934f4abcf5bead0f79.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 140, + 567, + 192, + 578 + ], + "lines": [ + { + "bbox": [ + 140, + 567, + 192, + 578 + ], + "spans": [ + { + "bbox": [ + 140, + 567, + 192, + 578 + ], + "type": "text", + "content": "(e) NeCA-W" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_caption" + } + ], + "index": 34 + }, + { + "type": "image", + "bbox": [ + 203, + 492, + 276, + 564 + ], + "blocks": [ + { + "bbox": [ + 214, + 481, + 266, + 492 + ], + "lines": [ + { + "bbox": [ + 214, + 481, + 266, + 492 + ], + "spans": [ + { + "bbox": [ + 214, + 481, + 266, + 492 + ], + "type": "text", + "content": "(c) C2N [16]" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 203, + 492, + 276, + 564 + ], + "lines": [ + { + "bbox": [ + 203, + 492, + 276, + 564 + ], + "spans": [ + { + "bbox": [ + 203, + 492, + 276, + 564 + ], + "type": "image", + "image_path": "93b7aa1f1696eb8d6c4b2fe2410bb65e822829a37b82b2bc7285b638a8dc0924.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 223, + 567, + 255, + 578 + ], + "lines": [ + { + "bbox": [ + 223, + 567, + 255, + 578 + ], + "spans": [ + { + "bbox": [ + 223, + 567, + 255, + 578 + ], + "type": "text", + "content": "(f) Real" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_caption" + } + ], + "index": 36 + }, + { + "bbox": [ + 47, + 629, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 629, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 629, + 287, + 713 + ], + "type": "text", + "content": "and DND datasets. The results indicate that the denoisers trained on the synthetic image pairs from NeCA-W and NeCA-S achieve similar denoising results compared to the denoiser trained on real image pairs. In contrast, the denoiser trained on noisy samples from C2N, which employs an unpaired training scheme, fails to suppress the noise effectively, partly due to its unpaired train scheme." 
+ } + ] + } + ], + "index": 39 + }, + { + "type": "table", + "bbox": [ + 330, + 179, + 520, + 212 + ], + "blocks": [ + { + "bbox": [ + 330, + 179, + 520, + 212 + ], + "lines": [ + { + "bbox": [ + 330, + 179, + 520, + 212 + ], + "spans": [ + { + "bbox": [ + 330, + 179, + 520, + 212 + ], + "type": "table", + "html": "
Lossw/o Lstd2w/o Ladv2w/o Lregall
KL0.0520.0480.1080.041
", + "image_path": "e716acd9cb0bc4cc37d5ec50e0f7c9fb752b205e82814c55189d7ebc53af1287.jpg" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "table_body" + } + ], + "index": 40 + }, + { + "bbox": [ + 306, + 220, + 545, + 253 + ], + "lines": [ + { + "bbox": [ + 306, + 220, + 545, + 253 + ], + "spans": [ + { + "bbox": [ + 306, + 220, + 545, + 253 + ], + "type": "text", + "content": "Table 3. Ablation study on the effectiveness of different loss functions. We train the framework on the training set of camera IP and calculate KL divergence on its validation set." + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 306, + 267, + 440, + 278 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 267, + 440, + 278 + ], + "spans": [ + { + "bbox": [ + 306, + 267, + 440, + 278 + ], + "type": "text", + "content": "4.4.Customized Generation" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 304, + 286, + 545, + 383 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 286, + 545, + 383 + ], + "spans": [ + { + "bbox": [ + 304, + 286, + 545, + 383 + ], + "type": "text", + "content": "Our proposed noise synthesizing framework allows for controlling the generated noise with multiple noise levels by manipulating the gain factors. Figure 5 illustrates the controllable synthesizing results, which are generated by varying the gain factor within the range of 0.02 to 0.18 with intervals of 0.04. The results demonstrate that an increase in the gain factor value leads to a proportional increase in the magnitude of the generated noise." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 306, + 393, + 399, + 406 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 393, + 399, + 406 + ], + "spans": [ + { + "bbox": [ + 306, + 393, + 399, + 406 + ], + "type": "text", + "content": "4.5. Ablation Study" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 304, + 412, + 545, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 412, + 545, + 579 + ], + "spans": [ + { + "bbox": [ + 304, + 412, + 545, + 579 + ], + "type": "text", + "content": "In this section, we conduct ablation studies to verify the effectiveness of individual loss functions in our framework, including " + }, + { + "bbox": [ + 304, + 412, + 545, + 579 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{std2}" + }, + { + "bbox": [ + 304, + 412, + 545, + 579 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 412, + 545, + 579 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{adv2}" + }, + { + "bbox": [ + 304, + 412, + 545, + 579 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 412, + 545, + 579 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{reg}" + }, + { + "bbox": [ + 304, + 412, + 545, + 579 + ], + "type": "text", + "content": ". We exclude " + }, + { + "bbox": [ + 304, + 412, + 545, + 579 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{std1}" + }, + { + "bbox": [ + 304, + 412, + 545, + 579 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 412, + 545, + 579 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{adv1}" + }, + { + "bbox": [ + 304, + 412, + 545, + 579 + ], + "type": "text", + "content": " from evaluation since they are indispensable for framework training. 
As indicated in Table 3, the model achieves optimal performance in KL divergence with complete loss functions, suggesting all the components contribute to the final synthetic noise. However, removing " + }, + { + "bbox": [ + 304, + 412, + 545, + 579 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{reg}" + }, + { + "bbox": [ + 304, + 412, + 545, + 579 + ], + "type": "text", + "content": " significantly reduces the KL divergence, suggesting the importance of stabilizing the training process. Moreover, both " + }, + { + "bbox": [ + 304, + 412, + 545, + 579 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{adv2}" + }, + { + "bbox": [ + 304, + 412, + 545, + 579 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 412, + 545, + 579 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{std2}" + }, + { + "bbox": [ + 304, + 412, + 545, + 579 + ], + "type": "text", + "content": " improve the quality of synthetic noise, supporting our claim that " + }, + { + "bbox": [ + 304, + 412, + 545, + 579 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{adv2}" + }, + { + "bbox": [ + 304, + 412, + 545, + 579 + ], + "type": "text", + "content": " serves as a complementary loss for " + }, + { + "bbox": [ + 304, + 412, + 545, + 579 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{std2}" + }, + { + "bbox": [ + 304, + 412, + 545, + 579 + ], + "type": "text", + "content": ", enabling the NPNet to predict more accurate noise levels." + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 306, + 594, + 378, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 594, + 378, + 605 + ], + "spans": [ + { + "bbox": [ + 306, + 594, + 378, + 605 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 304, + 613, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 613, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 613, + 545, + 712 + ], + "type": "text", + "content": "In this paper, we propose a neighboring correlation-aware noise model for sRGB real noise generation. Our proposed method effectively bridges the gap between synthetic noise and real noise by explicitly modeling the signal dependency and neighboring correlation of real noise. The experimental results demonstrate the proposed noise model achieves superior performance on both real noise synthesis and downstream real image denoising tasks." 
+ } + ] + } + ], + "index": 47 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1690" + } + ] + } + ], + "index": 48 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 286, + 712 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 53, + 91, + 286, + 124 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 286, + 124 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 286, + 124 + ], + "type": "text", + "content": "[1] Abdelrahman Abdelhamed, Marcus A Brubaker, and Michael S Brown. Noise flow: Noise modeling with conditional normalizing flows. In ICCV, 2019. 1, 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 125, + 286, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 125, + 286, + 158 + ], + "spans": [ + { + "bbox": [ + 53, + 125, + 286, + 158 + ], + "type": "text", + "content": "[2] Abdelrahman Abdelhamed, Stephen Lin, and Michael S Brown. A high-quality denoising dataset for smartphone cameras. In CVPR, 2018. 6, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 159, + 286, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 159, + 286, + 180 + ], + "spans": [ + { + "bbox": [ + 54, + 159, + 286, + 180 + ], + "type": "text", + "content": "[3] Saeed Anwar and Nick Barnes. Real image denoising with feature attention. In ICCV, 2019. 1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 182, + 286, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 182, + 286, + 214 + ], + "spans": [ + { + "bbox": [ + 53, + 182, + 286, + 214 + ], + "type": "text", + "content": "[4] Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016. 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 216, + 286, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 216, + 286, + 249 + ], + "spans": [ + { + "bbox": [ + 53, + 216, + 286, + 249 + ], + "type": "text", + "content": "[5] Tim Brooks, Ben Mildenhall, Tianfan Xue, Jiawen Chen, Dillon Sharlet, and Jonathan T Barron. Unprocessing images for learned raw denoising. In CVPR, 2019. 1, 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 251, + 286, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 251, + 286, + 283 + ], + "spans": [ + { + "bbox": [ + 53, + 251, + 286, + 283 + ], + "type": "text", + "content": "[6] Ke-Chi Chang, Ren Wang, Hung-Jin Lin, Yu-Lun Liu, Chia-Ping Chen, Yu-Lin Chang, and Hwann-Tzong Chen. Learning camera-aware noise models. In ECCV, 2020. 1, 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 285, + 286, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 285, + 286, + 316 + ], + "spans": [ + { + "bbox": [ + 53, + 285, + 286, + 316 + ], + "type": "text", + "content": "[7] Guangyong Chen, Fengyuan Zhu, and Pheng Ann Heng. 
An efficient statistical method for image noise level estimation. In ICCV, 2015. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 318, + 286, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 318, + 286, + 351 + ], + "spans": [ + { + "bbox": [ + 53, + 318, + 286, + 351 + ], + "type": "text", + "content": "[8] Jingwen Chen, Jiawei Chen, Hongyang Chao, and Ming Yang. Image blind denoising with generative adversarial network based noise modeling. In CVPR, 2018. 2, 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 353, + 286, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 353, + 286, + 385 + ], + "spans": [ + { + "bbox": [ + 53, + 353, + 286, + 385 + ], + "type": "text", + "content": "[9] Kostadin Dabov, Alessandro Foi, Vladimir Katkovnik, and Karen Egiazarian. Image denoising by sparse 3-d transform-domain collaborative filtering. TIP, 2007. 7, 8" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 387, + 286, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 387, + 286, + 418 + ], + "spans": [ + { + "bbox": [ + 48, + 387, + 286, + 418 + ], + "type": "text", + "content": "[10] Alessandro Foi. Clipped noisy images: Heteroskedastic modeling and practical denoising. Signal Processing, 2009. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 421, + 286, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 421, + 286, + 463 + ], + "spans": [ + { + "bbox": [ + 48, + 421, + 286, + 463 + ], + "type": "text", + "content": "[11] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial networks. Communications of the ACM, 2020. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 465, + 286, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 465, + 286, + 498 + ], + "spans": [ + { + "bbox": [ + 48, + 465, + 286, + 498 + ], + "type": "text", + "content": "[12] Shuhang Gu, Lei Zhang, Wangmeng Zuo, and Xiangchu Feng. Weighted nuclear norm minimization with application to image denoising. In CVPR, 2014. 7" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 499, + 286, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 499, + 286, + 532 + ], + "spans": [ + { + "bbox": [ + 48, + 499, + 286, + 532 + ], + "type": "text", + "content": "[13] Ishaan Gulrajani, Faruk Ahmed, Martin Arjovsky, Vincent Dumoulin, and Aaron C Courville. Improved training of wasserstein gans. NIPS, 2017. 5" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 533, + 286, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 533, + 286, + 566 + ], + "spans": [ + { + "bbox": [ + 48, + 533, + 286, + 566 + ], + "type": "text", + "content": "[14] Lanqing Guo, Siyu Huang, Haosen Liu, and Bihan Wen. Fino: Flow-based joint image and noise model. arXiv preprint arXiv:2111.06031, 2021. 1" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 567, + 286, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 567, + 286, + 600 + ], + "spans": [ + { + "bbox": [ + 48, + 567, + 286, + 600 + ], + "type": "text", + "content": "[15] Shi Guo, Zifei Yan, Kai Zhang, Wangmeng Zuo, and Lei Zhang. Toward convolutional blind denoising of real photographs. In CVPR, 2019. 
1, 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 602, + 286, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 602, + 286, + 634 + ], + "spans": [ + { + "bbox": [ + 48, + 602, + 286, + 634 + ], + "type": "text", + "content": "[16] Geonwoon Jang, Wooseok Lee, Sanghyun Son, and Kyoung Mu Lee. C2n: Practical generative noise modeling for real-world denoising. In ICCV, 2021. 1, 2, 6, 7, 8" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 48, + 635, + 286, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 286, + 678 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 286, + 678 + ], + "type": "text", + "content": "[17] Dong-Wook Kim, Jae Ryun Chung, and Seung-Won Jung. Grdn: Grouped residual dense network for real image denoising and gan-based real-world noise modeling. In CVPRW, 2019. 1, 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 48, + 681, + 286, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 681, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 681, + 286, + 712 + ], + "type": "text", + "content": "[18] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 6" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 433 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 95 + ], + "type": "text", + "content": "[19] Durk P Kingma and Prafulla Dhariwal. Glow: Generative flow with invertible 1x1 convolutions. NIPS, 2018. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 96, + 545, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 96, + 545, + 129 + ], + "spans": [ + { + "bbox": [ + 307, + 96, + 545, + 129 + ], + "type": "text", + "content": "[20] Shayan Kousha, Ali Maleky, Michael S Brown, and Marcus A Brubaker. Modeling srgb camera noise with normalizing flows. In CVPR, 2022. 1, 2, 3, 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 130, + 545, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 130, + 545, + 162 + ], + "spans": [ + { + "bbox": [ + 307, + 130, + 545, + 162 + ], + "type": "text", + "content": "[21] Ce Liu, William T Freeman, Richard Szeliski, and Sing Bing Kang. Noise estimation from a single image. In CVPR, 2006. 2, 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 164, + 545, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 164, + 545, + 206 + ], + "spans": [ + { + "bbox": [ + 307, + 164, + 545, + 206 + ], + "type": "text", + "content": "[22] Seonghyeon Nam, Youngbae Hwang, Yasuyuki Matsushita, and Seon Joo Kim. A holistic approach to cross-channel image noise modeling and its application to image denoising. In CVPR, 2016. 1, 2, 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 209, + 545, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 209, + 545, + 230 + ], + "spans": [ + { + "bbox": [ + 307, + 209, + 545, + 230 + ], + "type": "text", + "content": "[23] Tobias Plotz and Stefan Roth. Benchmarking denoising algorithms with real photographs. In CVPR, 2017. 
7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 232, + 545, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 232, + 545, + 264 + ], + "spans": [ + { + "bbox": [ + 307, + 232, + 545, + 264 + ], + "type": "text", + "content": "[24] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. TIP, 2004. 6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 266, + 545, + 298 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 266, + 545, + 298 + ], + "spans": [ + { + "bbox": [ + 307, + 266, + 545, + 298 + ], + "type": "text", + "content": "[25] Kaixuan Wei, Ying Fu, Jiaolong Yang, and Hua Huang. A physics-based noise formation model for extreme low-light raw denoising. In CVPR, 2020. 1, 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 300, + 545, + 332 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 300, + 545, + 332 + ], + "spans": [ + { + "bbox": [ + 307, + 300, + 545, + 332 + ], + "type": "text", + "content": "[26] Zongsheng Yue, Qian Zhao, Lei Zhang, and Deyu Meng. Dual adversarial network: Toward real-world noise removal and noise generation. In ECCV, 2020. 1, 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 334, + 545, + 366 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 334, + 545, + 366 + ], + "spans": [ + { + "bbox": [ + 307, + 334, + 545, + 366 + ], + "type": "text", + "content": "[27] Kai Zhang, Wangmeng Zuo, Yunjin Chen, Deyu Meng, and Lei Zhang. Beyond a gaussian denoiser: Residual learning of deep cnn for image denoising. TIP, 2017. 6" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 368, + 545, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 368, + 545, + 399 + ], + "spans": [ + { + "bbox": [ + 307, + 368, + 545, + 399 + ], + "type": "text", + "content": "[28] Yi Zhang, Hongwei Qin, Xiaogang Wang, and Hongsheng Li. Rethinking noise synthesis and modeling in raw denoising. In ICCV, 2021. 1, 2" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 401, + 545, + 433 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 401, + 545, + 433 + ], + "spans": [ + { + "bbox": [ + 307, + 401, + 545, + 433 + ], + "type": "text", + "content": "[29] Yuqian Zhou, Jianbo Jiao, Haibin Huang, Yang Wang, Jue Wang, Honghui Shi, and Thomas Huang. When awgn-based denoiser meets real noises. In AAAI, 2020. 
4, 5" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 314, + 757 + ], + "type": "text", + "content": "1691" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/vMAP_ Vectorised Object Mapping for Neural Field SLAM/ff44bc7e-edea-45f5-9989-5020c0b824b0_content_list.json b/2023/vMAP_ Vectorised Object Mapping for Neural Field SLAM/ff44bc7e-edea-45f5-9989-5020c0b824b0_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..f965ccd561d6d41a36b40cf809e45b7674323c98 --- /dev/null +++ b/2023/vMAP_ Vectorised Object Mapping for Neural Field SLAM/ff44bc7e-edea-45f5-9989-5020c0b824b0_content_list.json @@ -0,0 +1,1585 @@ +[ + { + "type": "text", + "text": "vMAP: Vectorised Object Mapping for Neural Field SLAM", + "text_level": 1, + "bbox": [ + 184, + 130, + 782, + 151 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xin Kong Shikun Liu Marwan Taher Andrew J. Davison Dyson Robotics Lab, Imperial College London", + "bbox": [ + 235, + 180, + 733, + 217 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{x.kong21, shikun.liu17, m.taher, a.davison}@imperial.ac.uk", + "bbox": [ + 222, + 219, + 741, + 233 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/7d9a6ae6db468eb20e0ac63efa0cdd689fbb21ad9fb2211e33edc8381b5de0f0.jpg", + "image_caption": [ + "Figure 1. vMAP automatically builds an object-level scene model from a real-time RGB-D input stream. Each object is represented by a separate MLP neural field model, all optimised in parallel via vectorised training. We use no 3D shape priors, but the MLP representation encourages object reconstruction to be watertight and complete, even when objects are partially observed or are heavily occluded in the input images. See for instance the separate reconstructions of the armchairs, sofas and cushions, which were mutually occluding each other, in this example from Replica." + ], + "image_footnote": [], + "bbox": [ + 81, + 268, + 893, + 406 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 497, + 313, + 513 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We present vMAP, an object-level dense SLAM system using neural field representations. Each object is represented by a small MLP, enabling efficient, watertight object modelling without the need for 3D priors.", + "bbox": [ + 75, + 529, + 468, + 589 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "As an RGB-D camera browses a scene with no prior information, vMAP detects object instances on-the-fly, and dynamically adds them to its map. Specifically, thanks to the power of vectorised training, vMAP can optimise as many as 50 individual objects in a single scene, with an extremely efficient training speed of $5\\mathrm{Hz}$ map update. We experimentally demonstrate significantly improved scene-level and object-level reconstruction quality compared to prior neural field SLAM systems. Project page: https://kxhit.github.io/vMAP.", + "bbox": [ + 73, + 590, + 470, + 741 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 76, + 753, + 209, + 768 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "For robotics and other interactive vision applications, an object-level model is arguably semantically optimal, with scene entities represented in a separated, composable way, but also efficiently focusing resources on what is important in an environment.", + "bbox": [ + 75, + 779, + 468, + 854 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The key question in building an object-level mapping system is what level of prior information is known about the objects in a scene in order to segment, classify and re", + "bbox": [ + 75, + 854, + 470, + 900 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "construct them. If no 3D object priors are available, then usually only the directly observed parts of objects can be reconstructed, leading to holes and missing parts [4, 46]. Prior object information such as CAD models or category-level shape space models enable full object shape estimation from partial views, but only for the subset of objects in a scene for which these models are available.", + "bbox": [ + 496, + 500, + 892, + 604 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this paper, we present a new approach which applies to the case where no 3D priors are available but still often enables watertight object reconstruction in realistic real-time scene scanning. Our system, vMAP, builds on the attractive properties shown by neural fields as a real-time scene representation [31], with efficient and complete representation of shape, but now reconstructs a separate tiny MLP model of each object. The key technical contribution of our work is to show that a large number of separate MLP object models can be simultaneously and efficiently optimised on a single GPU during live operation via vectorised training.", + "bbox": [ + 496, + 609, + 893, + 776 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We show that we can achieve much more accurate and complete scene reconstruction by separately modelling objects, compared with using a similar number of weights in a single neural field model of the whole scene. Our real-time system is highly efficient in terms of both computation and memory, and we show that scenes with up to 50 objects can be mapped with 40KB per object of learned parameters across the multiple, independent object networks.", + "bbox": [ + 496, + 779, + 893, + 901 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "952", + "bbox": [ + 485, + 945, + 509, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We also demonstrate the flexibility of our disentangled object representation to enable recomposition of scenes with new object configurations. Extensive experiments have been conducted on both simulated and real-world datasets, showing state-of-the-art scene-level and object-level reconstruction performance.", + "bbox": [ + 76, + 90, + 470, + 181 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. 
Related Work", + "text_level": 1, + "bbox": [ + 76, + 195, + 218, + 210 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This work follows in long series of efforts to build real-time scene representations which are decomposed into explicit rigid objects, with the promise of flexible and efficient scene representation and even the possibility to represent changing scenes. Different systems assumed varying types of representation and levels of prior knowledge, from CAD models [28], via category-level shape models [10, 11, 32, 36] to no prior shape knowledge, although in this case only the visible parts of objects could be reconstructed [15, 27, 38].", + "bbox": [ + 76, + 220, + 468, + 357 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Neural Field SLAM Neural fields have recently been widely used as efficient, accurate and flexible representations of whole scenes [16, 17, 19, 22]. To adopt these representations into real-time SLAM systems, iMAP [31] demonstrated for the first time that a simple MLP network, incrementally trained with the aid of depth measurements from RGB-D sensors, can represent room-scaled 3D scenes in real-time. Some of iMAP's most interesting properties were its tendency to produce watertight reconstructions, even often plausibly completing the unobserved back of objects. These coherence properties of neural fields were particularly revealed when semantic output channels were added, as in SemanticNeRF [43] and iLabel [44], and were found to inherit the coherence. To make implicit representation more scalable and efficient, a group of implicit SLAM systems [25, 35, 40, 45, 48] fused neural fields with conventional volumetric representations.", + "bbox": [ + 76, + 363, + 470, + 621 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Object Representations with Neural Fields However, obtaining individual object representations from these neural field methods is difficult, as the correspondences between network parameters and specific scene regions are complicated and difficult to determine. To tackle this, DeRF [23] decomposed a scene spatially and dedicated smaller networks to each decomposed part. Similarly, KiloNeRF [24] divided a scene into thousands of volumetric parts, each represented by a tiny MLP, and trained them in parallel with custom CUDA kernels to speed up NeRF. Different from KiloNeRF, vMAP decomposes the scene into objects which are semantically meaningful.", + "bbox": [ + 76, + 628, + 468, + 809 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To represent multiple objects, ObjectNeRF [39] and ObjSDF [37] took pre-computed instance masks as additional input and conditioned object representation on learnable object activation code. But these methods are still trained offline and tangle object representations with the main scene network, so that they need to optimise the network weights", + "bbox": [ + 75, + 810, + 470, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "with all object codes during training, and infer the whole network to get the shape of a desired object. This contrasts with vMAP which models objects individually, and is able to stop and resume training for any objects without any inter-object interference.", + "bbox": [ + 496, + 90, + 890, + 166 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The recent work most similar to ours has used the attractive properties of neural field MLPs to represent single objects. 
The analysis in [5] explicitly evaluated the use of over-fit neural implicit networks as a 3D shape representation for graphics, considering that they should be taken seriously. The work in [1] furthered this analysis, showing how object representation was affected by different observation conditions, though using the hybrid Instant NGP rather than a single MLP representation, so it is not clear whether some object coherence properties would be lost. Finally, the CodeNeRF system [9] trained a NeRF conditioned on learnable object codes, again proving the attractive properties of neural fields to represent single objects.", + "bbox": [ + 496, + 167, + 892, + 362 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We build on this work in our paper, but for the first time show that many individual neural field models making up a whole scene can be simultaneously trained within a real-time system, resulting in accurate and efficient representation of many-object scenes.", + "bbox": [ + 496, + 363, + 890, + 438 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3. vMAP: An Efficient Object Mapping System with Vectorised Training", + "text_level": 1, + "bbox": [ + 498, + 452, + 890, + 488 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3.1. System Overview", + "text_level": 1, + "bbox": [ + 500, + 496, + 669, + 512 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We first introduce our detailed design for object-level mapping with efficient vectorised training (Section 3.2), and then explain our improved training strategies of pixel sampling and surface rendering (Section 3.3). Finally, we show how we may recompose and render a new scene with these learned object models (Section 3.4). An overview of our training and rendering pipeline is shown in Fig. 2.", + "bbox": [ + 496, + 518, + 890, + 626 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3.2. Vectorised Object Level Mapping", + "text_level": 1, + "bbox": [ + 500, + 635, + 794, + 652 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Object Initialisation and Association To start with, each frame is associated with densely labelled object masks. These object masks are either directly provided in the dataset, or predicted with an off-the-shelf 2D instance segmentation network. Since those predicted object masks have no temporal consistency across different frames, we perform object association between the previous and the current live frame, based on two criteria: i) Semantic Consistency: the object in the current frame is predicted as the same semantic class from the previous frame, and ii) Spatial Consistency: the object in the current frame is spatially close to the object in the previous frames, measured by the mean IoU of their 3D object bounds. When these two criteria are satisfied, we assume they are the same object instance and represent them with the same object model. Otherwise, they are different object instances and we initialise", + "bbox": [ + 496, + 657, + 892, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "953", + "bbox": [ + 485, + 945, + 509, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/49bed62b879a9243dc8649cbc650b8273b3ece64d5e4b39a5abcf819e31b25fd.jpg", + "image_caption": [ + "Figure 2. An overview of training and rendering pipeline of vMAP." 
+ ], + "image_footnote": [], + "bbox": [ + 80, + 95, + 888, + 438 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "a new object model and append it to the models stack.", + "bbox": [ + 75, + 474, + 434, + 489 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For each object in a frame, we estimate its 3D object bound by its 3D point cloud, parameterised by its depth map and the camera pose. Camera tracking is externally provided by an off-the-shelf tracking system, which we found to be more accurate and robust compared to jointly optimising pose and geometry. If we detect the same object instance in a new frame, we merge its 3D point cloud from the previous frames to the current frame and re-estimate its 3D object bound. Therefore, these object bounds are dynamically updated and refined with more observations.", + "bbox": [ + 75, + 489, + 468, + 641 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Object Supervision We apply object-level supervision only for pixels inside a 2D object bounding box, for maximal training efficiency. For those pixels within an object mask, we encourage the object radiance field to be occupied and supervise them with depth and colour loss. Otherwise we encourage the object radiance field to be empty.", + "bbox": [ + 75, + 648, + 468, + 739 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Each object instance samples training pixels from its own independent keyframe buffer. Therefore, we have flexibility to stop or resume the training of any object, with no training interference between objects.", + "bbox": [ + 75, + 739, + 468, + 801 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Vectorised Training Representing a neural field with multiple small networks can lead to efficient training, as shown in prior work [24]. In vMAP, all object models are of the same design, except for the background object which we represent with a slightly larger network. Therefore, we are able to stack these small object models together for vec", + "bbox": [ + 75, + 809, + 470, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "torised training, leveraging the highly optimised vectorised operations in PyTorch [8]. Since multiple object models are batched and trained simultaneously as opposed to sequentially, we optimise the use of the available GPU resources. We show that vectorised training is an essential design element to the system, resulting in significantly improved training speed, further discussed in Section 4.3.", + "bbox": [ + 496, + 474, + 890, + 580 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.3. Neural Implicit Mapping", + "text_level": 1, + "bbox": [ + 498, + 587, + 728, + 604 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Depth Guided Sampling Neural fields trained on RGB data only have no guarantee to model accurate object geometry, due to the fact that they are optimising for appearance rather than the geometry. To obtain more geometrically accurate object models, we benefit from the depth map available from an RGB-D sensor, providing a strong prior for learning the density field of 3D volumes. Specifically, we sample $N_{s}$ and $N_{c}$ points along each ray, for which $N_{s}$ points are sampled with a Normal distribution centered around the surface $t_{s}$ (from the depth map), with a small $d_{\\sigma}$ variance, and $N_{c}$ points are uniformly sampled between the camera $t_{n}$ (the near bound) and the surface $t_{s}$ , with a stratified sampling approach. 
When the depth measurement is invalid, the surface $t_{s}$ is then replaced with the far bound $t_{f}$ . Mathematically, we have:", + "bbox": [ + 496, + 609, + 890, + 838 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nt _ {i} \\sim \\mathcal {U} \\left(t _ {n} + \\frac {i - 1}{N _ {c}} \\left(t _ {s} - t _ {n}\\right), t _ {n} + \\frac {i}{N _ {c}} \\left(t _ {s} - t _ {n}\\right)\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 509, + 844, + 890, + 877 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nt _ {i} \\sim \\mathcal {N} \\left(t _ {s}, d _ {\\sigma} ^ {2}\\right). \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 511, + 878, + 890, + 897 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "954", + "bbox": [ + 485, + 944, + 511, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We choose $d_{\\sigma} = 3cm$ which works well in our implementation. We observe that training more points near the surface helps to guide the object models to quickly focus on representing accurate object geometry.", + "bbox": [ + 75, + 90, + 468, + 151 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Surface and Volume Rendering As we are concerned more by 3D surface reconstruction than 2D rendering, we omit the viewing direction from the network input, and model object visibility with a binary indicator (no transparent objects). With similar motivation to UniSURF [21], we parameterise the occupancy probability of a 3D point $x_{i}$ as $o_{\\theta}(x_i)\\rightarrow [0,1]$ , where $o_{\\theta}$ is a continuous occupancy field. Therefore, the termination probability at point $x_{i}$ along ray $\\mathbf{r}$ becomes $T_{i} = o(x_{i})\\prod_{j < i}(1 - o(x_{j}))$ , indicating that no occupied samples $x_{j}$ with $j < i$ exist before $x_{i}$ . The corresponding rendered occupancy, depth and colour are defined as follows:", + "bbox": [ + 75, + 159, + 472, + 339 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {O} (\\mathbf {r}) = \\sum_ {i = 1} ^ {N} T _ {i}, \\hat {D} (\\mathbf {r}) = \\sum_ {i = 1} ^ {N} T _ {i} d _ {i}, \\hat {C} (\\mathbf {r}) = \\sum_ {i = 1} ^ {N} T _ {i} c _ {i}. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 86, + 349, + 468, + 391 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Training Objective For each object $k$ , we only sample training pixels inside that object's 2D bounding box, denoted by $\\mathcal{R}^k$ , and only optimise depth and colour for pixels inside its 2D object mask, denoted by $M^k$ . Note that it is always true that $M^k \\subset \\mathcal{R}^k$ . The depth, colour and occupancy loss for the object $k$ are defined as follows:", + "bbox": [ + 75, + 401, + 468, + 492 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nL _ {d e p t h} ^ {k} = M ^ {k} \\odot \\sum_ {\\mathbf {r} \\in R ^ {k}} | \\hat {D} (\\mathbf {r}) - D (\\mathbf {r}) |, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 158, + 503, + 468, + 537 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\text {c o l o u r}} ^ {k} = M ^ {k} \\odot \\sum_ {\\mathbf {r} \\in R ^ {k}} | \\hat {C} (\\mathbf {r}) - C (\\mathbf {r}) |, \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 156, + 540, + 468, + 573 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\text {o c c u p a n c y}} ^ {k} = \\sum_ {\\mathbf {r} \\in R ^ {k}} | \\hat {O} (\\mathbf {r}) - M ^ {k} (\\mathbf {r}) |. 
\\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 132, + 575, + 468, + 609 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The overall training objective then accumulates losses for all $K$ objects:", + "bbox": [ + 75, + 621, + 468, + 651 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nL = \\sum_ {k = 1} ^ {K} L _ {\\text {d e p t h}} ^ {k} + \\lambda_ {1} \\cdot L _ {\\text {c o l o u r}} ^ {k} + \\lambda_ {2} \\cdot L _ {\\text {o c c u p a n c y}} ^ {k}. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 94, + 662, + 468, + 704 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We choose loss weightings $\\lambda_1 = 5$ and $\\lambda_{2} = 10$ , which we found to work well in our experiments.", + "bbox": [ + 75, + 715, + 468, + 746 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.4. Compositional Scene Rendering", + "text_level": 1, + "bbox": [ + 76, + 756, + 359, + 772 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Since vMAP represents objects in a purely disentangled representation space, we can obtain each 3D object by querying within its estimated 3D object bounds and easily manipulate it. For 2D novel view synthesis, we use the Ray-Box Intersection algorithm [14] to calculate near and far bounds for each object, and then rank rendered depths along each ray to achieve occlusion-aware scene-level rendering. This disentangled representation also opens up other types", + "bbox": [ + 75, + 780, + 470, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "of fine-grained object-level manipulation, such as changing object shape or textures by conditioning on disentangled pre-trained feature fields [20, 42], which we consider as an interesting future direction.", + "bbox": [ + 496, + 90, + 890, + 151 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 498, + 161, + 632, + 179 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We have comprehensively evaluated vMAP on a range of different datasets, which include both simulated and real-world sequences, with and without ground-truth object masks and poses. For all datasets, we qualitatively compare our system to prior state-of-the-art SLAM frameworks on 2D and 3D scene-level and object-level rendering. We further quantitatively compare these systems in datasets where ground-truth meshes are available. Please see our attached supplementary material for more results.", + "bbox": [ + 496, + 186, + 890, + 323 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1. Experimental Setup", + "text_level": 1, + "bbox": [ + 498, + 332, + 689, + 349 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Datasets We evaluated on Replica [29], ScanNet [3], and TUM RGB-D [6]. Each dataset contains sequences with different levels of quality in object masks, depth and pose measurements. Additionally, we also showed vMAP's performance in complex real-world with self-captured video sequences recorded by an Azure Kinect RGB-D camera. An overview of these datasets is shown in Tab. 1.", + "bbox": [ + 496, + 354, + 890, + 460 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/8c8023181474d15da3baad08fe14767bf86a356b8237a2cb0ff515a2c4a85e99.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Object MasksDepth QualityPose Estimation
ReplicaPerfect GTPerfect GTPerfect GT
ScanNetNoisyNoisyPerfect GT
TUM RGB-DDeticNoisyORB-SLAM3
Our RecordingDeticNoisyORB-SLAM3
", + "bbox": [ + 501, + 469, + 890, + 547 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Table 1. An overview of datasets we evaluated.", + "bbox": [ + 553, + 558, + 834, + 571 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Datasets with perfect ground-truth information represent the upper-bound performance of our system. We expect vMAP's performance in the real-world setting can be further improved, when coupled with a better instance segmentation and pose estimation framework.", + "bbox": [ + 496, + 577, + 890, + 652 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Implementation Details We conduct all experiments on a desktop PC with a 3.60 GHz i7-11700K CPU and a single Nvidia RTX 3090 GPU. We choose our instance segmentation detector to be Detic [47], pre-trained on an open-vocabulary LVIS dataset [7] which contains more than 1000 object classes. We choose our pose estimation framework to be ORB-SLAM3 [2], for its fast and accurate tracking performance. We continuously update the keyframe poses using the latest estimates from ORB-SLAM3.", + "bbox": [ + 496, + 657, + 890, + 794 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We applied the same set of hyper-parameters for all datasets. Both our object and background model use 4-layer MLPs, with each layer having hidden size 32 (object) and 128 (background). For object / background, we selected keyframes every $25/50$ frames, $120/1200$ rays each training step with 10 points per ray. The number of objects in a scene typically varies between 20 and 70, among which the", + "bbox": [ + 496, + 795, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "955", + "bbox": [ + 485, + 944, + 509, + 955 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/b85e48cf1d501b1055e72fdcec21f287b7f09c09800f95098e6a6d7e04ea85e5.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TSDF-Fusion*iMAPiMAP*NICE-SLAMNICE-SLAM*vMAP
Scene Acc. [cm] ↓1.284.432.152.943.043.20
Scene Comp. [cm] ↓5.615.562.884.023.842.39
Scene Comp. Ratio [<5cm %] ↑82.6779.0690.8586.7386.5292.99
Object Acc. [cm] ↓0.45-3.57-3.912.23
Object Comp. [cm] ↓3.69-2.38-3.271.44
Object Comp. Ratio [<5cm %] ↑82.98-90.19-83.9794.55
Object Comp. Ratio [<1cm %] ↑61.70-47.79-37.7969.23
", + "bbox": [ + 78, + 88, + 890, + 200 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 2. Averaged reconstruction results for 8 indoor Replica scenes. * represents the baselines we re-trained with ground-truth pose.", + "bbox": [ + 89, + 203, + 877, + 218 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/101e2a669703e2eb7a292e30351473b042a8c2f462df91ce1ab39684fcaab209.jpg", + "image_caption": [ + "Figure 3. Scene reconstruction for 4 selected Replica scenes. Interesting regions are highlighted with coloured boxes, showing vMAP's significantly improved reconstruction quality. All scene meshes are provided by the original authors." + ], + "image_footnote": [], + "bbox": [ + 96, + 223, + 895, + 837 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "956", + "bbox": [ + 485, + 945, + 511, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/a9842deedfad70f6912b9dc18e83685a385d58d49881a8cf0405ca4dff393cc4.jpg", + "image_caption": [ + "TSDF-Fusion" + ], + "image_footnote": [], + "bbox": [ + 94, + 107, + 202, + 184 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e396859132901ab9dfabd570622f2f6fb3f929f13a7f21b7f37f97587bb9a85f.jpg", + "image_caption": [ + "ObjSDF" + ], + "image_footnote": [], + "bbox": [ + 233, + 108, + 336, + 183 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b8e4699eb72f2854f3a10949d247f924f14fa4123d1d68ffc12e41c57151bc78.jpg", + "image_caption": [ + "vMAP" + ], + "image_footnote": [], + "bbox": [ + 370, + 107, + 483, + 184 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/a4c957d7487883bc99b0592446b261e5d59ea70b3fa1d39744b892b2ffcdec25.jpg", + "image_caption": [ + "TSDF-Fusion" + ], + "image_footnote": [], + "bbox": [ + 501, + 111, + 614, + 180 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b7880ce3b18f3d1c5dbbc19679d80157c707e03397a7e9074e8b2fb3b8b1fc3a.jpg", + "image_caption": [ + "ObjSDF" + ], + "image_footnote": [], + "bbox": [ + 635, + 121, + 751, + 183 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b5e3c4667ee120a8345d89b107c58717fbc96a6d55eabb194c397d5d75ef5a4b.jpg", + "image_caption": [ + "vMAP" + ], + "image_footnote": [], + "bbox": [ + 777, + 109, + 887, + 181 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/29fbad12d300d6b7e9635941ce1a5f178cef86cb70174de42a002978c7eac868.jpg", + "image_caption": [ + "Figure 4. Visualisation of object reconstructions with vMAP compared to TSDF-Fusion and ObjSDF. Note that all object reconstructions from ObjSDF require much longer off-line training. All object meshes from ObjSDF are provided by the original authors." 
+ ], + "image_footnote": [], + "bbox": [ + 94, + 205, + 192, + 272 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/2300ca72cea5e322f566e720f4d69662710bd3a1e4589e3e47bfb276192fb8f9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 233, + 207, + 330, + 272 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/4c282363d906f8363f25d7ec23f2024ca14ced2b0d17b05d3173a5a9ff8ffefe.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 369, + 205, + 467, + 272 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/8853dd08f1584336f224ba73c4c14cf3bc140d00fa4b61c25390aba4adb59fb4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 493, + 203, + 594, + 271 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/27d589b3fcea57ac72a942113af6ae1386492d2daa6d632a49bfcb817f7621ae.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 645, + 205, + 728, + 271 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/9bcccd19d754b2561327fa0deb36a0f583910aca2b025998b03b4de2c2c836d3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 777, + 203, + 867, + 270 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/92d81487f672b50fa6d13830f40ba428f6d7cb797b6e24806c8956c98e23d47e.jpg", + "image_caption": [ + "NICE-SLAM*", + "Figure 5. Visualisation of scene reconstruction from NICE-SLAM* (left) and vMAP (right) in a selected ScanNet sequence. Interesting regions are zoomed in. NICE-SLAM* was re-trained with ground-truth poses." + ], + "image_footnote": [], + "bbox": [ + 91, + 339, + 495, + 503 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/8a02031b70e79897803f030b8c695f33b451a8a63e57fef24dc1d533df2dba8a.jpg", + "image_caption": [ + "vMAP" + ], + "image_footnote": [], + "bbox": [ + 506, + 339, + 910, + 503 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "largest number of objects are in Replica and ScanNet scenes with an average of 50 objects per scene.", + "bbox": [ + 75, + 556, + 468, + 588 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Metrics Following the convention of prior work [31, 48], we adopt Accuracy, Completion, and Completion Ratio for 3D scene-level reconstruction metrics. Besides, we note that such scene-level metrics are heavily biased towards the reconstruction of large objects like walls and floors. Therefore, we additionally provide these metrics at the object-level, by averaging metrics for all objects in each scene.", + "bbox": [ + 75, + 595, + 468, + 700 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Evaluation on Scene and Object Reconstruction", + "text_level": 1, + "bbox": [ + 76, + 710, + 468, + 726 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Results on Replica We experimented on 8 Replica scenes, using the rendered trajectories provided in [31], with 2000 RGB-D frames in each scene. Tab. 2 shows the averaged quantitative reconstruction results in these Replica indoor sequences. For scene-level reconstruction, we compared with TSDF-Fusion [46], iMAP [31] and NICE-SLAM [48]. To isolate reconstruction, we also provided results for these baselines re-trained with ground-truth pose (marked with $*$ ), with their open-sourced code for the fair comparison. 
Specifically, iMAP* was implemented as a special case of vMAP, when considering the entire scene", + "bbox": [ + 75, + 734, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "as one object instance. For object-level reconstruction, we compared baselines trained with ground-truth pose.", + "bbox": [ + 498, + 558, + 888, + 588 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "vMAP's significant advantage thanks to object-level representation is to reconstruct tiny objects and objects with fine-grained details. Noticeably, vMAP achieved more than $50 - 70\\%$ improvement over iMAP and NICE-SLAM for object-level completion. The scene reconstructions of 4 selected Replica sequences are shown in Fig. 3, with interesting regions highlighted in coloured boxes. The quantitative results for 2D novel view rendering are further provided in the supplementary material.", + "bbox": [ + 496, + 589, + 890, + 724 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Results on ScanNet To evaluate on a more challenging setting, we experimented on ScanNet [3], a dataset composed of real scenes, with much noisier ground-truth depth maps and object masks. We choose a ScanNet sequence selected by ObjSDF [37], and we compared with TSDF-Fusion and ObjSDF for object-level reconstruction, and we compared with NICE-SLAM (re-trained with ground-truth pose) for scene-level reconstruction. Unlike ObjSDF, which was optimised from pre-selected posed images without depth for much longer off-line training, we ran both vMAP and TSDF-Fusion in an online setting with depth. As", + "bbox": [ + 496, + 734, + 892, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "957", + "bbox": [ + 485, + 945, + 509, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e863167255c280c7952889e039ef2ad0b785edb9b06e5906579d211e933c372f.jpg", + "image_caption": [ + "Figure 6. Visualisation of scene reconstruction from TSDF-Fusion (left) and vMAP (right) in a selected TUM RGB-D sequence, trained in real time for 99 seconds." + ], + "image_footnote": [], + "bbox": [ + 86, + 90, + 467, + 223 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/7b2112da8c8339a2b1a99d0b09b3bb8bf0c359ec2dc9484e7b2e70a52e4661bf.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ATE RMSE [cm] ↓ | iMAP | NICE-SLAM | vMAP | ORB-SLAM2
fr1/desk | 4.9 | 2.7 | 2.6 | 1.6
fr2.xyz | 2.0 | 1.8 | 1.6 | 0.4
fr3/office | 5.8 | 3.0 | 3.0 | 1.0
", + "bbox": [ + 81, + 292, + 460, + 358 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "shown in Fig. 4, we see that vMAP generates objects with more coherent geometry than TSDF-Fusion; and with much finer details than ObjSDF, though with a much shorter training time. And consistently, we can see that vMAP generates much sharper object boundaries and textures compared to NICE-SLAM, as shown in Fig. 5.", + "bbox": [ + 75, + 396, + 468, + 488 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Results on TUM RGB-D We evaluated on a TUM RGB-D sequence captured in the real-world, with object masks predicted by an off-the-shelf pre-trained instance segmentation network [47], and poses estimated by ORB-SLAM3 [2]. Since our object detector has no spatio-temporal consistency, we found that the same object can be occasionally detected as two different instances, which leads to some reconstruction artifacts. For example, the object ' globe' shown in Fig. 6 was also detected as 'balloon' in some frames, resulting the 'splitting' artifacts in the final object reconstruction. Overall, vMAP still predicts more coherent reconstruction for most objects in a scene, with realistic hole-filling capabilities compared to TSDF-Fusion. However, we acknowledge that the completion of complete out-of-view regions (e.g., the back of a chair) is beyond the reach of our system due to the lack of general 3D prior.", + "bbox": [ + 75, + 505, + 468, + 747 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Though our work focuses more on mapping performance than pose estimation, we also report ATE RMSE [30] in Tab. 3 following [31,48], by jointly optimising camera pose with map. We can observe that vMAP achieves superior performance, due to the fact that reconstruction and tracking quality are typically highly interdependent. However, there is a noticeable performance gap compared to ORBSLAM. As such, we directly choose ORB-SLAM as our external tracking system, which leads to faster training speed, cleaner implementation, and higher tracking quality.", + "bbox": [ + 75, + 750, + 468, + 901 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1b340c53001a5cd2b483109310669dbbe01b840f94a625255a7df53bc5d227e8.jpg", + "image_caption": [ + "Figure 7. Visualisation of table-top reconstruction (top) and individual object reconstructions (bottom), from vMAP running in real time using an Azure Kinect RGB-D camera for 170 seconds." + ], + "image_footnote": [], + "bbox": [ + 504, + 89, + 888, + 284 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/64f453d5a3bdbf58a652417dda9e707bdcc4ba56cab0a15e4a939ddc124261f8.jpg", + "table_caption": [ + "Table 3. Camera tracking results on TUM RGB-D." + ], + "table_footnote": [], + "table_body": "
NICE-SLAM* | iMAP | vMAP | vMAP (w/o BG)
Model Param. ↓ | 12.12M | 0.32M | 0.66M | 0.56M
Runtime ↓ | 34min34s | 12min29s | 8min16s | 6min01s
Mapping Time ↓ | 845ms | 360ms | 226ms | 120ms
", + "bbox": [ + 504, + 353, + 887, + 417 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 4. vMAP is extremely memory-efficient and runs $1.5\\mathrm{x}$ and $4\\mathrm{x}$ faster than iMAP and NICE-SLAM respectively, with even higher performance gains without the background (BG) model.", + "bbox": [ + 498, + 428, + 890, + 470 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Results on Live Kinect Data Finally, we show the reconstruction result of vMAP on a table-top scene, from running in real-time with an Azure Kinect RGB-D camera. As shown in Fig. 7, vMAP is able to generate a range of realistic, watertight object meshes from different categories.", + "bbox": [ + 498, + 484, + 890, + 560 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Performance Analysis", + "text_level": 1, + "bbox": [ + 500, + 571, + 705, + 585 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we compare different training strategies and architectural design choices for our vMAP system. For simplicity, all experiments were done on the Replica Room-0 sequence, with our default training hyper-parameters.", + "bbox": [ + 498, + 595, + 890, + 656 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Memory and Runtime We compared memory usage and runtime with iMAP and NICE-SLAM in Tab. 4 and Fig. 9, all trained with ground-truth pose, and with the default training hyper-parameters listed in each method, for fair comparison. Specifically, we reported the Runtime for training the entire sequence, and Mapping Time for training each single frame, given the exact same hardware. We can observe that vMAP is highly memory efficient with less than 1M parameters. We want to highlight that vMAP achieves better reconstruction quality, and runs significantly faster ( $\\sim$ 5Hz) than iMAP and NICE-SLAM with 1.5x and 4x training speed improvement respectively.", + "bbox": [ + 496, + 664, + 892, + 845 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Vectorised v.s. Sequential Training We ablated training speed with vectorised and sequential operations (for loops), conditioned on different numbers of objects and different", + "bbox": [ + 498, + 854, + 893, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "958", + "bbox": [ + 485, + 945, + 511, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/7c401d551ce6d8f26fdfddb4ced3a26d30ad59eae3098e791383f566ff3cb60e.jpg", + "image_caption": [ + "Figure 8. Vectorised operation allows extremely fast training speed compared to standard sequential operations using for loops." + ], + "image_footnote": [], + "bbox": [ + 81, + 89, + 269, + 242 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f7e64f3dc1b4a31e45229d4c55f0cdf16ad3e99589189dde6199a03081c9121a.jpg", + "image_caption": [ + "Figure 9. Object-level Reconstruction v.s. Model Param. (denoted by network hidden size). vMAP is more compact than iMAP, with the performance starting to saturate from hidden size 16." + ], + "image_footnote": [], + "bbox": [ + 276, + 89, + 464, + 244 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "sizes of object model. In Fig. 8, we can see that vectorised training enables tremendous improvements in optimisation speed, especially when we have a large number of objects. And with vectorised training, each optimisation step takes no more than $15\\mathrm{ms}$ even when we train as many as 200 objects. 
Additionally, vectorised training is also stable across a wide range of model sizes, suggesting that we can train our object models with an even larger size if required, with minimal additional training time. As expected, vectorised training and for loops will eventually have similar training speed, when we reach the hardware's memory limit.", + "bbox": [ + 75, + 295, + 467, + 460 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To train multiple models in parallel, an initial approach we tried was spawning a process per object. However, we were only able to spawn a very limited number of processes, due to the per process CUDA memory overhead, which significantly limited the number of objects.", + "bbox": [ + 76, + 462, + 467, + 537 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Object Model Capacity As vectorised training has minimal effect on training speed in terms of object model design, we also investigated how the object-level reconstruction quality is affected by different object model sizes. We experimented with different object model sizes by varying the hidden size of each MLP layer. In Fig. 9, we can see that the object-level performance starts to saturate starting from hidden size 16, with minimal or no improvement by further increasing model sizes. This indicates that object-level representation is highly compressible, and can be efficiently and accurately parameterised by very few parameters.", + "bbox": [ + 75, + 545, + 467, + 712 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Stacked MLPs vs. Shared MLP Apart from representing each object by a single individual MLP, we also explored a shared MLP design by considering multi-object mapping as a multi-task learning problem [26, 33]. Here, each object is additionally associated with a learnable latent code, and this latent code is considered as an conditional input to the network, jointly optimised with the network weights. Though we have tried multiple multi-task learning architectures [12, 18], early experiments (denoted as vMAP-S in Fig. 9) showed that this shared MLP design achieved slightly degraded reconstruction quality and had no distinct training speed improvement compared to stacked", + "bbox": [ + 75, + 719, + 467, + 900 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/02860218b955d22345fa48a3b9991abca8f15895c9b30df086162795b18d2b0b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 90, + 692, + 196 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/07fa112510c756f6f16a7b78c975c9b13830cb89c4a27e74994d6e0871f9875c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 697, + 90, + 890, + 196 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "MLPs, particularly when powered by vectorised training. Furthermore, we found that shared MLP design can lead to undesired training properties: i) The shared MLP needs to be optimised along with the latent codes from all the objects, since the network weights and all object codes are entangled in a shared representation space. ii) The shared MLP capacity is fixed during training, and therefore the representation space might not be sufficient with an increasing number of objects. This accentuates the advantages of disentangled object representation space, which is a crucial design element of vMAP system.", + "bbox": [ + 496, + 263, + 890, + 431 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. 
Conclusion", + "text_level": 1, + "bbox": [ + 500, + 444, + 617, + 459 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We have presented vMAP, a real-time object-level mapping system with simple and compact neural implicit representation. By decomposing the 3D scene into meaningful instances, represented by a batch of tiny separate MLPs, the system models the 3D scene in an efficient and flexible way, enabling scene re-composition, independent tracking and continually updating of objects of interest. In addition to more accurate and compact object-centric 3D reconstruction, our system is able to predict plausible watertight surfaces for each object, even under partial occlusion.", + "bbox": [ + 496, + 469, + 890, + 621 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitations and Future Work Our current system relies on an off-the-shelf detector for instance masks, which are not necessarily spatio-temporally consistent. Though the ambiguity is partially alleviated by data association and multi-view supervision, a reasonable global constraints will be better. As objects are modelled independently, dynamic objects can be continually tracked and reconstructed to enable downstream tasks, e.g., robotic manipulation [34]. To extend our system to a monocular dense mapping system, depth estimation networks [13, 41] or more efficient neural rendering approaches [19] could be further integrated.", + "bbox": [ + 496, + 627, + 890, + 792 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 500, + 800, + 666, + 816 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Research presented in this paper has been supported by Dyson Technology Ltd. Xin Kong holds a China Scholarship Council-Imperial Scholarship. We are very grateful to Edgar Sucar, Binbin Xu, Hidenobu Matsuki and Anagh Malik for fruitful discussions.", + "bbox": [ + 496, + 825, + 890, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "959", + "bbox": [ + 485, + 945, + 509, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Jad Abou-Chakra, Feras Dayoub, and Niko Sunderhauf. Implicit object mapping with noisy data. arXiv preprint arXiv:2204.10516, 2022. 2", + "[2] Carlos Campos, Richard Elvira, Juan J Gomez Rodríguez, José MM Montiel, and Juan D Tardós. Orb-slam3: An accurate open-source library for visual, visual-inertial, and multimap slam. IEEE Transactions on Robotics (T-RO), 2021. 4, 7", + "[3] Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 4, 6", + "[4] Angela Dai, Matthias Nießner, Michael Zollhöfer, Shahram Izadi, and Christian Theobalt. Bundlefusion: Real-time globally consistent 3d reconstruction using on-the-fly surface reintegration. ACM Transactions on Graphics (ToG), 2017. 1", + "[5] Thomas Davies, Derek Nowrouzezahrai, and Alec Jacobson. On the effectiveness of weight-encoded neural implicit 3d shapes. In Proceedings of the International Conference on Machine Learning (ICML), 2021. 2", + "[6] Felix Endres, Jürgen Hess, Nikolas Engelhard, Jürgen Sturm, Daniel Cremers, and Wolfram Burgard. An Evaluation of the RGB-D SLAM System. 
In Proceedings of the IEEE International Conference on Robotics and Automation (ICRA), 2012. 4", + "[7] Agrim Gupta, Piotr Dollar, and Ross Girshick. Lvis: A dataset for large vocabulary instance segmentation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 4", + "[8] He Horace and Zou Richard. functorch: Jax-like composable function transforms for pytorch. https://github.com/pytorch/functorch, 2021.3", + "[9] Wonbong Jang and Lourdes Agapito. Codenerf: Disentangled neural radiance fields for object categories. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 2", + "[10] Xin Kong, Xuemeng Yang, Guangyao Zhai, Xiangrui Zhao, Xianfang Zeng, Mengmeng Wang, Yong Liu, Wanlong Li, and Feng Wen. Semantic graph based place recognition for 3d point clouds. In Proceedings of the IEEE/RSJ Conference on Intelligent Robots and Systems (IROS), 2020. 2", + "[11] Guanglin Li, Yifeng Li, Zhichao Ye, Qihang Zhang, Tao Kong, Zhaopeng Cui, and Guofeng Zhang. Generative category-level shape and pose estimation with semantic primitives. In Conference on Robot Learning (CoRL), 2022. 2", + "[12] Shikun Liu, Edward Johns, and Andrew J Davison. End-to-end multi-task learning with attention. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 8", + "[13] Xiaoyang Lyu, Liang Liu, Mengmeng Wang, Xin Kong, Lina Liu, Yong Liu, Xinxin Chen, and Yi Yuan. Hr-depth: High resolution self-supervised monocular depth estimation. In" + ], + "bbox": [ + 78, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Proceedings of the National Conference on Artificial Intelligence (AAAI), 2021. 8", + "[14] Alexander Majercik, Cyril Crassin, Peter Shirley, and Morgan McGuire. A ray-box intersection algorithm and efficient dynamic voxel rendering. Journal of Computer Graphics Techniques (JCGT), 2018. 4", + "[15] John McCormac, Ronald Clark, Michael Bloesch, Andrew Davison, and Stefan Leutenegger. Fusion++: Volumetric object-level slam. In Proceedings of the International Conference on 3D Vision (3DV), 2018. 2", + "[16] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2", + "[17] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing scenes as neural radiance fields for view synthesis. In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 2", + "[18] Ishan Misra, Abhinav Shrivastava, Abhinav Gupta, and Martial Hebert. Cross-stitch networks for multi-task learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 8", + "[19] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (ToG), 2022. 2, 8", + "[20] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 4", + "[21] Michael Oechsle, Songyou Peng, and Andreas Geiger. Unisurf: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. 
In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 4", + "[22] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning continuous signed distance functions for shape representation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2", + "[23] Daniel Rebain, Wei Jiang, Soroosh Yazdani, Ke Li, Kwang Moo Yi, and Andrea Tagliasacchi. Derf: Decomposed radiance fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[24] Christian Reiser, Songyou Peng, Yiyi Liao, and Andreas Geiger. Kilonerf: Speeding up neural radiance fields with thousands of tiny mlp's. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 2, 3", + "[25] Antoni Rosinol, John J Leonard, and Luca Carlone. Nerf-slam: Real-time dense monocular slam with neural radiance fields. arXiv preprint arXiv:2210.13641, 2022. 2", + "[26] Sebastian Ruder. An overview of multi-task learning in deep neural networks. arXiv preprint arXiv:1706.05098, 2017. 8", + "[27] Martin Rünz and Lourdes Agapito. Co-fusion: Real-time segmentation, tracking and fusion of multiple objects." + ], + "bbox": [ + 503, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "960", + "bbox": [ + 485, + 945, + 511, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "In Proceedings of the IEEE International Conference on Robotics and Automation (ICRA), 2017. 2", + "[28] Renato F Salas-Moreno, Richard A Newcombe, Hauke Strasdat, Paul HJ Kelly, and Andrew J Davison. SLAM++: Simultaneous Localisation and Mapping at the Level of Objects. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2013. 2", + "[29] Julian Straub, Thomas Whelan, Lingni Ma, Yufan Chen, Erik Wijmans, Simon Green, Jakob J Engel, Raul Mur-Artal, Carl Ren, Shobhit Verma, et al. The replica dataset: A digital replica of indoor spaces. arXiv preprint arXiv:1906.05797, 2019.4", + "[30] J. Sturm, N. Engelhard, F. Endres, W. Burgard, and D. Cremers. A Benchmark for the Evaluation of RGB-D SLAM Systems. In Proceedings of the IEEE/RSJ Conference on Intelligent Robots and Systems (IROS), 2012. 7", + "[31] Edgar Sucar, Shikun Liu, Joseph Ortiz, and Andrew J. Davison. imap: Implicit mapping and positioning in real-time. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 1, 2, 6, 7", + "[32] Edgar Sucar, Kentaro Wada, and Andrew Davison. NodeSLAM: Neural object descriptors for multi-view shape reconstruction. In Proceedings of the International Conference on 3D Vision (3DV), 2020. 2", + "[33] Simon Vandenhende, Stamatios Georgoulis, Wouter Van Gansbeke, Marc Proesmans, Dengxin Dai, and Luc Van Gool. Multi-task learning for dense prediction tasks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence (PAMI), 2021. 8", + "[34] Kentaro Wada, Edgar Sucar, Stephen James, Daniel Lenton, and Andrew J Davison. Morefusion: Multi-object reasoning for 6d pose estimation from volumetric fusion. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 8", + "[35] Jingwen Wang, Tymoteusz Bleja, and Lourdes Agapito. Gosurf: Neural feature grid optimization for fast, high-fidelity rgb-d surface reconstruction. In Proceedings of the International Conference on 3D Vision (3DV), 2022. 
2", + "[36] Jingwen Wang, Martin Rünz, and Lourdes Agapito. Dsp-slam: object oriented slam with deep shape priors. In 2021 International Conference on 3D Vision (3DV), 2021. 2", + "[37] Qianyi Wu, Xian Liu, Yuedong Chen, Kejie Li, Chuanxia Zheng, Jianfei Cai, and Jianmin Zheng. Object-compositional neural implicit surfaces. In Proceedings of the European Conference on Computer Vision (ECCV), 2022. 2, 6", + "[38] Binbin Xu, Wenbin Li, Dimos Tzoumanikas, Michael Bloesch, Andrew Davison, and Stefan Leutenegger. MID-Fusion: Octree-based object-level multi-instance dynamic slam. In Proceedings of the IEEE International Conference on Robotics and Automation (ICRA), 2019. 2", + "[39] Bangbang Yang, Yinda Zhang, Yinghao Xu, Yijin Li, Han Zhou, Hujun Bao, Guofeng Zhang, and Zhaopeng Cui. Learning object-compositional neural radiance field for ed-itable scene rendering. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 2", + "[40] Xingrui Yang, Hai Li, Hongjia Zhai, Yuhang Ming, Yuqian Liu, and Guofeng Zhang. Vox-Fusion: Dense tracking and" + ], + "bbox": [ + 78, + 90, + 468, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "mapping with voxel-based neural implicit representation. In Proceedings of the International Symposium on Mixed and Augmented Reality (ISMAR), 2022. 2", + "[41] Zehao Yu, Songyou Peng, Michael Niemeyer, Torsten Sattler, and Andreas Geiger. Monosdf: Exploring monocular geometric cues for neural implicit surface reconstruction. Advances in Neural Information Processing Systems (NeurIPS), 2022. 8", + "[42] Yu-Jie Yuan, Yang-Tian Sun, Yu-Kun Lai, Yuewen Ma, Rongfei Jia, and Lin Gao. Nerf-editing: geometry editing of neural radiance fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 4", + "[43] Shuaifeng Zhi, Michael Bloesch, Stefan Leutenegger, and Andrew J Davison. SceneCode: Monocular dense semantic reconstruction using learned encoded scene representations. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2", + "[44] Shuaifeng Zhi, Edgar Sucar, Andre Mouton, Iain Haughton, Tristan Laidlow, and Andrew J Davison. ilabel: Revealing objects in neural fields. IEEE Robotics and Automation Letters (RA-L), 2022. 2", + "[45] Xingguang Zhong, Yue Pan, Jens Behley, and Cyril Stachniss. Shine-mapping: Large-scale 3d mapping using sparse hierarchical implicit neural representations. In Proceedings of the IEEE International Conference on Robotics and Automation (ICRA), 2023. 2", + "[46] Qian-Yi Zhou and Vladlen Koltun. Dense scene reconstruction with points of interest. ACM Transactions on Graphics (ToG), 2013. 1, 6", + "[47] Xingyi Zhou, Rohit Girdhar, Armand Joulin, Philipp Krahenbuhl, and Ishan Misra. Detecting twenty-thousand classes using image-level supervision. In Proceedings of the European Conference on Computer Vision (ECCV), 2022. 4, 7", + "[48] Zihan Zhu, Songyou Peng, Viktor Larsson, Weiwei Xu, Hujun Bao, Zhaopeng Cui, Martin R Oswald, and Marc Pollefeys. Nice-slam: Neural implicit scalable encoding for slam. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 
2, 6, 7" + ], + "bbox": [ + 501, + 92, + 890, + 656 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "961", + "bbox": [ + 486, + 945, + 509, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2023/vMAP_ Vectorised Object Mapping for Neural Field SLAM/ff44bc7e-edea-45f5-9989-5020c0b824b0_model.json b/2023/vMAP_ Vectorised Object Mapping for Neural Field SLAM/ff44bc7e-edea-45f5-9989-5020c0b824b0_model.json new file mode 100644 index 0000000000000000000000000000000000000000..b04f2b54297b3e13b8d107b2a423270773e70c66 --- /dev/null +++ b/2023/vMAP_ Vectorised Object Mapping for Neural Field SLAM/ff44bc7e-edea-45f5-9989-5020c0b824b0_model.json @@ -0,0 +1,2189 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.131, + 0.784, + 0.152 + ], + "angle": 0, + "content": "vMAP: Vectorised Object Mapping for Neural Field SLAM" + }, + { + "type": "text", + "bbox": [ + 0.236, + 0.181, + 0.734, + 0.218 + ], + "angle": 0, + "content": "Xin Kong Shikun Liu Marwan Taher Andrew J. Davison Dyson Robotics Lab, Imperial College London" + }, + { + "type": "text", + "bbox": [ + 0.223, + 0.22, + 0.742, + 0.234 + ], + "angle": 0, + "content": "{x.kong21, shikun.liu17, m.taher, a.davison}@imperial.ac.uk" + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.27, + 0.895, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.418, + 0.893, + 0.488 + ], + "angle": 0, + "content": "Figure 1. vMAP automatically builds an object-level scene model from a real-time RGB-D input stream. Each object is represented by a separate MLP neural field model, all optimised in parallel via vectorised training. We use no 3D shape priors, but the MLP representation encourages object reconstruction to be watertight and complete, even when objects are partially observed or are heavily occluded in the input images. See for instance the separate reconstructions of the armchairs, sofas and cushions, which were mutually occluding each other, in this example from Replica." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.498, + 0.314, + 0.515 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.53, + 0.47, + 0.59 + ], + "angle": 0, + "content": "We present vMAP, an object-level dense SLAM system using neural field representations. Each object is represented by a small MLP, enabling efficient, watertight object modelling without the need for 3D priors." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.591, + 0.471, + 0.742 + ], + "angle": 0, + "content": "As an RGB-D camera browses a scene with no prior information, vMAP detects object instances on-the-fly, and dynamically adds them to its map. Specifically, thanks to the power of vectorised training, vMAP can optimise as many as 50 individual objects in a single scene, with an extremely efficient training speed of \\(5\\mathrm{Hz}\\) map update. We experimentally demonstrate significantly improved scene-level and object-level reconstruction quality compared to prior neural field SLAM systems. 
Project page: https://kxhit.github.io/vMAP." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.755, + 0.21, + 0.77 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.47, + 0.855 + ], + "angle": 0, + "content": "For robotics and other interactive vision applications, an object-level model is arguably semantically optimal, with scene entities represented in a separated, composable way, but also efficiently focusing resources on what is important in an environment." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.471, + 0.901 + ], + "angle": 0, + "content": "The key question in building an object-level mapping system is what level of prior information is known about the objects in a scene in order to segment, classify and re" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.5, + 0.893, + 0.606 + ], + "angle": 0, + "content": "construct them. If no 3D object priors are available, then usually only the directly observed parts of objects can be reconstructed, leading to holes and missing parts [4, 46]. Prior object information such as CAD models or category-level shape space models enable full object shape estimation from partial views, but only for the subset of objects in a scene for which these models are available." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.61, + 0.895, + 0.777 + ], + "angle": 0, + "content": "In this paper, we present a new approach which applies to the case where no 3D priors are available but still often enables watertight object reconstruction in realistic real-time scene scanning. Our system, vMAP, builds on the attractive properties shown by neural fields as a real-time scene representation [31], with efficient and complete representation of shape, but now reconstructs a separate tiny MLP model of each object. The key technical contribution of our work is to show that a large number of separate MLP object models can be simultaneously and efficiently optimised on a single GPU during live operation via vectorised training." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.895, + 0.902 + ], + "angle": 0, + "content": "We show that we can achieve much more accurate and complete scene reconstruction by separately modelling objects, compared with using a similar number of weights in a single neural field model of the whole scene. Our real-time system is highly efficient in terms of both computation and memory, and we show that scenes with up to 50 objects can be mapped with 40KB per object of learned parameters across the multiple, independent object networks." + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.511, + 0.957 + ], + "angle": 0, + "content": "952" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.471, + 0.182 + ], + "angle": 0, + "content": "We also demonstrate the flexibility of our disentangled object representation to enable recomposition of scenes with new object configurations. Extensive experiments have been conducted on both simulated and real-world datasets, showing state-of-the-art scene-level and object-level reconstruction performance." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.196, + 0.22, + 0.212 + ], + "angle": 0, + "content": "2. 
Related Work" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.222, + 0.47, + 0.358 + ], + "angle": 0, + "content": "This work follows in long series of efforts to build real-time scene representations which are decomposed into explicit rigid objects, with the promise of flexible and efficient scene representation and even the possibility to represent changing scenes. Different systems assumed varying types of representation and levels of prior knowledge, from CAD models [28], via category-level shape models [10, 11, 32, 36] to no prior shape knowledge, although in this case only the visible parts of objects could be reconstructed [15, 27, 38]." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.364, + 0.471, + 0.622 + ], + "angle": 0, + "content": "Neural Field SLAM Neural fields have recently been widely used as efficient, accurate and flexible representations of whole scenes [16, 17, 19, 22]. To adopt these representations into real-time SLAM systems, iMAP [31] demonstrated for the first time that a simple MLP network, incrementally trained with the aid of depth measurements from RGB-D sensors, can represent room-scaled 3D scenes in real-time. Some of iMAP's most interesting properties were its tendency to produce watertight reconstructions, even often plausibly completing the unobserved back of objects. These coherence properties of neural fields were particularly revealed when semantic output channels were added, as in SemanticNeRF [43] and iLabel [44], and were found to inherit the coherence. To make implicit representation more scalable and efficient, a group of implicit SLAM systems [25, 35, 40, 45, 48] fused neural fields with conventional volumetric representations." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.629, + 0.47, + 0.81 + ], + "angle": 0, + "content": "Object Representations with Neural Fields However, obtaining individual object representations from these neural field methods is difficult, as the correspondences between network parameters and specific scene regions are complicated and difficult to determine. To tackle this, DeRF [23] decomposed a scene spatially and dedicated smaller networks to each decomposed part. Similarly, KiloNeRF [24] divided a scene into thousands of volumetric parts, each represented by a tiny MLP, and trained them in parallel with custom CUDA kernels to speed up NeRF. Different from KiloNeRF, vMAP decomposes the scene into objects which are semantically meaningful." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.471, + 0.903 + ], + "angle": 0, + "content": "To represent multiple objects, ObjectNeRF [39] and ObjSDF [37] took pre-computed instance masks as additional input and conditioned object representation on learnable object activation code. But these methods are still trained offline and tangle object representations with the main scene network, so that they need to optimise the network weights" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.167 + ], + "angle": 0, + "content": "with all object codes during training, and infer the whole network to get the shape of a desired object. This contrasts with vMAP which models objects individually, and is able to stop and resume training for any objects without any inter-object interference." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.168, + 0.893, + 0.363 + ], + "angle": 0, + "content": "The recent work most similar to ours has used the attractive properties of neural field MLPs to represent single objects. 
The analysis in [5] explicitly evaluated the use of over-fit neural implicit networks as a 3D shape representation for graphics, considering that they should be taken seriously. The work in [1] furthered this analysis, showing how object representation was affected by different observation conditions, though using the hybrid Instant NGP rather than a single MLP representation, so it is not clear whether some object coherence properties would be lost. Finally, the CodeNeRF system [9] trained a NeRF conditioned on learnable object codes, again proving the attractive properties of neural fields to represent single objects." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.364, + 0.892, + 0.439 + ], + "angle": 0, + "content": "We build on this work in our paper, but for the first time show that many individual neural field models making up a whole scene can be simultaneously trained within a real-time system, resulting in accurate and efficient representation of many-object scenes." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.453, + 0.892, + 0.489 + ], + "angle": 0, + "content": "3. vMAP: An Efficient Object Mapping System with Vectorised Training" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.497, + 0.671, + 0.513 + ], + "angle": 0, + "content": "3.1. System Overview" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.52, + 0.892, + 0.627 + ], + "angle": 0, + "content": "We first introduce our detailed design for object-level mapping with efficient vectorised training (Section 3.2), and then explain our improved training strategies of pixel sampling and surface rendering (Section 3.3). Finally, we show how we may recompose and render a new scene with these learned object models (Section 3.4). An overview of our training and rendering pipeline is shown in Fig. 2." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.636, + 0.795, + 0.653 + ], + "angle": 0, + "content": "3.2. Vectorised Object Level Mapping" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.659, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Object Initialisation and Association To start with, each frame is associated with densely labelled object masks. These object masks are either directly provided in the dataset, or predicted with an off-the-shelf 2D instance segmentation network. Since those predicted object masks have no temporal consistency across different frames, we perform object association between the previous and the current live frame, based on two criteria: i) Semantic Consistency: the object in the current frame is predicted as the same semantic class from the previous frame, and ii) Spatial Consistency: the object in the current frame is spatially close to the object in the previous frames, measured by the mean IoU of their 3D object bounds. When these two criteria are satisfied, we assume they are the same object instance and represent them with the same object model. Otherwise, they are different object instances and we initialise" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.511, + 0.957 + ], + "angle": 0, + "content": "953" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.096, + 0.89, + 0.439 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.283, + 0.448, + 0.685, + 0.463 + ], + "angle": 0, + "content": "Figure 2. An overview of training and rendering pipeline of vMAP." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.475, + 0.435, + 0.49 + ], + "angle": 0, + "content": "a new object model and append it to the models stack." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.491, + 0.47, + 0.642 + ], + "angle": 0, + "content": "For each object in a frame, we estimate its 3D object bound by its 3D point cloud, parameterised by its depth map and the camera pose. Camera tracking is externally provided by an off-the-shelf tracking system, which we found to be more accurate and robust compared to jointly optimising pose and geometry. If we detect the same object instance in a new frame, we merge its 3D point cloud from the previous frames to the current frame and re-estimate its 3D object bound. Therefore, these object bounds are dynamically updated and refined with more observations." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.65, + 0.47, + 0.741 + ], + "angle": 0, + "content": "Object Supervision We apply object-level supervision only for pixels inside a 2D object bounding box, for maximal training efficiency. For those pixels within an object mask, we encourage the object radiance field to be occupied and supervise them with depth and colour loss. Otherwise we encourage the object radiance field to be empty." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.741, + 0.47, + 0.802 + ], + "angle": 0, + "content": "Each object instance samples training pixels from its own independent keyframe buffer. Therefore, we have flexibility to stop or resume the training of any object, with no training interference between objects." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Vectorised Training Representing a neural field with multiple small networks can lead to efficient training, as shown in prior work [24]. In vMAP, all object models are of the same design, except for the background object which we represent with a slightly larger network. Therefore, we are able to stack these small object models together for vec" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.475, + 0.892, + 0.581 + ], + "angle": 0, + "content": "torised training, leveraging the highly optimised vectorised operations in PyTorch [8]. Since multiple object models are batched and trained simultaneously as opposed to sequentially, we optimise the use of the available GPU resources. We show that vectorised training is an essential design element to the system, resulting in significantly improved training speed, further discussed in Section 4.3." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.588, + 0.73, + 0.605 + ], + "angle": 0, + "content": "3.3. Neural Implicit Mapping" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.611, + 0.892, + 0.839 + ], + "angle": 0, + "content": "Depth Guided Sampling Neural fields trained on RGB data only have no guarantee to model accurate object geometry, due to the fact that they are optimising for appearance rather than the geometry. To obtain more geometrically accurate object models, we benefit from the depth map available from an RGB-D sensor, providing a strong prior for learning the density field of 3D volumes. 
Specifically, we sample \\( N_{s} \\) and \\( N_{c} \\) points along each ray, for which \\( N_{s} \\) points are sampled with a Normal distribution centered around the surface \\( t_{s} \\) (from the depth map), with a small \\( d_{\\sigma} \\) variance, and \\( N_{c} \\) points are uniformly sampled between the camera \\( t_{n} \\) (the near bound) and the surface \\( t_{s} \\), with a stratified sampling approach. When the depth measurement is invalid, the surface \\( t_{s} \\) is then replaced with the far bound \\( t_{f} \\). Mathematically, we have:" + }, + { + "type": "equation", + "bbox": [ + 0.51, + 0.845, + 0.892, + 0.878 + ], + "angle": 0, + "content": "\\[\nt _ {i} \\sim \\mathcal {U} \\left(t _ {n} + \\frac {i - 1}{N _ {c}} \\left(t _ {s} - t _ {n}\\right), t _ {n} + \\frac {i}{N _ {c}} \\left(t _ {s} - t _ {n}\\right)\\right), \\tag {1}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.512, + 0.88, + 0.892, + 0.898 + ], + "angle": 0, + "content": "\\[\nt _ {i} \\sim \\mathcal {N} \\left(t _ {s}, d _ {\\sigma} ^ {2}\\right). \\tag {2}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.945, + 0.512, + 0.957 + ], + "angle": 0, + "content": "954" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.152 + ], + "angle": 0, + "content": "We choose \\( d_{\\sigma} = 3cm \\) which works well in our implementation. We observe that training more points near the surface helps to guide the object models to quickly focus on representing accurate object geometry." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.16, + 0.473, + 0.34 + ], + "angle": 0, + "content": "Surface and Volume Rendering As we are concerned more by 3D surface reconstruction than 2D rendering, we omit the viewing direction from the network input, and model object visibility with a binary indicator (no transparent objects). With similar motivation to UniSURF [21], we parameterise the occupancy probability of a 3D point \\( x_{i} \\) as \\( o_{\\theta}(x_i)\\rightarrow [0,1] \\), where \\( o_{\\theta} \\) is a continuous occupancy field. Therefore, the termination probability at point \\( x_{i} \\) along ray \\( \\mathbf{r} \\) becomes \\( T_{i} = o(x_{i})\\prod_{j < i}(1 - o(x_{j})) \\), indicating that no occupied samples \\( x_{j} \\) with \\( j < i \\) exist before \\( x_{i} \\). The corresponding rendered occupancy, depth and colour are defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.088, + 0.35, + 0.47, + 0.392 + ], + "angle": 0, + "content": "\\[\n\\hat {O} (\\mathbf {r}) = \\sum_ {i = 1} ^ {N} T _ {i}, \\hat {D} (\\mathbf {r}) = \\sum_ {i = 1} ^ {N} T _ {i} d _ {i}, \\hat {C} (\\mathbf {r}) = \\sum_ {i = 1} ^ {N} T _ {i} c _ {i}. \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.402, + 0.47, + 0.493 + ], + "angle": 0, + "content": "Training Objective For each object \\( k \\), we only sample training pixels inside that object's 2D bounding box, denoted by \\( \\mathcal{R}^k \\), and only optimise depth and colour for pixels inside its 2D object mask, denoted by \\( M^k \\). Note that it is always true that \\( M^k \\subset \\mathcal{R}^k \\). 
The depth, colour and occupancy loss for the object \\( k \\) are defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.16, + 0.505, + 0.47, + 0.539 + ], + "angle": 0, + "content": "\\[\nL _ {d e p t h} ^ {k} = M ^ {k} \\odot \\sum_ {\\mathbf {r} \\in R ^ {k}} | \\hat {D} (\\mathbf {r}) - D (\\mathbf {r}) |, \\tag {4}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.157, + 0.541, + 0.47, + 0.574 + ], + "angle": 0, + "content": "\\[\nL _ {\\text {c o l o u r}} ^ {k} = M ^ {k} \\odot \\sum_ {\\mathbf {r} \\in R ^ {k}} | \\hat {C} (\\mathbf {r}) - C (\\mathbf {r}) |, \\tag {5}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.133, + 0.577, + 0.469, + 0.611 + ], + "angle": 0, + "content": "\\[\nL _ {\\text {o c c u p a n c y}} ^ {k} = \\sum_ {\\mathbf {r} \\in R ^ {k}} | \\hat {O} (\\mathbf {r}) - M ^ {k} (\\mathbf {r}) |. \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.622, + 0.47, + 0.652 + ], + "angle": 0, + "content": "The overall training objective then accumulates losses for all \\(K\\) objects:" + }, + { + "type": "equation", + "bbox": [ + 0.096, + 0.663, + 0.47, + 0.705 + ], + "angle": 0, + "content": "\\[\nL = \\sum_ {k = 1} ^ {K} L _ {\\text {d e p t h}} ^ {k} + \\lambda_ {1} \\cdot L _ {\\text {c o l o u r}} ^ {k} + \\lambda_ {2} \\cdot L _ {\\text {o c c u p a n c y}} ^ {k}. \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.716, + 0.47, + 0.747 + ], + "angle": 0, + "content": "We choose loss weightings \\(\\lambda_1 = 5\\) and \\(\\lambda_{2} = 10\\), which we found to work well in our experiments." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.757, + 0.36, + 0.773 + ], + "angle": 0, + "content": "3.4. Compositional Scene Rendering" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.781, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Since vMAP represents objects in a purely disentangled representation space, we can obtain each 3D object by querying within its estimated 3D object bounds and easily manipulate it. For 2D novel view synthesis, we use the Ray-Box Intersection algorithm [14] to calculate near and far bounds for each object, and then rank rendered depths along each ray to achieve occlusion-aware scene-level rendering. This disentangled representation also opens up other types" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.152 + ], + "angle": 0, + "content": "of fine-grained object-level manipulation, such as changing object shape or textures by conditioning on disentangled pre-trained feature fields [20, 42], which we consider as an interesting future direction." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.162, + 0.633, + 0.18 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.187, + 0.892, + 0.324 + ], + "angle": 0, + "content": "We have comprehensively evaluated vMAP on a range of different datasets, which include both simulated and real-world sequences, with and without ground-truth object masks and poses. For all datasets, we qualitatively compare our system to prior state-of-the-art SLAM frameworks on 2D and 3D scene-level and object-level rendering. We further quantitatively compare these systems in datasets where ground-truth meshes are available. Please see our attached supplementary material for more results." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.333, + 0.691, + 0.35 + ], + "angle": 0, + "content": "4.1. 
Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.356, + 0.892, + 0.462 + ], + "angle": 0, + "content": "Datasets We evaluated on Replica [29], ScanNet [3], and TUM RGB-D [6]. Each dataset contains sequences with different levels of quality in object masks, depth and pose measurements. Additionally, we also showed vMAP's performance in complex real-world with self-captured video sequences recorded by an Azure Kinect RGB-D camera. An overview of these datasets is shown in Tab. 1." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.47, + 0.892, + 0.549 + ], + "angle": 0, + "content": "
Object Masks | Depth Quality | Pose Estimation
Replica | Perfect GT | Perfect GT | Perfect GT
ScanNet | Noisy | Noisy | Perfect GT
TUM RGB-D | Detic | Noisy | ORB-SLAM3
Our Recording | Detic | Noisy | ORB-SLAM3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.555, + 0.559, + 0.836, + 0.572 + ], + "angle": 0, + "content": "Table 1. An overview of datasets we evaluated." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.578, + 0.892, + 0.654 + ], + "angle": 0, + "content": "Datasets with perfect ground-truth information represent the upper-bound performance of our system. We expect vMAP's performance in the real-world setting can be further improved, when coupled with a better instance segmentation and pose estimation framework." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.659, + 0.892, + 0.795 + ], + "angle": 0, + "content": "Implementation Details We conduct all experiments on a desktop PC with a 3.60 GHz i7-11700K CPU and a single Nvidia RTX 3090 GPU. We choose our instance segmentation detector to be Detic [47], pre-trained on an open-vocabulary LVIS dataset [7] which contains more than 1000 object classes. We choose our pose estimation framework to be ORB-SLAM3 [2], for its fast and accurate tracking performance. We continuously update the keyframe poses using the latest estimates from ORB-SLAM3." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We applied the same set of hyper-parameters for all datasets. Both our object and background model use 4-layer MLPs, with each layer having hidden size 32 (object) and 128 (background). For object / background, we selected keyframes every \\(25/50\\) frames, \\(120/1200\\) rays each training step with 10 points per ray. The number of objects in a scene typically varies between 20 and 70, among which the" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.945, + 0.511, + 0.957 + ], + "angle": 0, + "content": "955" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.08, + 0.089, + 0.891, + 0.201 + ], + "angle": 0, + "content": "
TSDF-Fusion* | iMAP | iMAP* | NICE-SLAM | NICE-SLAM* | vMAP
Scene Acc. [cm] ↓ | 1.28 | 4.43 | 2.15 | 2.94 | 3.04 | 3.20
Scene Comp. [cm] ↓ | 5.61 | 5.56 | 2.88 | 4.02 | 3.84 | 2.39
Scene Comp. Ratio [<5cm %] ↑ | 82.67 | 79.06 | 90.85 | 86.73 | 86.52 | 92.99
Object Acc. [cm] ↓ | 0.45 | - | 3.57 | - | 3.91 | 2.23
Object Comp. [cm] ↓ | 3.69 | - | 2.38 | - | 3.27 | 1.44
Object Comp. Ratio [<5cm %] ↑ | 82.98 | - | 90.19 | - | 83.97 | 94.55
Object Comp. Ratio [<1cm %] ↑ | 61.70 | - | 47.79 | - | 37.79 | 69.23
" + }, + { + "type": "table_caption", + "bbox": [ + 0.091, + 0.204, + 0.878, + 0.219 + ], + "angle": 0, + "content": "Table 2. Averaged reconstruction results for 8 indoor Replica scenes. * represents the baselines we re-trained with ground-truth pose." + }, + { + "type": "image", + "bbox": [ + 0.097, + 0.224, + 0.897, + 0.838 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.843, + 0.895, + 0.872 + ], + "angle": 0, + "content": "Figure 3. Scene reconstruction for 4 selected Replica scenes. Interesting regions are highlighted with coloured boxes, showing vMAP's significantly improved reconstruction quality. All scene meshes are provided by the original authors." + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.512, + 0.957 + ], + "angle": 0, + "content": "956" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.099, + 0.088, + 0.195, + 0.101 + ], + "angle": 0, + "content": "TSDF-Fusion" + }, + { + "type": "image", + "bbox": [ + 0.096, + 0.108, + 0.203, + 0.185 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.258, + 0.089, + 0.315, + 0.102 + ], + "angle": 0, + "content": "ObjSDF" + }, + { + "type": "image", + "bbox": [ + 0.235, + 0.109, + 0.338, + 0.184 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.401, + 0.089, + 0.446, + 0.101 + ], + "angle": 0, + "content": "vMAP" + }, + { + "type": "image", + "bbox": [ + 0.371, + 0.108, + 0.485, + 0.185 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.089, + 0.605, + 0.101 + ], + "angle": 0, + "content": "TSDF-Fusion" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.112, + 0.615, + 0.181 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.668, + 0.089, + 0.725, + 0.102 + ], + "angle": 0, + "content": "ObjSDF" + }, + { + "type": "image", + "bbox": [ + 0.637, + 0.122, + 0.753, + 0.184 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.811, + 0.089, + 0.856, + 0.101 + ], + "angle": 0, + "content": "vMAP" + }, + { + "type": "image", + "bbox": [ + 0.778, + 0.111, + 0.888, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.096, + 0.207, + 0.194, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.234, + 0.208, + 0.331, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.37, + 0.206, + 0.468, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.495, + 0.204, + 0.596, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.646, + 0.207, + 0.73, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.778, + 0.204, + 0.869, + 0.271 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.288, + 0.89, + 0.317 + ], + "angle": 0, + "content": "Figure 4. Visualisation of object reconstructions with vMAP compared to TSDF-Fusion and ObjSDF. Note that all object reconstructions from ObjSDF require much longer off-line training. All object meshes from ObjSDF are provided by the original authors." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.244, + 0.326, + 0.332, + 0.338 + ], + "angle": 0, + "content": "NICE-SLAM*" + }, + { + "type": "image", + "bbox": [ + 0.092, + 0.34, + 0.496, + 0.505 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.683, + 0.327, + 0.725, + 0.338 + ], + "angle": 0, + "content": "vMAP" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.34, + 0.911, + 0.505 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.514, + 0.89, + 0.543 + ], + "angle": 0, + "content": "Figure 5. Visualisation of scene reconstruction from NICE-SLAM* (left) and vMAP (right) in a selected ScanNet sequence. Interesting regions are zoomed in. NICE-SLAM* was re-trained with ground-truth poses." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.558, + 0.469, + 0.589 + ], + "angle": 0, + "content": "largest number of objects are in Replica and ScanNet scenes with an average of 50 objects per scene." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.596, + 0.469, + 0.702 + ], + "angle": 0, + "content": "Metrics Following the convention of prior work [31, 48], we adopt Accuracy, Completion, and Completion Ratio for 3D scene-level reconstruction metrics. Besides, we note that such scene-level metrics are heavily biased towards the reconstruction of large objects like walls and floors. Therefore, we additionally provide these metrics at the object-level, by averaging metrics for all objects in each scene." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.712, + 0.469, + 0.727 + ], + "angle": 0, + "content": "4.2. Evaluation on Scene and Object Reconstruction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Results on Replica We experimented on 8 Replica scenes, using the rendered trajectories provided in [31], with 2000 RGB-D frames in each scene. Tab. 2 shows the averaged quantitative reconstruction results in these Replica indoor sequences. For scene-level reconstruction, we compared with TSDF-Fusion [46], iMAP [31] and NICE-SLAM [48]. To isolate reconstruction, we also provided results for these baselines re-trained with ground-truth pose (marked with \\(*\\)), with their open-sourced code for the fair comparison. Specifically, iMAP* was implemented as a special case of vMAP, when considering the entire scene" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.559, + 0.89, + 0.589 + ], + "angle": 0, + "content": "as one object instance. For object-level reconstruction, we compared baselines trained with ground-truth pose." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.59, + 0.892, + 0.726 + ], + "angle": 0, + "content": "vMAP's significant advantage thanks to object-level representation is to reconstruct tiny objects and objects with fine-grained details. Noticeably, vMAP achieved more than \\(50 - 70\\%\\) improvement over iMAP and NICE-SLAM for object-level completion. The scene reconstructions of 4 selected Replica sequences are shown in Fig. 3, with interesting regions highlighted in coloured boxes. The quantitative results for 2D novel view rendering are further provided in the supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Results on ScanNet To evaluate on a more challenging setting, we experimented on ScanNet [3], a dataset composed of real scenes, with much noisier ground-truth depth maps and object masks. 
We choose a ScanNet sequence selected by ObjSDF [37], and we compared with TSDF-Fusion and ObjSDF for object-level reconstruction, and we compared with NICE-SLAM (re-trained with ground-truth pose) for scene-level reconstruction. Unlike ObjSDF, which was optimised from pre-selected posed images without depth for much longer off-line training, we ran both vMAP and TSDF-Fusion in an online setting with depth. As" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.511, + 0.957 + ], + "angle": 0, + "content": "957" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.088, + 0.092, + 0.468, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.234, + 0.47, + 0.276 + ], + "angle": 0, + "content": "Figure 6. Visualisation of scene reconstruction from TSDF-Fusion (left) and vMAP (right) in a selected TUM RGB-D sequence, trained in real time for 99 seconds." + }, + { + "type": "table", + "bbox": [ + 0.083, + 0.294, + 0.462, + 0.359 + ], + "angle": 0, + "content": "
ATE RMSE [cm]↓ | iMAP | NICE-SLAM | vMAP | ORB-SLAM2
fr1/desk | 4.9 | 2.7 | 2.6 | 1.6
fr2.xyz | 2.0 | 1.8 | 1.6 | 0.4
fr3/office | 5.8 | 3.0 | 3.0 | 1.0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.122, + 0.365, + 0.424, + 0.38 + ], + "angle": 0, + "content": "Table 3. Camera tracking results on TUM RGB-D." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.397, + 0.47, + 0.489 + ], + "angle": 0, + "content": "shown in Fig. 4, we see that vMAP generates objects with more coherent geometry than TSDF-Fusion; and with much finer details than ObjSDF, though with a much shorter training time. And consistently, we can see that vMAP generates much sharper object boundaries and textures compared to NICE-SLAM, as shown in Fig. 5." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.506, + 0.469, + 0.748 + ], + "angle": 0, + "content": "Results on TUM RGB-D We evaluated on a TUM RGB-D sequence captured in the real-world, with object masks predicted by an off-the-shelf pre-trained instance segmentation network [47], and poses estimated by ORB-SLAM3 [2]. Since our object detector has no spatio-temporal consistency, we found that the same object can be occasionally detected as two different instances, which leads to some reconstruction artifacts. For example, the object ' globe' shown in Fig. 6 was also detected as 'balloon' in some frames, resulting the 'splitting' artifacts in the final object reconstruction. Overall, vMAP still predicts more coherent reconstruction for most objects in a scene, with realistic hole-filling capabilities compared to TSDF-Fusion. However, we acknowledge that the completion of complete out-of-view regions (e.g., the back of a chair) is beyond the reach of our system due to the lack of general 3D prior." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Though our work focuses more on mapping performance than pose estimation, we also report ATE RMSE [30] in Tab. 3 following [31,48], by jointly optimising camera pose with map. We can observe that vMAP achieves superior performance, due to the fact that reconstruction and tracking quality are typically highly interdependent. However, there is a noticeable performance gap compared to ORBSLAM. As such, we directly choose ORB-SLAM as our external tracking system, which leads to faster training speed, cleaner implementation, and higher tracking quality." + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.09, + 0.889, + 0.285 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.297, + 0.892, + 0.34 + ], + "angle": 0, + "content": "Figure 7. Visualisation of table-top reconstruction (top) and individual object reconstructions (bottom), from vMAP running in real time using an Azure Kinect RGB-D camera for 170 seconds." + }, + { + "type": "table", + "bbox": [ + 0.506, + 0.354, + 0.888, + 0.419 + ], + "angle": 0, + "content": "
 | NICE-SLAM* | iMAP | vMAP | vMAP (w/o BG)
Model Param. ↓ | 12.12M | 0.32M | 0.66M | 0.56M
Runtime ↓ | 34min34s | 12min29s | 8min16s | 6min01s
Mapping Time ↓ | 845ms | 360ms | 226ms | 120ms
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.429, + 0.892, + 0.472 + ], + "angle": 0, + "content": "Table 4. vMAP is extremely memory-efficient and runs \\(1.5\\mathrm{x}\\) and \\(4\\mathrm{x}\\) faster than iMAP and NICE-SLAM respectively, with even higher performance gains without the background (BG) model." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.485, + 0.892, + 0.561 + ], + "angle": 0, + "content": "Results on Live Kinect Data Finally, we show the reconstruction result of vMAP on a table-top scene, from running in real-time with an Azure Kinect RGB-D camera. As shown in Fig. 7, vMAP is able to generate a range of realistic, watertight object meshes from different categories." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.572, + 0.706, + 0.587 + ], + "angle": 0, + "content": "4.3. Performance Analysis" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.596, + 0.892, + 0.657 + ], + "angle": 0, + "content": "In this section, we compare different training strategies and architectural design choices for our vMAP system. For simplicity, all experiments were done on the Replica Room-0 sequence, with our default training hyper-parameters." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.665, + 0.893, + 0.846 + ], + "angle": 0, + "content": "Memory and Runtime We compared memory usage and runtime with iMAP and NICE-SLAM in Tab. 4 and Fig. 9, all trained with ground-truth pose, and with the default training hyper-parameters listed in each method, for fair comparison. Specifically, we reported the Runtime for training the entire sequence, and Mapping Time for training each single frame, given the exact same hardware. We can observe that vMAP is highly memory efficient with less than 1M parameters. We want to highlight that vMAP achieves better reconstruction quality, and runs significantly faster (\\(\\sim\\)5Hz) than iMAP and NICE-SLAM with 1.5x and 4x training speed improvement respectively." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.856, + 0.894, + 0.901 + ], + "angle": 0, + "content": "Vectorised v.s. Sequential Training We ablated training speed with vectorised and sequential operations (for loops), conditioned on different numbers of objects and different" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.512, + 0.957 + ], + "angle": 0, + "content": "958" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.09, + 0.271, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.277, + 0.09, + 0.465, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.255, + 0.47, + 0.284 + ], + "angle": 0, + "content": "Figure 8. Vectorised operation allows extremely fast training speed compared to standard sequential operations using for loops." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.296, + 0.468, + 0.462 + ], + "angle": 0, + "content": "sizes of object model. In Fig. 8, we can see that vectorised training enables tremendous improvements in optimisation speed, especially when we have a large number of objects. And with vectorised training, each optimisation step takes no more than \\(15\\mathrm{ms}\\) even when we train as many as 200 objects. Additionally, vectorised training is also stable across a wide range of model sizes, suggesting that we can train our object models with an even larger size if required, with minimal additional training time. 
As expected, vectorised training and for loops will eventually have similar training speed, when we reach the hardware's memory limit." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.463, + 0.468, + 0.539 + ], + "angle": 0, + "content": "To train multiple models in parallel, an initial approach we tried was spawning a process per object. However, we were only able to spawn a very limited number of processes, due to the per process CUDA memory overhead, which significantly limited the number of objects." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.546, + 0.468, + 0.713 + ], + "angle": 0, + "content": "Object Model Capacity As vectorised training has minimal effect on training speed in terms of object model design, we also investigated how the object-level reconstruction quality is affected by different object model sizes. We experimented with different object model sizes by varying the hidden size of each MLP layer. In Fig. 9, we can see that the object-level performance starts to saturate starting from hidden size 16, with minimal or no improvement by further increasing model sizes. This indicates that object-level representation is highly compressible, and can be efficiently and accurately parameterised by very few parameters." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Stacked MLPs vs. Shared MLP Apart from representing each object by a single individual MLP, we also explored a shared MLP design by considering multi-object mapping as a multi-task learning problem [26, 33]. Here, each object is additionally associated with a learnable latent code, and this latent code is considered as an conditional input to the network, jointly optimised with the network weights. Though we have tried multiple multi-task learning architectures [12, 18], early experiments (denoted as vMAP-S in Fig. 9) showed that this shared MLP design achieved slightly degraded reconstruction quality and had no distinct training speed improvement compared to stacked" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.091, + 0.693, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.698, + 0.091, + 0.892, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.211, + 0.892, + 0.253 + ], + "angle": 0, + "content": "Figure 9. Object-level Reconstruction v.s. Model Param. (denoted by network hidden size). vMAP is more compact than iMAP, with the performance starting to saturate from hidden size 16." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.265, + 0.892, + 0.432 + ], + "angle": 0, + "content": "MLPs, particularly when powered by vectorised training. Furthermore, we found that shared MLP design can lead to undesired training properties: i) The shared MLP needs to be optimised along with the latent codes from all the objects, since the network weights and all object codes are entangled in a shared representation space. ii) The shared MLP capacity is fixed during training, and therefore the representation space might not be sufficient with an increasing number of objects. This accentuates the advantages of disentangled object representation space, which is a crucial design element of vMAP system." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.445, + 0.619, + 0.46 + ], + "angle": 0, + "content": "5. 
Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.47, + 0.892, + 0.622 + ], + "angle": 0, + "content": "We have presented vMAP, a real-time object-level mapping system with simple and compact neural implicit representation. By decomposing the 3D scene into meaningful instances, represented by a batch of tiny separate MLPs, the system models the 3D scene in an efficient and flexible way, enabling scene re-composition, independent tracking and continually updating of objects of interest. In addition to more accurate and compact object-centric 3D reconstruction, our system is able to predict plausible watertight surfaces for each object, even under partial occlusion." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.628, + 0.892, + 0.794 + ], + "angle": 0, + "content": "Limitations and Future Work Our current system relies on an off-the-shelf detector for instance masks, which are not necessarily spatio-temporally consistent. Though the ambiguity is partially alleviated by data association and multi-view supervision, a reasonable global constraints will be better. As objects are modelled independently, dynamic objects can be continually tracked and reconstructed to enable downstream tasks, e.g., robotic manipulation [34]. To extend our system to a monocular dense mapping system, depth estimation networks [13, 41] or more efficient neural rendering approaches [19] could be further integrated." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.801, + 0.667, + 0.817 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.892, + 0.9 + ], + "angle": 0, + "content": "Research presented in this paper has been supported by Dyson Technology Ltd. Xin Kong holds a China Scholarship Council-Imperial Scholarship. We are very grateful to Edgar Sucar, Binbin Xu, Hidenobu Matsuki and Anagh Malik for fruitful discussions." + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.511, + 0.957 + ], + "angle": 0, + "content": "959" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.105 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.116, + 0.47, + 0.157 + ], + "angle": 0, + "content": "[1] Jad Abou-Chakra, Feras Dayoub, and Niko Sunderhauf. Implicit object mapping with noisy data. arXiv preprint arXiv:2204.10516, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.16, + 0.471, + 0.228 + ], + "angle": 0, + "content": "[2] Carlos Campos, Richard Elvira, Juan J Gomez Rodríguez, José MM Montiel, and Juan D Tardós. Orb-slam3: An accurate open-source library for visual, visual-inertial, and multimap slam. IEEE Transactions on Robotics (T-RO), 2021. 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.231, + 0.471, + 0.3 + ], + "angle": 0, + "content": "[3] Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.302, + 0.471, + 0.37 + ], + "angle": 0, + "content": "[4] Angela Dai, Matthias Nießner, Michael Zollhöfer, Shahram Izadi, and Christian Theobalt. Bundlefusion: Real-time globally consistent 3d reconstruction using on-the-fly surface reintegration. ACM Transactions on Graphics (ToG), 2017. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.373, + 0.471, + 0.429 + ], + "angle": 0, + "content": "[5] Thomas Davies, Derek Nowrouzezahrai, and Alec Jacobson. On the effectiveness of weight-encoded neural implicit 3d shapes. In Proceedings of the International Conference on Machine Learning (ICML), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.431, + 0.471, + 0.498 + ], + "angle": 0, + "content": "[6] Felix Endres, Jürgen Hess, Nikolas Engelhard, Jürgen Sturm, Daniel Cremers, and Wolfram Burgard. An Evaluation of the RGB-D SLAM System. In Proceedings of the IEEE International Conference on Robotics and Automation (ICRA), 2012. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.502, + 0.471, + 0.557 + ], + "angle": 0, + "content": "[7] Agrim Gupta, Piotr Dollar, and Ross Girshick. Lvis: A dataset for large vocabulary instance segmentation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.559, + 0.471, + 0.601 + ], + "angle": 0, + "content": "[8] He Horace and Zou Richard. functorch: Jax-like composable function transforms for pytorch. https://github.com/pytorch/functorch, 2021.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.603, + 0.471, + 0.657 + ], + "angle": 0, + "content": "[9] Wonbong Jang and Lourdes Agapito. Codenerf: Disentangled neural radiance fields for object categories. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.66, + 0.47, + 0.729 + ], + "angle": 0, + "content": "[10] Xin Kong, Xuemeng Yang, Guangyao Zhai, Xiangrui Zhao, Xianfang Zeng, Mengmeng Wang, Yong Liu, Wanlong Li, and Feng Wen. Semantic graph based place recognition for 3d point clouds. In Proceedings of the IEEE/RSJ Conference on Intelligent Robots and Systems (IROS), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.731, + 0.47, + 0.8 + ], + "angle": 0, + "content": "[11] Guanglin Li, Yifeng Li, Zhichao Ye, Qihang Zhang, Tao Kong, Zhaopeng Cui, and Guofeng Zhang. Generative category-level shape and pose estimation with semantic primitives. In Conference on Robot Learning (CoRL), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.803, + 0.47, + 0.857 + ], + "angle": 0, + "content": "[12] Shikun Liu, Edward Johns, and Andrew J Davison. End-to-end multi-task learning with attention. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[13] Xiaoyang Lyu, Liang Liu, Mengmeng Wang, Xin Kong, Lina Liu, Yong Liu, Xinxin Chen, and Yi Yuan. Hr-depth: High resolution self-supervised monocular depth estimation. In" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.116, + 0.471, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "Proceedings of the National Conference on Artificial Intelligence (AAAI), 2021. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.122, + 0.892, + 0.176 + ], + "angle": 0, + "content": "[14] Alexander Majercik, Cyril Crassin, Peter Shirley, and Morgan McGuire. A ray-box intersection algorithm and efficient dynamic voxel rendering. Journal of Computer Graphics Techniques (JCGT), 2018. 
4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.178, + 0.892, + 0.232 + ], + "angle": 0, + "content": "[15] John McCormac, Ronald Clark, Michael Bloesch, Andrew Davison, and Stefan Leutenegger. Fusion++: Volumetric object-level slam. In Proceedings of the International Conference on 3D Vision (3DV), 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.234, + 0.892, + 0.301 + ], + "angle": 0, + "content": "[16] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.303, + 0.892, + 0.371 + ], + "angle": 0, + "content": "[17] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing scenes as neural radiance fields for view synthesis. In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.373, + 0.892, + 0.427 + ], + "angle": 0, + "content": "[18] Ishan Misra, Abhinav Shrivastava, Abhinav Gupta, and Martial Hebert. Cross-stitch networks for multi-task learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.428, + 0.892, + 0.482 + ], + "angle": 0, + "content": "[19] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (ToG), 2022. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.484, + 0.892, + 0.538 + ], + "angle": 0, + "content": "[20] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.54, + 0.892, + 0.606 + ], + "angle": 0, + "content": "[21] Michael Oechsle, Songyou Peng, and Andreas Geiger. Unisurf: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.609, + 0.892, + 0.677 + ], + "angle": 0, + "content": "[22] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning continuous signed distance functions for shape representation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.679, + 0.892, + 0.746 + ], + "angle": 0, + "content": "[23] Daniel Rebain, Wei Jiang, Soroosh Yazdani, Ke Li, Kwang Moo Yi, and Andrea Tagliasacchi. Derf: Decomposed radiance fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.748, + 0.892, + 0.802 + ], + "angle": 0, + "content": "[24] Christian Reiser, Songyou Peng, Yiyi Liao, and Andreas Geiger. Kilonerf: Speeding up neural radiance fields with thousands of tiny mlp's. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 
2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.804, + 0.892, + 0.845 + ], + "angle": 0, + "content": "[25] Antoni Rosinol, John J Leonard, and Luca Carlone. Nerf-slam: Real-time dense monocular slam with neural radiance fields. arXiv preprint arXiv:2210.13641, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.847, + 0.892, + 0.873 + ], + "angle": 0, + "content": "[26] Sebastian Ruder. An overview of multi-task learning in deep neural networks. arXiv preprint arXiv:1706.05098, 2017. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.874, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[27] Martin Rünz and Lourdes Agapito. Co-fusion: Real-time segmentation, tracking and fusion of multiple objects." + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.512, + 0.957 + ], + "angle": 0, + "content": "960" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.469, + 0.12 + ], + "angle": 0, + "content": "In Proceedings of the IEEE International Conference on Robotics and Automation (ICRA), 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.121, + 0.47, + 0.189 + ], + "angle": 0, + "content": "[28] Renato F Salas-Moreno, Richard A Newcombe, Hauke Strasdat, Paul HJ Kelly, and Andrew J Davison. SLAM++: Simultaneous Localisation and Mapping at the Level of Objects. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2013. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.19, + 0.469, + 0.258 + ], + "angle": 0, + "content": "[29] Julian Straub, Thomas Whelan, Lingni Ma, Yufan Chen, Erik Wijmans, Simon Green, Jakob J Engel, Raul Mur-Artal, Carl Ren, Shobhit Verma, et al. The replica dataset: A digital replica of indoor spaces. arXiv preprint arXiv:1906.05797, 2019.4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.26, + 0.469, + 0.315 + ], + "angle": 0, + "content": "[30] J. Sturm, N. Engelhard, F. Endres, W. Burgard, and D. Cremers. A Benchmark for the Evaluation of RGB-D SLAM Systems. In Proceedings of the IEEE/RSJ Conference on Intelligent Robots and Systems (IROS), 2012. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.316, + 0.469, + 0.371 + ], + "angle": 0, + "content": "[31] Edgar Sucar, Shikun Liu, Joseph Ortiz, and Andrew J. Davison. imap: Implicit mapping and positioning in real-time. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 1, 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.372, + 0.469, + 0.426 + ], + "angle": 0, + "content": "[32] Edgar Sucar, Kentaro Wada, and Andrew Davison. NodeSLAM: Neural object descriptors for multi-view shape reconstruction. In Proceedings of the International Conference on 3D Vision (3DV), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.428, + 0.469, + 0.496 + ], + "angle": 0, + "content": "[33] Simon Vandenhende, Stamatios Georgoulis, Wouter Van Gansbeke, Marc Proesmans, Dengxin Dai, and Luc Van Gool. Multi-task learning for dense prediction tasks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence (PAMI), 2021. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.497, + 0.469, + 0.566 + ], + "angle": 0, + "content": "[34] Kentaro Wada, Edgar Sucar, Stephen James, Daniel Lenton, and Andrew J Davison. Morefusion: Multi-object reasoning for 6d pose estimation from volumetric fusion. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.567, + 0.469, + 0.621 + ], + "angle": 0, + "content": "[35] Jingwen Wang, Tymoteusz Bleja, and Lourdes Agapito. Gosurf: Neural feature grid optimization for fast, high-fidelity rgb-d surface reconstruction. In Proceedings of the International Conference on 3D Vision (3DV), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.622, + 0.469, + 0.664 + ], + "angle": 0, + "content": "[36] Jingwen Wang, Martin Rünz, and Lourdes Agapito. Dsp-slam: object oriented slam with deep shape priors. In 2021 International Conference on 3D Vision (3DV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.665, + 0.469, + 0.733 + ], + "angle": 0, + "content": "[37] Qianyi Wu, Xian Liu, Yuedong Chen, Kejie Li, Chuanxia Zheng, Jianfei Cai, and Jianmin Zheng. Object-compositional neural implicit surfaces. In Proceedings of the European Conference on Computer Vision (ECCV), 2022. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.734, + 0.469, + 0.802 + ], + "angle": 0, + "content": "[38] Binbin Xu, Wenbin Li, Dimos Tzoumanikas, Michael Bloesch, Andrew Davison, and Stefan Leutenegger. MID-Fusion: Octree-based object-level multi-instance dynamic slam. In Proceedings of the IEEE International Conference on Robotics and Automation (ICRA), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.803, + 0.469, + 0.872 + ], + "angle": 0, + "content": "[39] Bangbang Yang, Yinda Zhang, Yinghao Xu, Yijin Li, Han Zhou, Hujun Bao, Guofeng Zhang, and Zhaopeng Cui. Learning object-compositional neural radiance field for ed-itable scene rendering. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[40] Xingrui Yang, Hai Li, Hongjia Zhai, Yuhang Ming, Yuqian Liu, and Guofeng Zhang. Vox-Fusion: Dense tracking and" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "mapping with voxel-based neural implicit representation. In Proceedings of the International Symposium on Mixed and Augmented Reality (ISMAR), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.205 + ], + "angle": 0, + "content": "[41] Zehao Yu, Songyou Peng, Michael Niemeyer, Torsten Sattler, and Andreas Geiger. Monosdf: Exploring monocular geometric cues for neural implicit surface reconstruction. Advances in Neural Information Processing Systems (NeurIPS), 2022. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.206, + 0.892, + 0.274 + ], + "angle": 0, + "content": "[42] Yu-Jie Yuan, Yang-Tian Sun, Yu-Kun Lai, Yuewen Ma, Rongfei Jia, and Lin Gao. Nerf-editing: geometry editing of neural radiance fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.276, + 0.892, + 0.345 + ], + "angle": 0, + "content": "[43] Shuaifeng Zhi, Michael Bloesch, Stefan Leutenegger, and Andrew J Davison. SceneCode: Monocular dense semantic reconstruction using learned encoded scene representations. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.347, + 0.892, + 0.402 + ], + "angle": 0, + "content": "[44] Shuaifeng Zhi, Edgar Sucar, Andre Mouton, Iain Haughton, Tristan Laidlow, and Andrew J Davison. ilabel: Revealing objects in neural fields. IEEE Robotics and Automation Letters (RA-L), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.403, + 0.892, + 0.471 + ], + "angle": 0, + "content": "[45] Xingguang Zhong, Yue Pan, Jens Behley, and Cyril Stachniss. Shine-mapping: Large-scale 3d mapping using sparse hierarchical implicit neural representations. In Proceedings of the IEEE International Conference on Robotics and Automation (ICRA), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.473, + 0.892, + 0.515 + ], + "angle": 0, + "content": "[46] Qian-Yi Zhou and Vladlen Koltun. Dense scene reconstruction with points of interest. ACM Transactions on Graphics (ToG), 2013. 1, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.517, + 0.892, + 0.584 + ], + "angle": 0, + "content": "[47] Xingyi Zhou, Rohit Girdhar, Armand Joulin, Philipp Krahenbuhl, and Ishan Misra. Detecting twenty-thousand classes using image-level supervision. In Proceedings of the European Conference on Computer Vision (ECCV), 2022. 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.586, + 0.892, + 0.657 + ], + "angle": 0, + "content": "[48] Zihan Zhu, Songyou Peng, Viktor Larsson, Weiwei Xu, Hujun Bao, Zhaopeng Cui, Martin R Oswald, and Marc Pollefeys. Nice-slam: Neural implicit scalable encoding for slam. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 6, 7" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.657 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.946, + 0.51, + 0.957 + ], + "angle": 0, + "content": "961" + } + ] +] \ No newline at end of file diff --git a/2023/vMAP_ Vectorised Object Mapping for Neural Field SLAM/ff44bc7e-edea-45f5-9989-5020c0b824b0_origin.pdf b/2023/vMAP_ Vectorised Object Mapping for Neural Field SLAM/ff44bc7e-edea-45f5-9989-5020c0b824b0_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..7fb59669011262837a25158d3ef55501583ebe5c --- /dev/null +++ b/2023/vMAP_ Vectorised Object Mapping for Neural Field SLAM/ff44bc7e-edea-45f5-9989-5020c0b824b0_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:adca2fa685437c486da5fdfdab86ca41200e0d2fec58485baae6a74024649a5e +size 1558697 diff --git a/2023/vMAP_ Vectorised Object Mapping for Neural Field SLAM/full.md b/2023/vMAP_ Vectorised Object Mapping for Neural Field SLAM/full.md new file mode 100644 index 0000000000000000000000000000000000000000..33b2385eb60c6f21bca89aa3fbc0116cb0f1cc39 --- /dev/null +++ b/2023/vMAP_ Vectorised Object Mapping for Neural Field SLAM/full.md @@ -0,0 +1,311 @@ +# vMAP: Vectorised Object Mapping for Neural Field SLAM + +Xin Kong Shikun Liu Marwan Taher Andrew J. Davison Dyson Robotics Lab, Imperial College London + +{x.kong21, shikun.liu17, m.taher, a.davison}@imperial.ac.uk + +![](images/7d9a6ae6db468eb20e0ac63efa0cdd689fbb21ad9fb2211e33edc8381b5de0f0.jpg) +Figure 1. vMAP automatically builds an object-level scene model from a real-time RGB-D input stream. Each object is represented by a separate MLP neural field model, all optimised in parallel via vectorised training. 
We use no 3D shape priors, but the MLP representation encourages object reconstruction to be watertight and complete, even when objects are partially observed or are heavily occluded in the input images. See for instance the separate reconstructions of the armchairs, sofas and cushions, which were mutually occluding each other, in this example from Replica. + +# Abstract + +We present vMAP, an object-level dense SLAM system using neural field representations. Each object is represented by a small MLP, enabling efficient, watertight object modelling without the need for 3D priors. + +As an RGB-D camera browses a scene with no prior information, vMAP detects object instances on-the-fly, and dynamically adds them to its map. Specifically, thanks to the power of vectorised training, vMAP can optimise as many as 50 individual objects in a single scene, with an extremely efficient training speed of $5\mathrm{Hz}$ map update. We experimentally demonstrate significantly improved scene-level and object-level reconstruction quality compared to prior neural field SLAM systems. Project page: https://kxhit.github.io/vMAP. + +# 1. Introduction + +For robotics and other interactive vision applications, an object-level model is arguably semantically optimal, with scene entities represented in a separated, composable way, but also efficiently focusing resources on what is important in an environment. + +The key question in building an object-level mapping system is what level of prior information is known about the objects in a scene in order to segment, classify and re + +construct them. If no 3D object priors are available, then usually only the directly observed parts of objects can be reconstructed, leading to holes and missing parts [4, 46]. Prior object information such as CAD models or category-level shape space models enable full object shape estimation from partial views, but only for the subset of objects in a scene for which these models are available. + +In this paper, we present a new approach which applies to the case where no 3D priors are available but still often enables watertight object reconstruction in realistic real-time scene scanning. Our system, vMAP, builds on the attractive properties shown by neural fields as a real-time scene representation [31], with efficient and complete representation of shape, but now reconstructs a separate tiny MLP model of each object. The key technical contribution of our work is to show that a large number of separate MLP object models can be simultaneously and efficiently optimised on a single GPU during live operation via vectorised training. + +We show that we can achieve much more accurate and complete scene reconstruction by separately modelling objects, compared with using a similar number of weights in a single neural field model of the whole scene. Our real-time system is highly efficient in terms of both computation and memory, and we show that scenes with up to 50 objects can be mapped with 40KB per object of learned parameters across the multiple, independent object networks. + +We also demonstrate the flexibility of our disentangled object representation to enable recomposition of scenes with new object configurations. Extensive experiments have been conducted on both simulated and real-world datasets, showing state-of-the-art scene-level and object-level reconstruction performance. + +# 2. 
Related Work + +This work follows in long series of efforts to build real-time scene representations which are decomposed into explicit rigid objects, with the promise of flexible and efficient scene representation and even the possibility to represent changing scenes. Different systems assumed varying types of representation and levels of prior knowledge, from CAD models [28], via category-level shape models [10, 11, 32, 36] to no prior shape knowledge, although in this case only the visible parts of objects could be reconstructed [15, 27, 38]. + +Neural Field SLAM Neural fields have recently been widely used as efficient, accurate and flexible representations of whole scenes [16, 17, 19, 22]. To adopt these representations into real-time SLAM systems, iMAP [31] demonstrated for the first time that a simple MLP network, incrementally trained with the aid of depth measurements from RGB-D sensors, can represent room-scaled 3D scenes in real-time. Some of iMAP's most interesting properties were its tendency to produce watertight reconstructions, even often plausibly completing the unobserved back of objects. These coherence properties of neural fields were particularly revealed when semantic output channels were added, as in SemanticNeRF [43] and iLabel [44], and were found to inherit the coherence. To make implicit representation more scalable and efficient, a group of implicit SLAM systems [25, 35, 40, 45, 48] fused neural fields with conventional volumetric representations. + +Object Representations with Neural Fields However, obtaining individual object representations from these neural field methods is difficult, as the correspondences between network parameters and specific scene regions are complicated and difficult to determine. To tackle this, DeRF [23] decomposed a scene spatially and dedicated smaller networks to each decomposed part. Similarly, KiloNeRF [24] divided a scene into thousands of volumetric parts, each represented by a tiny MLP, and trained them in parallel with custom CUDA kernels to speed up NeRF. Different from KiloNeRF, vMAP decomposes the scene into objects which are semantically meaningful. + +To represent multiple objects, ObjectNeRF [39] and ObjSDF [37] took pre-computed instance masks as additional input and conditioned object representation on learnable object activation code. But these methods are still trained offline and tangle object representations with the main scene network, so that they need to optimise the network weights + +with all object codes during training, and infer the whole network to get the shape of a desired object. This contrasts with vMAP which models objects individually, and is able to stop and resume training for any objects without any inter-object interference. + +The recent work most similar to ours has used the attractive properties of neural field MLPs to represent single objects. The analysis in [5] explicitly evaluated the use of over-fit neural implicit networks as a 3D shape representation for graphics, considering that they should be taken seriously. The work in [1] furthered this analysis, showing how object representation was affected by different observation conditions, though using the hybrid Instant NGP rather than a single MLP representation, so it is not clear whether some object coherence properties would be lost. Finally, the CodeNeRF system [9] trained a NeRF conditioned on learnable object codes, again proving the attractive properties of neural fields to represent single objects. 
+ +We build on this work in our paper, but for the first time show that many individual neural field models making up a whole scene can be simultaneously trained within a real-time system, resulting in accurate and efficient representation of many-object scenes. + +# 3. vMAP: An Efficient Object Mapping System with Vectorised Training + +# 3.1. System Overview + +We first introduce our detailed design for object-level mapping with efficient vectorised training (Section 3.2), and then explain our improved training strategies of pixel sampling and surface rendering (Section 3.3). Finally, we show how we may recompose and render a new scene with these learned object models (Section 3.4). An overview of our training and rendering pipeline is shown in Fig. 2. + +# 3.2. Vectorised Object Level Mapping + +Object Initialisation and Association To start with, each frame is associated with densely labelled object masks. These object masks are either directly provided in the dataset, or predicted with an off-the-shelf 2D instance segmentation network. Since those predicted object masks have no temporal consistency across different frames, we perform object association between the previous and the current live frame, based on two criteria: i) Semantic Consistency: the object in the current frame is predicted as the same semantic class from the previous frame, and ii) Spatial Consistency: the object in the current frame is spatially close to the object in the previous frames, measured by the mean IoU of their 3D object bounds. When these two criteria are satisfied, we assume they are the same object instance and represent them with the same object model. Otherwise, they are different object instances and we initialise + +![](images/49bed62b879a9243dc8649cbc650b8273b3ece64d5e4b39a5abcf819e31b25fd.jpg) +Figure 2. An overview of training and rendering pipeline of vMAP. + +a new object model and append it to the models stack. + +For each object in a frame, we estimate its 3D object bound by its 3D point cloud, parameterised by its depth map and the camera pose. Camera tracking is externally provided by an off-the-shelf tracking system, which we found to be more accurate and robust compared to jointly optimising pose and geometry. If we detect the same object instance in a new frame, we merge its 3D point cloud from the previous frames to the current frame and re-estimate its 3D object bound. Therefore, these object bounds are dynamically updated and refined with more observations. + +Object Supervision We apply object-level supervision only for pixels inside a 2D object bounding box, for maximal training efficiency. For those pixels within an object mask, we encourage the object radiance field to be occupied and supervise them with depth and colour loss. Otherwise we encourage the object radiance field to be empty. + +Each object instance samples training pixels from its own independent keyframe buffer. Therefore, we have flexibility to stop or resume the training of any object, with no training interference between objects. + +Vectorised Training Representing a neural field with multiple small networks can lead to efficient training, as shown in prior work [24]. In vMAP, all object models are of the same design, except for the background object which we represent with a slightly larger network. Therefore, we are able to stack these small object models together for vec + +torised training, leveraging the highly optimised vectorised operations in PyTorch [8]. 
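As a concrete illustration of this vectorised design (a minimal sketch under assumed details, not the authors' implementation), the snippet below batches a stack of identical tiny object MLPs using PyTorch's `torch.func` utilities, the in-core successor of functorch [8], so that one batched forward pass replaces a Python loop over objects. The `TinyMLP` layout, the 50-object count and the 120 rays x 10 samples per object echo the hyper-parameters reported in Section 4.1, but every name here is illustrative, and positional encoding, the separate occupancy/colour heads and the backward pass are omitted.

```python
import copy
import torch
import torch.nn as nn
from torch.func import stack_module_state, functional_call

class TinyMLP(nn.Module):
    # Illustrative 4-layer MLP with hidden size 32, roughly the per-object model size
    # used in vMAP; maps a 3D point to 4 values (occupancy logit + RGB in this sketch).
    def __init__(self, hidden=32):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(3, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, 4),
        )

    def forward(self, x):
        return self.net(x)

num_objects = 50                  # e.g. a Replica-scale scene
samples_per_object = 120 * 10     # 120 rays x 10 samples per ray, per training step
models = [TinyMLP() for _ in range(num_objects)]

# Stack the per-object weights into batched tensors: every parameter gains a
# leading dimension of size num_objects.
params, buffers = stack_module_state(models)

# A stateless "meta" copy of the architecture defines the functional forward pass.
base = copy.deepcopy(models[0]).to("meta")

def forward_one(p, b, x):
    return functional_call(base, (p, b), (x,))

# One batch of sampled points per object: (num_objects, samples_per_object, 3).
points = torch.rand(num_objects, samples_per_object, 3)

# vmap evaluates all object MLPs in a single vectorised call instead of a for-loop.
out = torch.vmap(forward_one)(params, buffers, points)
print(out.shape)  # torch.Size([50, 1200, 4])
```

The per-object losses of Section 3.3 can then be computed on such a batched output and back-propagated through the stacked parameters in one step, which is what keeps per-frame map updates cheap.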
Since multiple object models are batched and trained simultaneously as opposed to sequentially, we optimise the use of the available GPU resources. We show that vectorised training is an essential design element to the system, resulting in significantly improved training speed, further discussed in Section 4.3. + +# 3.3. Neural Implicit Mapping + +Depth Guided Sampling Neural fields trained on RGB data only have no guarantee to model accurate object geometry, due to the fact that they are optimising for appearance rather than the geometry. To obtain more geometrically accurate object models, we benefit from the depth map available from an RGB-D sensor, providing a strong prior for learning the density field of 3D volumes. Specifically, we sample $N_{s}$ and $N_{c}$ points along each ray, for which $N_{s}$ points are sampled with a Normal distribution centered around the surface $t_{s}$ (from the depth map), with a small $d_{\sigma}$ variance, and $N_{c}$ points are uniformly sampled between the camera $t_{n}$ (the near bound) and the surface $t_{s}$ , with a stratified sampling approach. When the depth measurement is invalid, the surface $t_{s}$ is then replaced with the far bound $t_{f}$ . Mathematically, we have: + +$$ +t _ {i} \sim \mathcal {U} \left(t _ {n} + \frac {i - 1}{N _ {c}} \left(t _ {s} - t _ {n}\right), t _ {n} + \frac {i}{N _ {c}} \left(t _ {s} - t _ {n}\right)\right), \tag {1} +$$ + +$$ +t _ {i} \sim \mathcal {N} \left(t _ {s}, d _ {\sigma} ^ {2}\right). \tag {2} +$$ + +We choose $d_{\sigma} = 3cm$ which works well in our implementation. We observe that training more points near the surface helps to guide the object models to quickly focus on representing accurate object geometry. + +Surface and Volume Rendering As we are concerned more by 3D surface reconstruction than 2D rendering, we omit the viewing direction from the network input, and model object visibility with a binary indicator (no transparent objects). With similar motivation to UniSURF [21], we parameterise the occupancy probability of a 3D point $x_{i}$ as $o_{\theta}(x_i)\rightarrow [0,1]$ , where $o_{\theta}$ is a continuous occupancy field. Therefore, the termination probability at point $x_{i}$ along ray $\mathbf{r}$ becomes $T_{i} = o(x_{i})\prod_{j < i}(1 - o(x_{j}))$ , indicating that no occupied samples $x_{j}$ with $j < i$ exist before $x_{i}$ . The corresponding rendered occupancy, depth and colour are defined as follows: + +$$ +\hat {O} (\mathbf {r}) = \sum_ {i = 1} ^ {N} T _ {i}, \hat {D} (\mathbf {r}) = \sum_ {i = 1} ^ {N} T _ {i} d _ {i}, \hat {C} (\mathbf {r}) = \sum_ {i = 1} ^ {N} T _ {i} c _ {i}. \tag {3} +$$ + +Training Objective For each object $k$ , we only sample training pixels inside that object's 2D bounding box, denoted by $\mathcal{R}^k$ , and only optimise depth and colour for pixels inside its 2D object mask, denoted by $M^k$ . Note that it is always true that $M^k \subset \mathcal{R}^k$ . The depth, colour and occupancy loss for the object $k$ are defined as follows: + +$$ +L _ {d e p t h} ^ {k} = M ^ {k} \odot \sum_ {\mathbf {r} \in R ^ {k}} | \hat {D} (\mathbf {r}) - D (\mathbf {r}) |, \tag {4} +$$ + +$$ +L _ {\text {c o l o u r}} ^ {k} = M ^ {k} \odot \sum_ {\mathbf {r} \in R ^ {k}} | \hat {C} (\mathbf {r}) - C (\mathbf {r}) |, \tag {5} +$$ + +$$ +L _ {\text {o c c u p a n c y}} ^ {k} = \sum_ {\mathbf {r} \in R ^ {k}} | \hat {O} (\mathbf {r}) - M ^ {k} (\mathbf {r}) |. 
\tag {6} +$$ + +The overall training objective then accumulates losses for all $K$ objects: + +$$ +L = \sum_ {k = 1} ^ {K} L _ {\text {d e p t h}} ^ {k} + \lambda_ {1} \cdot L _ {\text {c o l o u r}} ^ {k} + \lambda_ {2} \cdot L _ {\text {o c c u p a n c y}} ^ {k}. \tag {7} +$$ + +We choose loss weightings $\lambda_1 = 5$ and $\lambda_{2} = 10$ , which we found to work well in our experiments. + +# 3.4. Compositional Scene Rendering + +Since vMAP represents objects in a purely disentangled representation space, we can obtain each 3D object by querying within its estimated 3D object bounds and easily manipulate it. For 2D novel view synthesis, we use the Ray-Box Intersection algorithm [14] to calculate near and far bounds for each object, and then rank rendered depths along each ray to achieve occlusion-aware scene-level rendering. This disentangled representation also opens up other types + +of fine-grained object-level manipulation, such as changing object shape or textures by conditioning on disentangled pre-trained feature fields [20, 42], which we consider as an interesting future direction. + +# 4. Experiments + +We have comprehensively evaluated vMAP on a range of different datasets, which include both simulated and real-world sequences, with and without ground-truth object masks and poses. For all datasets, we qualitatively compare our system to prior state-of-the-art SLAM frameworks on 2D and 3D scene-level and object-level rendering. We further quantitatively compare these systems in datasets where ground-truth meshes are available. Please see our attached supplementary material for more results. + +# 4.1. Experimental Setup + +Datasets We evaluated on Replica [29], ScanNet [3], and TUM RGB-D [6]. Each dataset contains sequences with different levels of quality in object masks, depth and pose measurements. Additionally, we also showed vMAP's performance in complex real-world with self-captured video sequences recorded by an Azure Kinect RGB-D camera. An overview of these datasets is shown in Tab. 1. + +
| | Object Masks | Depth Quality | Pose Estimation |
| --- | --- | --- | --- |
| Replica | Perfect GT | Perfect GT | Perfect GT |
| ScanNet | Noisy | Noisy | Perfect GT |
| TUM RGB-D | Detic | Noisy | ORB-SLAM3 |
| Our Recording | Detic | Noisy | ORB-SLAM3 |
+ +Table 1. An overview of datasets we evaluated. + +Datasets with perfect ground-truth information represent the upper-bound performance of our system. We expect vMAP's performance in the real-world setting can be further improved, when coupled with a better instance segmentation and pose estimation framework. + +Implementation Details We conduct all experiments on a desktop PC with a 3.60 GHz i7-11700K CPU and a single Nvidia RTX 3090 GPU. We choose our instance segmentation detector to be Detic [47], pre-trained on an open-vocabulary LVIS dataset [7] which contains more than 1000 object classes. We choose our pose estimation framework to be ORB-SLAM3 [2], for its fast and accurate tracking performance. We continuously update the keyframe poses using the latest estimates from ORB-SLAM3. + +We applied the same set of hyper-parameters for all datasets. Both our object and background model use 4-layer MLPs, with each layer having hidden size 32 (object) and 128 (background). For object / background, we selected keyframes every $25/50$ frames, $120/1200$ rays each training step with 10 points per ray. The number of objects in a scene typically varies between 20 and 70, among which the + +
| | TSDF-Fusion* | iMAP | iMAP* | NICE-SLAM | NICE-SLAM* | vMAP |
| --- | --- | --- | --- | --- | --- | --- |
| Scene Acc. [cm] ↓ | 1.28 | 4.43 | 2.15 | 2.94 | 3.04 | 3.20 |
| Scene Comp. [cm] ↓ | 5.61 | 5.56 | 2.88 | 4.02 | 3.84 | 2.39 |
| Scene Comp. Ratio [<5cm %] ↑ | 82.67 | 79.06 | 90.85 | 86.73 | 86.52 | 92.99 |
| Object Acc. [cm] ↓ | 0.45 | - | 3.57 | - | 3.91 | 2.23 |
| Object Comp. [cm] ↓ | 3.69 | - | 2.38 | - | 3.27 | 1.44 |
| Object Comp. Ratio [<5cm %] ↑ | 82.98 | - | 90.19 | - | 83.97 | 94.55 |
+ +Table 2. Averaged reconstruction results for 8 indoor Replica scenes. * represents the baselines we re-trained with ground-truth pose. + +![](images/101e2a669703e2eb7a292e30351473b042a8c2f462df91ce1ab39684fcaab209.jpg) +Figure 3. Scene reconstruction for 4 selected Replica scenes. Interesting regions are highlighted with coloured boxes, showing vMAP's significantly improved reconstruction quality. All scene meshes are provided by the original authors. + +![](images/a9842deedfad70f6912b9dc18e83685a385d58d49881a8cf0405ca4dff393cc4.jpg) +TSDF-Fusion + +![](images/e396859132901ab9dfabd570622f2f6fb3f929f13a7f21b7f37f97587bb9a85f.jpg) +ObjSDF + +![](images/b8e4699eb72f2854f3a10949d247f924f14fa4123d1d68ffc12e41c57151bc78.jpg) +vMAP + +![](images/a4c957d7487883bc99b0592446b261e5d59ea70b3fa1d39744b892b2ffcdec25.jpg) +TSDF-Fusion + +![](images/b7880ce3b18f3d1c5dbbc19679d80157c707e03397a7e9074e8b2fb3b8b1fc3a.jpg) +ObjSDF + +![](images/b5e3c4667ee120a8345d89b107c58717fbc96a6d55eabb194c397d5d75ef5a4b.jpg) +vMAP + +![](images/29fbad12d300d6b7e9635941ce1a5f178cef86cb70174de42a002978c7eac868.jpg) +Figure 4. Visualisation of object reconstructions with vMAP compared to TSDF-Fusion and ObjSDF. Note that all object reconstructions from ObjSDF require much longer off-line training. All object meshes from ObjSDF are provided by the original authors. + +![](images/2300ca72cea5e322f566e720f4d69662710bd3a1e4589e3e47bfb276192fb8f9.jpg) + +![](images/4c282363d906f8363f25d7ec23f2024ca14ced2b0d17b05d3173a5a9ff8ffefe.jpg) + +![](images/8853dd08f1584336f224ba73c4c14cf3bc140d00fa4b61c25390aba4adb59fb4.jpg) + +![](images/27d589b3fcea57ac72a942113af6ae1386492d2daa6d632a49bfcb817f7621ae.jpg) + +![](images/9bcccd19d754b2561327fa0deb36a0f583910aca2b025998b03b4de2c2c836d3.jpg) + +![](images/92d81487f672b50fa6d13830f40ba428f6d7cb797b6e24806c8956c98e23d47e.jpg) +NICE-SLAM* +Figure 5. Visualisation of scene reconstruction from NICE-SLAM* (left) and vMAP (right) in a selected ScanNet sequence. Interesting regions are zoomed in. NICE-SLAM* was re-trained with ground-truth poses. + +![](images/8a02031b70e79897803f030b8c695f33b451a8a63e57fef24dc1d533df2dba8a.jpg) +vMAP + +largest number of objects are in Replica and ScanNet scenes with an average of 50 objects per scene. + +Metrics Following the convention of prior work [31, 48], we adopt Accuracy, Completion, and Completion Ratio for 3D scene-level reconstruction metrics. Besides, we note that such scene-level metrics are heavily biased towards the reconstruction of large objects like walls and floors. Therefore, we additionally provide these metrics at the object-level, by averaging metrics for all objects in each scene. + +# 4.2. Evaluation on Scene and Object Reconstruction + +Results on Replica We experimented on 8 Replica scenes, using the rendered trajectories provided in [31], with 2000 RGB-D frames in each scene. Tab. 2 shows the averaged quantitative reconstruction results in these Replica indoor sequences. For scene-level reconstruction, we compared with TSDF-Fusion [46], iMAP [31] and NICE-SLAM [48]. To isolate reconstruction, we also provided results for these baselines re-trained with ground-truth pose (marked with $*$ ), with their open-sourced code for the fair comparison. Specifically, iMAP* was implemented as a special case of vMAP, when considering the entire scene + +as one object instance. For object-level reconstruction, we compared baselines trained with ground-truth pose. 
+ +vMAP's significant advantage thanks to object-level representation is to reconstruct tiny objects and objects with fine-grained details. Noticeably, vMAP achieved more than $50 - 70\%$ improvement over iMAP and NICE-SLAM for object-level completion. The scene reconstructions of 4 selected Replica sequences are shown in Fig. 3, with interesting regions highlighted in coloured boxes. The quantitative results for 2D novel view rendering are further provided in the supplementary material. + +Results on ScanNet To evaluate on a more challenging setting, we experimented on ScanNet [3], a dataset composed of real scenes, with much noisier ground-truth depth maps and object masks. We choose a ScanNet sequence selected by ObjSDF [37], and we compared with TSDF-Fusion and ObjSDF for object-level reconstruction, and we compared with NICE-SLAM (re-trained with ground-truth pose) for scene-level reconstruction. Unlike ObjSDF, which was optimised from pre-selected posed images without depth for much longer off-line training, we ran both vMAP and TSDF-Fusion in an online setting with depth. As + +![](images/e863167255c280c7952889e039ef2ad0b785edb9b06e5906579d211e933c372f.jpg) +Figure 6. Visualisation of scene reconstruction from TSDF-Fusion (left) and vMAP (right) in a selected TUM RGB-D sequence, trained in real time for 99 seconds. + +
| ATE RMSE [cm]↓ | iMAP | NICE-SLAM | vMAP | ORB-SLAM2 |
| --- | --- | --- | --- | --- |
| fr1/desk | 4.9 | 2.7 | 2.6 | 1.6 |
| fr2.xyz | 2.0 | 1.8 | 1.6 | 0.4 |
| fr3/office | 5.8 | 3.0 | 3.0 | 1.0 |
+ +shown in Fig. 4, we see that vMAP generates objects with more coherent geometry than TSDF-Fusion; and with much finer details than ObjSDF, though with a much shorter training time. And consistently, we can see that vMAP generates much sharper object boundaries and textures compared to NICE-SLAM, as shown in Fig. 5. + +Results on TUM RGB-D We evaluated on a TUM RGB-D sequence captured in the real-world, with object masks predicted by an off-the-shelf pre-trained instance segmentation network [47], and poses estimated by ORB-SLAM3 [2]. Since our object detector has no spatio-temporal consistency, we found that the same object can be occasionally detected as two different instances, which leads to some reconstruction artifacts. For example, the object ' globe' shown in Fig. 6 was also detected as 'balloon' in some frames, resulting the 'splitting' artifacts in the final object reconstruction. Overall, vMAP still predicts more coherent reconstruction for most objects in a scene, with realistic hole-filling capabilities compared to TSDF-Fusion. However, we acknowledge that the completion of complete out-of-view regions (e.g., the back of a chair) is beyond the reach of our system due to the lack of general 3D prior. + +Though our work focuses more on mapping performance than pose estimation, we also report ATE RMSE [30] in Tab. 3 following [31,48], by jointly optimising camera pose with map. We can observe that vMAP achieves superior performance, due to the fact that reconstruction and tracking quality are typically highly interdependent. However, there is a noticeable performance gap compared to ORBSLAM. As such, we directly choose ORB-SLAM as our external tracking system, which leads to faster training speed, cleaner implementation, and higher tracking quality. + +![](images/1b340c53001a5cd2b483109310669dbbe01b840f94a625255a7df53bc5d227e8.jpg) +Figure 7. Visualisation of table-top reconstruction (top) and individual object reconstructions (bottom), from vMAP running in real time using an Azure Kinect RGB-D camera for 170 seconds. + +Table 3. Camera tracking results on TUM RGB-D. + +
| | NICE-SLAM* | iMAP | vMAP | vMAP (w/o BG) |
| --- | --- | --- | --- | --- |
| Model Param. ↓ | 12.12M | 0.32M | 0.66M | 0.56M |
| Runtime ↓ | 34min34s | 12min29s | 8min16s | 6min01s |
| Mapping Time ↓ | 845ms | 360ms | 226ms | 120ms |
+ +Table 4. vMAP is extremely memory-efficient and runs $1.5\mathrm{x}$ and $4\mathrm{x}$ faster than iMAP and NICE-SLAM respectively, with even higher performance gains without the background (BG) model. + +Results on Live Kinect Data Finally, we show the reconstruction result of vMAP on a table-top scene, from running in real-time with an Azure Kinect RGB-D camera. As shown in Fig. 7, vMAP is able to generate a range of realistic, watertight object meshes from different categories. + +# 4.3. Performance Analysis + +In this section, we compare different training strategies and architectural design choices for our vMAP system. For simplicity, all experiments were done on the Replica Room-0 sequence, with our default training hyper-parameters. + +Memory and Runtime We compared memory usage and runtime with iMAP and NICE-SLAM in Tab. 4 and Fig. 9, all trained with ground-truth pose, and with the default training hyper-parameters listed in each method, for fair comparison. Specifically, we reported the Runtime for training the entire sequence, and Mapping Time for training each single frame, given the exact same hardware. We can observe that vMAP is highly memory efficient with less than 1M parameters. We want to highlight that vMAP achieves better reconstruction quality, and runs significantly faster ( $\sim$ 5Hz) than iMAP and NICE-SLAM with 1.5x and 4x training speed improvement respectively. + +Vectorised v.s. Sequential Training We ablated training speed with vectorised and sequential operations (for loops), conditioned on different numbers of objects and different + +![](images/7c401d551ce6d8f26fdfddb4ced3a26d30ad59eae3098e791383f566ff3cb60e.jpg) +Figure 8. Vectorised operation allows extremely fast training speed compared to standard sequential operations using for loops. + +![](images/f7e64f3dc1b4a31e45229d4c55f0cdf16ad3e99589189dde6199a03081c9121a.jpg) +Figure 9. Object-level Reconstruction v.s. Model Param. (denoted by network hidden size). vMAP is more compact than iMAP, with the performance starting to saturate from hidden size 16. + +sizes of object model. In Fig. 8, we can see that vectorised training enables tremendous improvements in optimisation speed, especially when we have a large number of objects. And with vectorised training, each optimisation step takes no more than $15\mathrm{ms}$ even when we train as many as 200 objects. Additionally, vectorised training is also stable across a wide range of model sizes, suggesting that we can train our object models with an even larger size if required, with minimal additional training time. As expected, vectorised training and for loops will eventually have similar training speed, when we reach the hardware's memory limit. + +To train multiple models in parallel, an initial approach we tried was spawning a process per object. However, we were only able to spawn a very limited number of processes, due to the per process CUDA memory overhead, which significantly limited the number of objects. + +Object Model Capacity As vectorised training has minimal effect on training speed in terms of object model design, we also investigated how the object-level reconstruction quality is affected by different object model sizes. We experimented with different object model sizes by varying the hidden size of each MLP layer. In Fig. 9, we can see that the object-level performance starts to saturate starting from hidden size 16, with minimal or no improvement by further increasing model sizes. 
This indicates that object-level representation is highly compressible, and can be efficiently and accurately parameterised by very few parameters. + +Stacked MLPs vs. Shared MLP Apart from representing each object by a single individual MLP, we also explored a shared MLP design by considering multi-object mapping as a multi-task learning problem [26, 33]. Here, each object is additionally associated with a learnable latent code, and this latent code is considered as an conditional input to the network, jointly optimised with the network weights. Though we have tried multiple multi-task learning architectures [12, 18], early experiments (denoted as vMAP-S in Fig. 9) showed that this shared MLP design achieved slightly degraded reconstruction quality and had no distinct training speed improvement compared to stacked + +![](images/02860218b955d22345fa48a3b9991abca8f15895c9b30df086162795b18d2b0b.jpg) + +![](images/07fa112510c756f6f16a7b78c975c9b13830cb89c4a27e74994d6e0871f9875c.jpg) + +MLPs, particularly when powered by vectorised training. Furthermore, we found that shared MLP design can lead to undesired training properties: i) The shared MLP needs to be optimised along with the latent codes from all the objects, since the network weights and all object codes are entangled in a shared representation space. ii) The shared MLP capacity is fixed during training, and therefore the representation space might not be sufficient with an increasing number of objects. This accentuates the advantages of disentangled object representation space, which is a crucial design element of vMAP system. + +# 5. Conclusion + +We have presented vMAP, a real-time object-level mapping system with simple and compact neural implicit representation. By decomposing the 3D scene into meaningful instances, represented by a batch of tiny separate MLPs, the system models the 3D scene in an efficient and flexible way, enabling scene re-composition, independent tracking and continually updating of objects of interest. In addition to more accurate and compact object-centric 3D reconstruction, our system is able to predict plausible watertight surfaces for each object, even under partial occlusion. + +Limitations and Future Work Our current system relies on an off-the-shelf detector for instance masks, which are not necessarily spatio-temporally consistent. Though the ambiguity is partially alleviated by data association and multi-view supervision, a reasonable global constraints will be better. As objects are modelled independently, dynamic objects can be continually tracked and reconstructed to enable downstream tasks, e.g., robotic manipulation [34]. To extend our system to a monocular dense mapping system, depth estimation networks [13, 41] or more efficient neural rendering approaches [19] could be further integrated. + +# Acknowledgements + +Research presented in this paper has been supported by Dyson Technology Ltd. Xin Kong holds a China Scholarship Council-Imperial Scholarship. We are very grateful to Edgar Sucar, Binbin Xu, Hidenobu Matsuki and Anagh Malik for fruitful discussions. + +# References + +[1] Jad Abou-Chakra, Feras Dayoub, and Niko Sunderhauf. Implicit object mapping with noisy data. arXiv preprint arXiv:2204.10516, 2022. 2 +[2] Carlos Campos, Richard Elvira, Juan J Gomez Rodríguez, José MM Montiel, and Juan D Tardós. Orb-slam3: An accurate open-source library for visual, visual-inertial, and multimap slam. IEEE Transactions on Robotics (T-RO), 2021. 
4, 7 +[3] Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 4, 6 +[4] Angela Dai, Matthias Nießner, Michael Zollhöfer, Shahram Izadi, and Christian Theobalt. Bundlefusion: Real-time globally consistent 3d reconstruction using on-the-fly surface reintegration. ACM Transactions on Graphics (ToG), 2017. 1 +[5] Thomas Davies, Derek Nowrouzezahrai, and Alec Jacobson. On the effectiveness of weight-encoded neural implicit 3d shapes. In Proceedings of the International Conference on Machine Learning (ICML), 2021. 2 +[6] Felix Endres, Jürgen Hess, Nikolas Engelhard, Jürgen Sturm, Daniel Cremers, and Wolfram Burgard. An Evaluation of the RGB-D SLAM System. In Proceedings of the IEEE International Conference on Robotics and Automation (ICRA), 2012. 4 +[7] Agrim Gupta, Piotr Dollar, and Ross Girshick. Lvis: A dataset for large vocabulary instance segmentation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 4 +[8] He Horace and Zou Richard. functorch: Jax-like composable function transforms for pytorch. https://github.com/pytorch/functorch, 2021.3 +[9] Wonbong Jang and Lourdes Agapito. Codenerf: Disentangled neural radiance fields for object categories. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 2 +[10] Xin Kong, Xuemeng Yang, Guangyao Zhai, Xiangrui Zhao, Xianfang Zeng, Mengmeng Wang, Yong Liu, Wanlong Li, and Feng Wen. Semantic graph based place recognition for 3d point clouds. In Proceedings of the IEEE/RSJ Conference on Intelligent Robots and Systems (IROS), 2020. 2 +[11] Guanglin Li, Yifeng Li, Zhichao Ye, Qihang Zhang, Tao Kong, Zhaopeng Cui, and Guofeng Zhang. Generative category-level shape and pose estimation with semantic primitives. In Conference on Robot Learning (CoRL), 2022. 2 +[12] Shikun Liu, Edward Johns, and Andrew J Davison. End-to-end multi-task learning with attention. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 8 +[13] Xiaoyang Lyu, Liang Liu, Mengmeng Wang, Xin Kong, Lina Liu, Yong Liu, Xinxin Chen, and Yi Yuan. Hr-depth: High resolution self-supervised monocular depth estimation. In + +Proceedings of the National Conference on Artificial Intelligence (AAAI), 2021. 8 +[14] Alexander Majercik, Cyril Crassin, Peter Shirley, and Morgan McGuire. A ray-box intersection algorithm and efficient dynamic voxel rendering. Journal of Computer Graphics Techniques (JCGT), 2018. 4 +[15] John McCormac, Ronald Clark, Michael Bloesch, Andrew Davison, and Stefan Leutenegger. Fusion++: Volumetric object-level slam. In Proceedings of the International Conference on 3D Vision (3DV), 2018. 2 +[16] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2 +[17] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing scenes as neural radiance fields for view synthesis. In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 2 +[18] Ishan Misra, Abhinav Shrivastava, Abhinav Gupta, and Martial Hebert. Cross-stitch networks for multi-task learning. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 8 +[19] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (ToG), 2022. 2, 8 +[20] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 4 +[21] Michael Oechsle, Songyou Peng, and Andreas Geiger. Unisurf: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 4 +[22] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning continuous signed distance functions for shape representation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2 +[23] Daniel Rebain, Wei Jiang, Soroosh Yazdani, Ke Li, Kwang Moo Yi, and Andrea Tagliasacchi. Derf: Decomposed radiance fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[24] Christian Reiser, Songyou Peng, Yiyi Liao, and Andreas Geiger. Kilonerf: Speeding up neural radiance fields with thousands of tiny mlp's. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 2, 3 +[25] Antoni Rosinol, John J Leonard, and Luca Carlone. Nerf-slam: Real-time dense monocular slam with neural radiance fields. arXiv preprint arXiv:2210.13641, 2022. 2 +[26] Sebastian Ruder. An overview of multi-task learning in deep neural networks. arXiv preprint arXiv:1706.05098, 2017. 8 +[27] Martin Rünz and Lourdes Agapito. Co-fusion: Real-time segmentation, tracking and fusion of multiple objects. + +In Proceedings of the IEEE International Conference on Robotics and Automation (ICRA), 2017. 2 +[28] Renato F Salas-Moreno, Richard A Newcombe, Hauke Strasdat, Paul HJ Kelly, and Andrew J Davison. SLAM++: Simultaneous Localisation and Mapping at the Level of Objects. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2013. 2 +[29] Julian Straub, Thomas Whelan, Lingni Ma, Yufan Chen, Erik Wijmans, Simon Green, Jakob J Engel, Raul Mur-Artal, Carl Ren, Shobhit Verma, et al. The replica dataset: A digital replica of indoor spaces. arXiv preprint arXiv:1906.05797, 2019.4 +[30] J. Sturm, N. Engelhard, F. Endres, W. Burgard, and D. Cremers. A Benchmark for the Evaluation of RGB-D SLAM Systems. In Proceedings of the IEEE/RSJ Conference on Intelligent Robots and Systems (IROS), 2012. 7 +[31] Edgar Sucar, Shikun Liu, Joseph Ortiz, and Andrew J. Davison. imap: Implicit mapping and positioning in real-time. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 1, 2, 6, 7 +[32] Edgar Sucar, Kentaro Wada, and Andrew Davison. NodeSLAM: Neural object descriptors for multi-view shape reconstruction. In Proceedings of the International Conference on 3D Vision (3DV), 2020. 2 +[33] Simon Vandenhende, Stamatios Georgoulis, Wouter Van Gansbeke, Marc Proesmans, Dengxin Dai, and Luc Van Gool. Multi-task learning for dense prediction tasks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence (PAMI), 2021. 8 +[34] Kentaro Wada, Edgar Sucar, Stephen James, Daniel Lenton, and Andrew J Davison. Morefusion: Multi-object reasoning for 6d pose estimation from volumetric fusion. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 8 +[35] Jingwen Wang, Tymoteusz Bleja, and Lourdes Agapito. Gosurf: Neural feature grid optimization for fast, high-fidelity rgb-d surface reconstruction. In Proceedings of the International Conference on 3D Vision (3DV), 2022. 2 +[36] Jingwen Wang, Martin Rünz, and Lourdes Agapito. Dsp-slam: object oriented slam with deep shape priors. In 2021 International Conference on 3D Vision (3DV), 2021. 2 +[37] Qianyi Wu, Xian Liu, Yuedong Chen, Kejie Li, Chuanxia Zheng, Jianfei Cai, and Jianmin Zheng. Object-compositional neural implicit surfaces. In Proceedings of the European Conference on Computer Vision (ECCV), 2022. 2, 6 +[38] Binbin Xu, Wenbin Li, Dimos Tzoumanikas, Michael Bloesch, Andrew Davison, and Stefan Leutenegger. MID-Fusion: Octree-based object-level multi-instance dynamic slam. In Proceedings of the IEEE International Conference on Robotics and Automation (ICRA), 2019. 2 +[39] Bangbang Yang, Yinda Zhang, Yinghao Xu, Yijin Li, Han Zhou, Hujun Bao, Guofeng Zhang, and Zhaopeng Cui. Learning object-compositional neural radiance field for ed-itable scene rendering. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 2 +[40] Xingrui Yang, Hai Li, Hongjia Zhai, Yuhang Ming, Yuqian Liu, and Guofeng Zhang. Vox-Fusion: Dense tracking and + +mapping with voxel-based neural implicit representation. In Proceedings of the International Symposium on Mixed and Augmented Reality (ISMAR), 2022. 2 +[41] Zehao Yu, Songyou Peng, Michael Niemeyer, Torsten Sattler, and Andreas Geiger. Monosdf: Exploring monocular geometric cues for neural implicit surface reconstruction. Advances in Neural Information Processing Systems (NeurIPS), 2022. 8 +[42] Yu-Jie Yuan, Yang-Tian Sun, Yu-Kun Lai, Yuewen Ma, Rongfei Jia, and Lin Gao. Nerf-editing: geometry editing of neural radiance fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 4 +[43] Shuaifeng Zhi, Michael Bloesch, Stefan Leutenegger, and Andrew J Davison. SceneCode: Monocular dense semantic reconstruction using learned encoded scene representations. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2 +[44] Shuaifeng Zhi, Edgar Sucar, Andre Mouton, Iain Haughton, Tristan Laidlow, and Andrew J Davison. ilabel: Revealing objects in neural fields. IEEE Robotics and Automation Letters (RA-L), 2022. 2 +[45] Xingguang Zhong, Yue Pan, Jens Behley, and Cyril Stachniss. Shine-mapping: Large-scale 3d mapping using sparse hierarchical implicit neural representations. In Proceedings of the IEEE International Conference on Robotics and Automation (ICRA), 2023. 2 +[46] Qian-Yi Zhou and Vladlen Koltun. Dense scene reconstruction with points of interest. ACM Transactions on Graphics (ToG), 2013. 1, 6 +[47] Xingyi Zhou, Rohit Girdhar, Armand Joulin, Philipp Krahenbuhl, and Ishan Misra. Detecting twenty-thousand classes using image-level supervision. In Proceedings of the European Conference on Computer Vision (ECCV), 2022. 4, 7 +[48] Zihan Zhu, Songyou Peng, Viktor Larsson, Weiwei Xu, Hujun Bao, Zhaopeng Cui, Martin R Oswald, and Marc Pollefeys. Nice-slam: Neural implicit scalable encoding for slam. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 
2, 6, 7 \ No newline at end of file diff --git a/2023/vMAP_ Vectorised Object Mapping for Neural Field SLAM/images.zip b/2023/vMAP_ Vectorised Object Mapping for Neural Field SLAM/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..7f2480800aff9e929f063be7a9273af468e0d765 --- /dev/null +++ b/2023/vMAP_ Vectorised Object Mapping for Neural Field SLAM/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2dae5fbeeb874f5a207a30a635255c2e60f1810473e7f7dea64c5ecb5590087 +size 690915 diff --git a/2023/vMAP_ Vectorised Object Mapping for Neural Field SLAM/layout.json b/2023/vMAP_ Vectorised Object Mapping for Neural Field SLAM/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..b789867b389f781a0b5a4294b8e139c4990a2ce4 --- /dev/null +++ b/2023/vMAP_ Vectorised Object Mapping for Neural Field SLAM/layout.json @@ -0,0 +1,7616 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 113, + 103, + 479, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 103, + 479, + 120 + ], + "spans": [ + { + "bbox": [ + 113, + 103, + 479, + 120 + ], + "type": "text", + "content": "vMAP: Vectorised Object Mapping for Neural Field SLAM" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 144, + 143, + 449, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 144, + 143, + 449, + 172 + ], + "spans": [ + { + "bbox": [ + 144, + 143, + 449, + 172 + ], + "type": "text", + "content": "Xin Kong Shikun Liu Marwan Taher Andrew J. Davison Dyson Robotics Lab, Imperial College London" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 136, + 174, + 454, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 174, + 454, + 185 + ], + "spans": [ + { + "bbox": [ + 136, + 174, + 454, + 185 + ], + "type": "text", + "content": "{x.kong21, shikun.liu17, m.taher, a.davison}@imperial.ac.uk" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 50, + 213, + 547, + 322 + ], + "blocks": [ + { + "bbox": [ + 50, + 213, + 547, + 322 + ], + "lines": [ + { + "bbox": [ + 50, + 213, + 547, + 322 + ], + "spans": [ + { + "bbox": [ + 50, + 213, + 547, + 322 + ], + "type": "image", + "image_path": "7d9a6ae6db468eb20e0ac63efa0cdd689fbb21ad9fb2211e33edc8381b5de0f0.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 331, + 546, + 386 + ], + "lines": [ + { + "bbox": [ + 46, + 331, + 546, + 386 + ], + "spans": [ + { + "bbox": [ + 46, + 331, + 546, + 386 + ], + "type": "text", + "content": "Figure 1. vMAP automatically builds an object-level scene model from a real-time RGB-D input stream. Each object is represented by a separate MLP neural field model, all optimised in parallel via vectorised training. We use no 3D shape priors, but the MLP representation encourages object reconstruction to be watertight and complete, even when objects are partially observed or are heavily occluded in the input images. See for instance the separate reconstructions of the armchairs, sofas and cushions, which were mutually occluding each other, in this example from Replica." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 143, + 394, + 192, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 394, + 192, + 407 + ], + "spans": [ + { + "bbox": [ + 143, + 394, + 192, + 407 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 419, + 287, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 419, + 287, + 467 + ], + "spans": [ + { + "bbox": [ + 46, + 419, + 287, + 467 + ], + "type": "text", + "content": "We present vMAP, an object-level dense SLAM system using neural field representations. Each object is represented by a small MLP, enabling efficient, watertight object modelling without the need for 3D priors." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 468, + 288, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 468, + 288, + 587 + ], + "spans": [ + { + "bbox": [ + 45, + 468, + 288, + 587 + ], + "type": "text", + "content": "As an RGB-D camera browses a scene with no prior information, vMAP detects object instances on-the-fly, and dynamically adds them to its map. Specifically, thanks to the power of vectorised training, vMAP can optimise as many as 50 individual objects in a single scene, with an extremely efficient training speed of " + }, + { + "bbox": [ + 45, + 468, + 288, + 587 + ], + "type": "inline_equation", + "content": "5\\mathrm{Hz}" + }, + { + "bbox": [ + 45, + 468, + 288, + 587 + ], + "type": "text", + "content": " map update. We experimentally demonstrate significantly improved scene-level and object-level reconstruction quality compared to prior neural field SLAM systems. Project page: https://kxhit.github.io/vMAP." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 597, + 128, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 597, + 128, + 609 + ], + "spans": [ + { + "bbox": [ + 47, + 597, + 128, + 609 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 617, + 287, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 287, + 677 + ], + "type": "text", + "content": "For robotics and other interactive vision applications, an object-level model is arguably semantically optimal, with scene entities represented in a separated, composable way, but also efficiently focusing resources on what is important in an environment." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "text", + "content": "The key question in building an object-level mapping system is what level of prior information is known about the objects in a scene in order to segment, classify and re" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 396, + 546, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 396, + 546, + 479 + ], + "spans": [ + { + "bbox": [ + 304, + 396, + 546, + 479 + ], + "type": "text", + "content": "construct them. If no 3D object priors are available, then usually only the directly observed parts of objects can be reconstructed, leading to holes and missing parts [4, 46]. 
Prior object information such as CAD models or category-level shape space models enable full object shape estimation from partial views, but only for the subset of objects in a scene for which these models are available." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 483, + 547, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 483, + 547, + 615 + ], + "spans": [ + { + "bbox": [ + 304, + 483, + 547, + 615 + ], + "type": "text", + "content": "In this paper, we present a new approach which applies to the case where no 3D priors are available but still often enables watertight object reconstruction in realistic real-time scene scanning. Our system, vMAP, builds on the attractive properties shown by neural fields as a real-time scene representation [31], with efficient and complete representation of shape, but now reconstructs a separate tiny MLP model of each object. The key technical contribution of our work is to show that a large number of separate MLP object models can be simultaneously and efficiently optimised on a single GPU during live operation via vectorised training." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "text", + "content": "We show that we can achieve much more accurate and complete scene reconstruction by separately modelling objects, compared with using a similar number of weights in a single neural field model of the whole scene. Our real-time system is highly efficient in terms of both computation and memory, and we show that scenes with up to 50 objects can be mapped with 40KB per object of learned parameters across the multiple, independent object networks." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "text", + "content": "952" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 288, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 288, + 144 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 288, + 144 + ], + "type": "text", + "content": "We also demonstrate the flexibility of our disentangled object representation to enable recomposition of scenes with new object configurations. 
Extensive experiments have been conducted on both simulated and real-world datasets, showing state-of-the-art scene-level and object-level reconstruction performance." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 155, + 134, + 167 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 155, + 134, + 167 + ], + "spans": [ + { + "bbox": [ + 47, + 155, + 134, + 167 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 175, + 287, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 175, + 287, + 283 + ], + "spans": [ + { + "bbox": [ + 47, + 175, + 287, + 283 + ], + "type": "text", + "content": "This work follows in long series of efforts to build real-time scene representations which are decomposed into explicit rigid objects, with the promise of flexible and efficient scene representation and even the possibility to represent changing scenes. Different systems assumed varying types of representation and levels of prior knowledge, from CAD models [28], via category-level shape models [10, 11, 32, 36] to no prior shape knowledge, although in this case only the visible parts of objects could be reconstructed [15, 27, 38]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 288, + 288, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 288, + 288, + 492 + ], + "spans": [ + { + "bbox": [ + 47, + 288, + 288, + 492 + ], + "type": "text", + "content": "Neural Field SLAM Neural fields have recently been widely used as efficient, accurate and flexible representations of whole scenes [16, 17, 19, 22]. To adopt these representations into real-time SLAM systems, iMAP [31] demonstrated for the first time that a simple MLP network, incrementally trained with the aid of depth measurements from RGB-D sensors, can represent room-scaled 3D scenes in real-time. Some of iMAP's most interesting properties were its tendency to produce watertight reconstructions, even often plausibly completing the unobserved back of objects. These coherence properties of neural fields were particularly revealed when semantic output channels were added, as in SemanticNeRF [43] and iLabel [44], and were found to inherit the coherence. To make implicit representation more scalable and efficient, a group of implicit SLAM systems [25, 35, 40, 45, 48] fused neural fields with conventional volumetric representations." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 498, + 287, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 498, + 287, + 641 + ], + "spans": [ + { + "bbox": [ + 47, + 498, + 287, + 641 + ], + "type": "text", + "content": "Object Representations with Neural Fields However, obtaining individual object representations from these neural field methods is difficult, as the correspondences between network parameters and specific scene regions are complicated and difficult to determine. To tackle this, DeRF [23] decomposed a scene spatially and dedicated smaller networks to each decomposed part. Similarly, KiloNeRF [24] divided a scene into thousands of volumetric parts, each represented by a tiny MLP, and trained them in parallel with custom CUDA kernels to speed up NeRF. Different from KiloNeRF, vMAP decomposes the scene into objects which are semantically meaningful." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "type": "text", + "content": "To represent multiple objects, ObjectNeRF [39] and ObjSDF [37] took pre-computed instance masks as additional input and conditioned object representation on learnable object activation code. But these methods are still trained offline and tangle object representations with the main scene network, so that they need to optimise the network weights" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 545, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 132 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 132 + ], + "type": "text", + "content": "with all object codes during training, and infer the whole network to get the shape of a desired object. This contrasts with vMAP which models objects individually, and is able to stop and resume training for any objects without any inter-object interference." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 133, + 546, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 133, + 546, + 287 + ], + "spans": [ + { + "bbox": [ + 304, + 133, + 546, + 287 + ], + "type": "text", + "content": "The recent work most similar to ours has used the attractive properties of neural field MLPs to represent single objects. The analysis in [5] explicitly evaluated the use of over-fit neural implicit networks as a 3D shape representation for graphics, considering that they should be taken seriously. The work in [1] furthered this analysis, showing how object representation was affected by different observation conditions, though using the hybrid Instant NGP rather than a single MLP representation, so it is not clear whether some object coherence properties would be lost. Finally, the CodeNeRF system [9] trained a NeRF conditioned on learnable object codes, again proving the attractive properties of neural fields to represent single objects." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 288, + 545, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 288, + 545, + 347 + ], + "spans": [ + { + "bbox": [ + 304, + 288, + 545, + 347 + ], + "type": "text", + "content": "We build on this work in our paper, but for the first time show that many individual neural field models making up a whole scene can be simultaneously trained within a real-time system, resulting in accurate and efficient representation of many-object scenes." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 358, + 545, + 387 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 358, + 545, + 387 + ], + "spans": [ + { + "bbox": [ + 305, + 358, + 545, + 387 + ], + "type": "text", + "content": "3. vMAP: An Efficient Object Mapping System with Vectorised Training" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 393, + 410, + 406 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 393, + 410, + 406 + ], + "spans": [ + { + "bbox": [ + 306, + 393, + 410, + 406 + ], + "type": "text", + "content": "3.1. 
System Overview" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 411, + 545, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 411, + 545, + 496 + ], + "spans": [ + { + "bbox": [ + 304, + 411, + 545, + 496 + ], + "type": "text", + "content": "We first introduce our detailed design for object-level mapping with efficient vectorised training (Section 3.2), and then explain our improved training strategies of pixel sampling and surface rendering (Section 3.3). Finally, we show how we may recompose and render a new scene with these learned object models (Section 3.4). An overview of our training and rendering pipeline is shown in Fig. 2." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 503, + 486, + 517 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 503, + 486, + 517 + ], + "spans": [ + { + "bbox": [ + 306, + 503, + 486, + 517 + ], + "type": "text", + "content": "3.2. Vectorised Object Level Mapping" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 521, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 546, + 713 + ], + "type": "text", + "content": "Object Initialisation and Association To start with, each frame is associated with densely labelled object masks. These object masks are either directly provided in the dataset, or predicted with an off-the-shelf 2D instance segmentation network. Since those predicted object masks have no temporal consistency across different frames, we perform object association between the previous and the current live frame, based on two criteria: i) Semantic Consistency: the object in the current frame is predicted as the same semantic class from the previous frame, and ii) Spatial Consistency: the object in the current frame is spatially close to the object in the previous frames, measured by the mean IoU of their 3D object bounds. When these two criteria are satisfied, we assume they are the same object instance and represent them with the same object model. Otherwise, they are different object instances and we initialise" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "text", + "content": "953" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 76, + 544, + 347 + ], + "blocks": [ + { + "bbox": [ + 49, + 76, + 544, + 347 + ], + "lines": [ + { + "bbox": [ + 49, + 76, + 544, + 347 + ], + "spans": [ + { + "bbox": [ + 49, + 76, + 544, + 347 + ], + "type": "image", + "image_path": "49bed62b879a9243dc8649cbc650b8273b3ece64d5e4b39a5abcf819e31b25fd.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 173, + 354, + 419, + 366 + ], + "lines": [ + { + "bbox": [ + 173, + 354, + 419, + 366 + ], + "spans": [ + { + "bbox": [ + 173, + 354, + 419, + 366 + ], + "type": "text", + "content": "Figure 2. An overview of training and rendering pipeline of vMAP." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 376, + 266, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 376, + 266, + 388 + ], + "spans": [ + { + "bbox": [ + 46, + 376, + 266, + 388 + ], + "type": "text", + "content": "a new object model and append it to the models stack." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 388, + 287, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 388, + 287, + 508 + ], + "spans": [ + { + "bbox": [ + 46, + 388, + 287, + 508 + ], + "type": "text", + "content": "For each object in a frame, we estimate its 3D object bound by its 3D point cloud, parameterised by its depth map and the camera pose. Camera tracking is externally provided by an off-the-shelf tracking system, which we found to be more accurate and robust compared to jointly optimising pose and geometry. If we detect the same object instance in a new frame, we merge its 3D point cloud from the previous frames to the current frame and re-estimate its 3D object bound. Therefore, these object bounds are dynamically updated and refined with more observations." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 514, + 287, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 514, + 287, + 586 + ], + "spans": [ + { + "bbox": [ + 46, + 514, + 287, + 586 + ], + "type": "text", + "content": "Object Supervision We apply object-level supervision only for pixels inside a 2D object bounding box, for maximal training efficiency. For those pixels within an object mask, we encourage the object radiance field to be occupied and supervise them with depth and colour loss. Otherwise we encourage the object radiance field to be empty." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 586, + 287, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 586, + 287, + 635 + ], + "spans": [ + { + "bbox": [ + 46, + 586, + 287, + 635 + ], + "type": "text", + "content": "Each object instance samples training pixels from its own independent keyframe buffer. Therefore, we have flexibility to stop or resume the training of any object, with no training interference between objects." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 288, + 714 + ], + "type": "text", + "content": "Vectorised Training Representing a neural field with multiple small networks can lead to efficient training, as shown in prior work [24]. In vMAP, all object models are of the same design, except for the background object which we represent with a slightly larger network. Therefore, we are able to stack these small object models together for vec" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 376, + 545, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 376, + 545, + 460 + ], + "spans": [ + { + "bbox": [ + 304, + 376, + 545, + 460 + ], + "type": "text", + "content": "torised training, leveraging the highly optimised vectorised operations in PyTorch [8]. Since multiple object models are batched and trained simultaneously as opposed to sequentially, we optimise the use of the available GPU resources. 
We show that vectorised training is an essential design element to the system, resulting in significantly improved training speed, further discussed in Section 4.3." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 465, + 446, + 479 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 465, + 446, + 479 + ], + "spans": [ + { + "bbox": [ + 305, + 465, + 446, + 479 + ], + "type": "text", + "content": "3.3. Neural Implicit Mapping" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 483, + 545, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 483, + 545, + 664 + ], + "spans": [ + { + "bbox": [ + 304, + 483, + 545, + 664 + ], + "type": "text", + "content": "Depth Guided Sampling Neural fields trained on RGB data only have no guarantee to model accurate object geometry, due to the fact that they are optimising for appearance rather than the geometry. To obtain more geometrically accurate object models, we benefit from the depth map available from an RGB-D sensor, providing a strong prior for learning the density field of 3D volumes. Specifically, we sample " + }, + { + "bbox": [ + 304, + 483, + 545, + 664 + ], + "type": "inline_equation", + "content": "N_{s}" + }, + { + "bbox": [ + 304, + 483, + 545, + 664 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 483, + 545, + 664 + ], + "type": "inline_equation", + "content": "N_{c}" + }, + { + "bbox": [ + 304, + 483, + 545, + 664 + ], + "type": "text", + "content": " points along each ray, for which " + }, + { + "bbox": [ + 304, + 483, + 545, + 664 + ], + "type": "inline_equation", + "content": "N_{s}" + }, + { + "bbox": [ + 304, + 483, + 545, + 664 + ], + "type": "text", + "content": " points are sampled with a Normal distribution centered around the surface " + }, + { + "bbox": [ + 304, + 483, + 545, + 664 + ], + "type": "inline_equation", + "content": "t_{s}" + }, + { + "bbox": [ + 304, + 483, + 545, + 664 + ], + "type": "text", + "content": " (from the depth map), with a small " + }, + { + "bbox": [ + 304, + 483, + 545, + 664 + ], + "type": "inline_equation", + "content": "d_{\\sigma}" + }, + { + "bbox": [ + 304, + 483, + 545, + 664 + ], + "type": "text", + "content": " variance, and " + }, + { + "bbox": [ + 304, + 483, + 545, + 664 + ], + "type": "inline_equation", + "content": "N_{c}" + }, + { + "bbox": [ + 304, + 483, + 545, + 664 + ], + "type": "text", + "content": " points are uniformly sampled between the camera " + }, + { + "bbox": [ + 304, + 483, + 545, + 664 + ], + "type": "inline_equation", + "content": "t_{n}" + }, + { + "bbox": [ + 304, + 483, + 545, + 664 + ], + "type": "text", + "content": " (the near bound) and the surface " + }, + { + "bbox": [ + 304, + 483, + 545, + 664 + ], + "type": "inline_equation", + "content": "t_{s}" + }, + { + "bbox": [ + 304, + 483, + 545, + 664 + ], + "type": "text", + "content": ", with a stratified sampling approach. When the depth measurement is invalid, the surface " + }, + { + "bbox": [ + 304, + 483, + 545, + 664 + ], + "type": "inline_equation", + "content": "t_{s}" + }, + { + "bbox": [ + 304, + 483, + 545, + 664 + ], + "type": "text", + "content": " is then replaced with the far bound " + }, + { + "bbox": [ + 304, + 483, + 545, + 664 + ], + "type": "inline_equation", + "content": "t_{f}" + }, + { + "bbox": [ + 304, + 483, + 545, + 664 + ], + "type": "text", + "content": ". 
Mathematically, we have:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 312, + 669, + 545, + 695 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 669, + 545, + 695 + ], + "spans": [ + { + "bbox": [ + 312, + 669, + 545, + 695 + ], + "type": "interline_equation", + "content": "t _ {i} \\sim \\mathcal {U} \\left(t _ {n} + \\frac {i - 1}{N _ {c}} \\left(t _ {s} - t _ {n}\\right), t _ {n} + \\frac {i}{N _ {c}} \\left(t _ {s} - t _ {n}\\right)\\right), \\tag {1}", + "image_path": "75284cb6dcd1849ad554fb2553471bc8251adf64211eced7b3d47cd08255ef58.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 696, + 545, + 711 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 696, + 545, + 711 + ], + "spans": [ + { + "bbox": [ + 313, + 696, + 545, + 711 + ], + "type": "interline_equation", + "content": "t _ {i} \\sim \\mathcal {N} \\left(t _ {s}, d _ {\\sigma} ^ {2}\\right). \\tag {2}", + "image_path": "bfad9846d1cd4bc803f1ab4e0e6ddbd90f5ef15b78e069b587baaeda84aaa82a.jpg" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 748, + 313, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 748, + 313, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 748, + 313, + 757 + ], + "type": "text", + "content": "954" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": "We choose " + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "d_{\\sigma} = 3cm" + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": " which works well in our implementation. We observe that training more points near the surface helps to guide the object models to quickly focus on representing accurate object geometry." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 126, + 289, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 126, + 289, + 269 + ], + "spans": [ + { + "bbox": [ + 46, + 126, + 289, + 269 + ], + "type": "text", + "content": "Surface and Volume Rendering As we are concerned more by 3D surface reconstruction than 2D rendering, we omit the viewing direction from the network input, and model object visibility with a binary indicator (no transparent objects). With similar motivation to UniSURF [21], we parameterise the occupancy probability of a 3D point " + }, + { + "bbox": [ + 46, + 126, + 289, + 269 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 46, + 126, + 289, + 269 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 46, + 126, + 289, + 269 + ], + "type": "inline_equation", + "content": "o_{\\theta}(x_i)\\rightarrow [0,1]" + }, + { + "bbox": [ + 46, + 126, + 289, + 269 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 126, + 289, + 269 + ], + "type": "inline_equation", + "content": "o_{\\theta}" + }, + { + "bbox": [ + 46, + 126, + 289, + 269 + ], + "type": "text", + "content": " is a continuous occupancy field. 
Therefore, the termination probability at point " + }, + { + "bbox": [ + 46, + 126, + 289, + 269 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 46, + 126, + 289, + 269 + ], + "type": "text", + "content": " along ray " + }, + { + "bbox": [ + 46, + 126, + 289, + 269 + ], + "type": "inline_equation", + "content": "\\mathbf{r}" + }, + { + "bbox": [ + 46, + 126, + 289, + 269 + ], + "type": "text", + "content": " becomes " + }, + { + "bbox": [ + 46, + 126, + 289, + 269 + ], + "type": "inline_equation", + "content": "T_{i} = o(x_{i})\\prod_{j < i}(1 - o(x_{j}))" + }, + { + "bbox": [ + 46, + 126, + 289, + 269 + ], + "type": "text", + "content": ", indicating that no occupied samples " + }, + { + "bbox": [ + 46, + 126, + 289, + 269 + ], + "type": "inline_equation", + "content": "x_{j}" + }, + { + "bbox": [ + 46, + 126, + 289, + 269 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 46, + 126, + 289, + 269 + ], + "type": "inline_equation", + "content": "j < i" + }, + { + "bbox": [ + 46, + 126, + 289, + 269 + ], + "type": "text", + "content": " exist before " + }, + { + "bbox": [ + 46, + 126, + 289, + 269 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 46, + 126, + 289, + 269 + ], + "type": "text", + "content": ". The corresponding rendered occupancy, depth and colour are defined as follows:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 277, + 287, + 310 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 277, + 287, + 310 + ], + "spans": [ + { + "bbox": [ + 53, + 277, + 287, + 310 + ], + "type": "interline_equation", + "content": "\\hat {O} (\\mathbf {r}) = \\sum_ {i = 1} ^ {N} T _ {i}, \\hat {D} (\\mathbf {r}) = \\sum_ {i = 1} ^ {N} T _ {i} d _ {i}, \\hat {C} (\\mathbf {r}) = \\sum_ {i = 1} ^ {N} T _ {i} c _ {i}. \\tag {3}", + "image_path": "b29a268e36acff7cb429a2e12ca90a31b481e536b27648934bd6c14c446d8d5d.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 318, + 287, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 318, + 287, + 390 + ], + "spans": [ + { + "bbox": [ + 46, + 318, + 287, + 390 + ], + "type": "text", + "content": "Training Objective For each object " + }, + { + "bbox": [ + 46, + 318, + 287, + 390 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 46, + 318, + 287, + 390 + ], + "type": "text", + "content": ", we only sample training pixels inside that object's 2D bounding box, denoted by " + }, + { + "bbox": [ + 46, + 318, + 287, + 390 + ], + "type": "inline_equation", + "content": "\\mathcal{R}^k" + }, + { + "bbox": [ + 46, + 318, + 287, + 390 + ], + "type": "text", + "content": ", and only optimise depth and colour for pixels inside its 2D object mask, denoted by " + }, + { + "bbox": [ + 46, + 318, + 287, + 390 + ], + "type": "inline_equation", + "content": "M^k" + }, + { + "bbox": [ + 46, + 318, + 287, + 390 + ], + "type": "text", + "content": ". Note that it is always true that " + }, + { + "bbox": [ + 46, + 318, + 287, + 390 + ], + "type": "inline_equation", + "content": "M^k \\subset \\mathcal{R}^k" + }, + { + "bbox": [ + 46, + 318, + 287, + 390 + ], + "type": "text", + "content": ". 
The depth, colour and occupancy loss for the object " + }, + { + "bbox": [ + 46, + 318, + 287, + 390 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 46, + 318, + 287, + 390 + ], + "type": "text", + "content": " are defined as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 97, + 399, + 287, + 426 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 399, + 287, + 426 + ], + "spans": [ + { + "bbox": [ + 97, + 399, + 287, + 426 + ], + "type": "interline_equation", + "content": "L _ {d e p t h} ^ {k} = M ^ {k} \\odot \\sum_ {\\mathbf {r} \\in R ^ {k}} | \\hat {D} (\\mathbf {r}) - D (\\mathbf {r}) |, \\tag {4}", + "image_path": "66af1736a3f6dc69b6525005a8d0a0756880e441961f3c5b151527bc8c0df828.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 96, + 428, + 287, + 454 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 428, + 287, + 454 + ], + "spans": [ + { + "bbox": [ + 96, + 428, + 287, + 454 + ], + "type": "interline_equation", + "content": "L _ {\\text {c o l o u r}} ^ {k} = M ^ {k} \\odot \\sum_ {\\mathbf {r} \\in R ^ {k}} | \\hat {C} (\\mathbf {r}) - C (\\mathbf {r}) |, \\tag {5}", + "image_path": "67b76b6b89d586fb26733a97478fc5a1427057caeacc3c64d4bcc2d0df919249.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 81, + 456, + 287, + 483 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 456, + 287, + 483 + ], + "spans": [ + { + "bbox": [ + 81, + 456, + 287, + 483 + ], + "type": "interline_equation", + "content": "L _ {\\text {o c c u p a n c y}} ^ {k} = \\sum_ {\\mathbf {r} \\in R ^ {k}} | \\hat {O} (\\mathbf {r}) - M ^ {k} (\\mathbf {r}) |. \\tag {6}", + "image_path": "e401f775f8f477edd4877ea98e30c4ff422e0f6566dd19605d476bef36ec599c.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 492, + 287, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 492, + 287, + 516 + ], + "spans": [ + { + "bbox": [ + 46, + 492, + 287, + 516 + ], + "type": "text", + "content": "The overall training objective then accumulates losses for all " + }, + { + "bbox": [ + 46, + 492, + 287, + 516 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 46, + 492, + 287, + 516 + ], + "type": "text", + "content": " objects:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 58, + 525, + 287, + 558 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 525, + 287, + 558 + ], + "spans": [ + { + "bbox": [ + 58, + 525, + 287, + 558 + ], + "type": "interline_equation", + "content": "L = \\sum_ {k = 1} ^ {K} L _ {\\text {d e p t h}} ^ {k} + \\lambda_ {1} \\cdot L _ {\\text {c o l o u r}} ^ {k} + \\lambda_ {2} \\cdot L _ {\\text {o c c u p a n c y}} ^ {k}. 
\\tag {7}", + "image_path": "cea808114ea749c37d1d638db1e0336529250adede151a11d7f576b7fa900809.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 567, + 287, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 567, + 287, + 591 + ], + "spans": [ + { + "bbox": [ + 46, + 567, + 287, + 591 + ], + "type": "text", + "content": "We choose loss weightings " + }, + { + "bbox": [ + 46, + 567, + 287, + 591 + ], + "type": "inline_equation", + "content": "\\lambda_1 = 5" + }, + { + "bbox": [ + 46, + 567, + 287, + 591 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 567, + 287, + 591 + ], + "type": "inline_equation", + "content": "\\lambda_{2} = 10" + }, + { + "bbox": [ + 46, + 567, + 287, + 591 + ], + "type": "text", + "content": ", which we found to work well in our experiments." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 599, + 220, + 612 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 599, + 220, + 612 + ], + "spans": [ + { + "bbox": [ + 47, + 599, + 220, + 612 + ], + "type": "text", + "content": "3.4. Compositional Scene Rendering" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 618, + 288, + 714 + ], + "type": "text", + "content": "Since vMAP represents objects in a purely disentangled representation space, we can obtain each 3D object by querying within its estimated 3D object bounds and easily manipulate it. For 2D novel view synthesis, we use the Ray-Box Intersection algorithm [14] to calculate near and far bounds for each object, and then rank rendered depths along each ray to achieve occlusion-aware scene-level rendering. This disentangled representation also opens up other types" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 72, + 545, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 120 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 120 + ], + "type": "text", + "content": "of fine-grained object-level manipulation, such as changing object shape or textures by conditioning on disentangled pre-trained feature fields [20, 42], which we consider as an interesting future direction." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 128, + 387, + 142 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 128, + 387, + 142 + ], + "spans": [ + { + "bbox": [ + 305, + 128, + 387, + 142 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 148, + 545, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 148, + 545, + 256 + ], + "spans": [ + { + "bbox": [ + 304, + 148, + 545, + 256 + ], + "type": "text", + "content": "We have comprehensively evaluated vMAP on a range of different datasets, which include both simulated and real-world sequences, with and without ground-truth object masks and poses. For all datasets, we qualitatively compare our system to prior state-of-the-art SLAM frameworks on 2D and 3D scene-level and object-level rendering. We further quantitatively compare these systems in datasets where ground-truth meshes are available. Please see our attached supplementary material for more results." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 263, + 422, + 277 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 263, + 422, + 277 + ], + "spans": [ + { + "bbox": [ + 305, + 263, + 422, + 277 + ], + "type": "text", + "content": "4.1. Experimental Setup" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 281, + 545, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 281, + 545, + 365 + ], + "spans": [ + { + "bbox": [ + 304, + 281, + 545, + 365 + ], + "type": "text", + "content": "Datasets We evaluated on Replica [29], ScanNet [3], and TUM RGB-D [6]. Each dataset contains sequences with different levels of quality in object masks, depth and pose measurements. Additionally, we also showed vMAP's performance in complex real-world with self-captured video sequences recorded by an Azure Kinect RGB-D camera. An overview of these datasets is shown in Tab. 1." + } + ] + } + ], + "index": 16 + }, + { + "type": "table", + "bbox": [ + 307, + 372, + 545, + 434 + ], + "blocks": [ + { + "bbox": [ + 307, + 372, + 545, + 434 + ], + "lines": [ + { + "bbox": [ + 307, + 372, + 545, + 434 + ], + "spans": [ + { + "bbox": [ + 307, + 372, + 545, + 434 + ], + "type": "table", + "html": "
<table><tr><td></td><td>Object Masks</td><td>Depth Quality</td><td>Pose Estimation</td></tr>
<tr><td>Replica</td><td>Perfect GT</td><td>Perfect GT</td><td>Perfect GT</td></tr>
<tr><td>ScanNet</td><td>Noisy</td><td>Noisy</td><td>Perfect GT</td></tr>
<tr><td>TUM RGB-D</td><td>Detic</td><td>Noisy</td><td>ORB-SLAM3</td></tr>
<tr><td>Our Recording</td><td>Detic</td><td>Noisy</td><td>ORB-SLAM3</td></tr></table>
", + "image_path": "8c8023181474d15da3baad08fe14767bf86a356b8237a2cb0ff515a2c4a85e99.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 339, + 442, + 511, + 453 + ], + "lines": [ + { + "bbox": [ + 339, + 442, + 511, + 453 + ], + "spans": [ + { + "bbox": [ + 339, + 442, + 511, + 453 + ], + "type": "text", + "content": "Table 1. An overview of datasets we evaluated." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 457, + 545, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 457, + 545, + 517 + ], + "spans": [ + { + "bbox": [ + 304, + 457, + 545, + 517 + ], + "type": "text", + "content": "Datasets with perfect ground-truth information represent the upper-bound performance of our system. We expect vMAP's performance in the real-world setting can be further improved, when coupled with a better instance segmentation and pose estimation framework." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 521, + 545, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 545, + 629 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 545, + 629 + ], + "type": "text", + "content": "Implementation Details We conduct all experiments on a desktop PC with a 3.60 GHz i7-11700K CPU and a single Nvidia RTX 3090 GPU. We choose our instance segmentation detector to be Detic [47], pre-trained on an open-vocabulary LVIS dataset [7] which contains more than 1000 object classes. We choose our pose estimation framework to be ORB-SLAM3 [2], for its fast and accurate tracking performance. We continuously update the keyframe poses using the latest estimates from ORB-SLAM3." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "content": "We applied the same set of hyper-parameters for all datasets. Both our object and background model use 4-layer MLPs, with each layer having hidden size 32 (object) and 128 (background). For object / background, we selected keyframes every " + }, + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "inline_equation", + "content": "25/50" + }, + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "content": " frames, " + }, + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "inline_equation", + "content": "120/1200" + }, + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "content": " rays each training step with 10 points per ray. The number of objects in a scene typically varies between 20 and 70, among which the" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 748, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 748, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 748, + 312, + 757 + ], + "type": "text", + "content": "955" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 48, + 70, + 545, + 159 + ], + "blocks": [ + { + "bbox": [ + 48, + 70, + 545, + 159 + ], + "lines": [ + { + "bbox": [ + 48, + 70, + 545, + 159 + ], + "spans": [ + { + "bbox": [ + 48, + 70, + 545, + 159 + ], + "type": "table", + "html": "
TSDF-Fusion*iMAPiMAP*NICE-SLAMNICE-SLAM*vMAP
Scene Acc. [cm] ↓1.284.432.152.943.043.20
Scene Comp. [cm] ↓5.615.562.884.023.842.39
Scene Comp. Ratio [<5cm %] ↑82.6779.0690.8586.7386.5292.99
Object Acc. [cm] ↓0.45-3.57-3.912.23
Object Comp. [cm] ↓3.69-2.38-3.271.44
Object Comp. Ratio [<5cm %] ↑82.98-90.19-83.9794.55
Object Comp. Ratio [<1cm %] ↑61.70-47.79-37.7969.23
", + "image_path": "b85e48cf1d501b1055e72fdcec21f287b7f09c09800f95098e6a6d7e04ea85e5.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 161, + 537, + 173 + ], + "lines": [ + { + "bbox": [ + 55, + 161, + 537, + 173 + ], + "spans": [ + { + "bbox": [ + 55, + 161, + 537, + 173 + ], + "type": "text", + "content": "Table 2. Averaged reconstruction results for 8 indoor Replica scenes. * represents the baselines we re-trained with ground-truth pose." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 59, + 177, + 548, + 663 + ], + "blocks": [ + { + "bbox": [ + 59, + 177, + 548, + 663 + ], + "lines": [ + { + "bbox": [ + 59, + 177, + 548, + 663 + ], + "spans": [ + { + "bbox": [ + 59, + 177, + 548, + 663 + ], + "type": "image", + "image_path": "101e2a669703e2eb7a292e30351473b042a8c2f462df91ce1ab39684fcaab209.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 667, + 547, + 690 + ], + "lines": [ + { + "bbox": [ + 46, + 667, + 547, + 690 + ], + "spans": [ + { + "bbox": [ + 46, + 667, + 547, + 690 + ], + "type": "text", + "content": "Figure 3. Scene reconstruction for 4 selected Replica scenes. Interesting regions are highlighted with coloured boxes, showing vMAP's significantly improved reconstruction quality. All scene meshes are provided by the original authors." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "text", + "content": "956" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 58, + 85, + 124, + 146 + ], + "blocks": [ + { + "bbox": [ + 60, + 69, + 119, + 79 + ], + "lines": [ + { + "bbox": [ + 60, + 69, + 119, + 79 + ], + "spans": [ + { + "bbox": [ + 60, + 69, + 119, + 79 + ], + "type": "text", + "content": "TSDF-Fusion" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 58, + 85, + 124, + 146 + ], + "lines": [ + { + "bbox": [ + 58, + 85, + 124, + 146 + ], + "spans": [ + { + "bbox": [ + 58, + 85, + 124, + 146 + ], + "type": "image", + "image_path": "a9842deedfad70f6912b9dc18e83685a385d58d49881a8cf0405ca4dff393cc4.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 143, + 86, + 206, + 145 + ], + "blocks": [ + { + "bbox": [ + 157, + 70, + 192, + 80 + ], + "lines": [ + { + "bbox": [ + 157, + 70, + 192, + 80 + ], + "spans": [ + { + "bbox": [ + 157, + 70, + 192, + 80 + ], + "type": "text", + "content": "ObjSDF" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 143, + 86, + 206, + 145 + ], + "lines": [ + { + "bbox": [ + 143, + 86, + 206, + 145 + ], + "spans": [ + { + "bbox": [ + 143, + 86, + 206, + 145 + ], + "type": "image", + "image_path": "e396859132901ab9dfabd570622f2f6fb3f929f13a7f21b7f37f97587bb9a85f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 227, + 85, + 296, + 146 + ], + "blocks": [ + { + "bbox": [ + 245, + 70, + 272, + 79 + ], + "lines": [ + { + "bbox": [ + 245, 
+ 70, + 272, + 79 + ], + "spans": [ + { + "bbox": [ + 245, + 70, + 272, + 79 + ], + "type": "text", + "content": "vMAP" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 227, + 85, + 296, + 146 + ], + "lines": [ + { + "bbox": [ + 227, + 85, + 296, + 146 + ], + "spans": [ + { + "bbox": [ + 227, + 85, + 296, + 146 + ], + "type": "image", + "image_path": "b8e4699eb72f2854f3a10949d247f924f14fa4123d1d68ffc12e41c57151bc78.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 307, + 88, + 376, + 143 + ], + "blocks": [ + { + "bbox": [ + 314, + 70, + 370, + 79 + ], + "lines": [ + { + "bbox": [ + 314, + 70, + 370, + 79 + ], + "spans": [ + { + "bbox": [ + 314, + 70, + 370, + 79 + ], + "type": "text", + "content": "TSDF-Fusion" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 307, + 88, + 376, + 143 + ], + "lines": [ + { + "bbox": [ + 307, + 88, + 376, + 143 + ], + "spans": [ + { + "bbox": [ + 307, + 88, + 376, + 143 + ], + "type": "image", + "image_path": "a4c957d7487883bc99b0592446b261e5d59ea70b3fa1d39744b892b2ffcdec25.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 389, + 96, + 460, + 145 + ], + "blocks": [ + { + "bbox": [ + 408, + 70, + 443, + 80 + ], + "lines": [ + { + "bbox": [ + 408, + 70, + 443, + 80 + ], + "spans": [ + { + "bbox": [ + 408, + 70, + 443, + 80 + ], + "type": "text", + "content": "ObjSDF" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 389, + 96, + 460, + 145 + ], + "lines": [ + { + "bbox": [ + 389, + 96, + 460, + 145 + ], + "spans": [ + { + "bbox": [ + 389, + 96, + 460, + 145 + ], + "type": "image", + "image_path": "b7880ce3b18f3d1c5dbbc19679d80157c707e03397a7e9074e8b2fb3b8b1fc3a.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 476, + 87, + 543, + 144 + ], + "blocks": [ + { + "bbox": [ + 496, + 70, + 523, + 79 + ], + "lines": [ + { + "bbox": [ + 496, + 70, + 523, + 79 + ], + "spans": [ + { + "bbox": [ + 496, + 70, + 523, + 79 + ], + "type": "text", + "content": "vMAP" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 476, + 87, + 543, + 144 + ], + "lines": [ + { + "bbox": [ + 476, + 87, + 543, + 144 + ], + "spans": [ + { + "bbox": [ + 476, + 87, + 543, + 144 + ], + "type": "image", + "image_path": "b5e3c4667ee120a8345d89b107c58717fbc96a6d55eabb194c397d5d75ef5a4b.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 58, + 163, + 118, + 216 + ], + "blocks": [ + { + "bbox": [ + 58, + 163, + 118, + 216 + ], + "lines": [ + { + "bbox": [ + 58, + 163, + 118, + 216 + ], + "spans": [ + { + "bbox": [ + 58, + 163, + 118, + 216 + ], + "type": "image", + "image_path": "29fbad12d300d6b7e9635941ce1a5f178cef86cb70174de42a002978c7eac868.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 228, + 544, + 251 + ], + "lines": [ + { + "bbox": [ + 46, + 228, + 544, + 251 + ], + "spans": [ + { + "bbox": [ + 46, + 228, + 544, + 251 + ], + "type": "text", + "content": "Figure 4. Visualisation of object reconstructions with vMAP compared to TSDF-Fusion and ObjSDF. 
Note that all object reconstructions from ObjSDF require much longer off-line training. All object meshes from ObjSDF are provided by the original authors." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 143, + 164, + 202, + 216 + ], + "blocks": [ + { + "bbox": [ + 143, + 164, + 202, + 216 + ], + "lines": [ + { + "bbox": [ + 143, + 164, + 202, + 216 + ], + "spans": [ + { + "bbox": [ + 143, + 164, + 202, + 216 + ], + "type": "image", + "image_path": "2300ca72cea5e322f566e720f4d69662710bd3a1e4589e3e47bfb276192fb8f9.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 226, + 163, + 286, + 216 + ], + "blocks": [ + { + "bbox": [ + 226, + 163, + 286, + 216 + ], + "lines": [ + { + "bbox": [ + 226, + 163, + 286, + 216 + ], + "spans": [ + { + "bbox": [ + 226, + 163, + 286, + 216 + ], + "type": "image", + "image_path": "4c282363d906f8363f25d7ec23f2024ca14ced2b0d17b05d3173a5a9ff8ffefe.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 302, + 161, + 364, + 215 + ], + "blocks": [ + { + "bbox": [ + 302, + 161, + 364, + 215 + ], + "lines": [ + { + "bbox": [ + 302, + 161, + 364, + 215 + ], + "spans": [ + { + "bbox": [ + 302, + 161, + 364, + 215 + ], + "type": "image", + "image_path": "8853dd08f1584336f224ba73c4c14cf3bc140d00fa4b61c25390aba4adb59fb4.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 395, + 163, + 446, + 215 + ], + "blocks": [ + { + "bbox": [ + 395, + 163, + 446, + 215 + ], + "lines": [ + { + "bbox": [ + 395, + 163, + 446, + 215 + ], + "spans": [ + { + "bbox": [ + 395, + 163, + 446, + 215 + ], + "type": "image", + "image_path": "27d589b3fcea57ac72a942113af6ae1386492d2daa6d632a49bfcb817f7621ae.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 476, + 161, + 531, + 214 + ], + "blocks": [ + { + "bbox": [ + 476, + 161, + 531, + 214 + ], + "lines": [ + { + "bbox": [ + 476, + 161, + 531, + 214 + ], + "spans": [ + { + "bbox": [ + 476, + 161, + 531, + 214 + ], + "type": "image", + "image_path": "9bcccd19d754b2561327fa0deb36a0f583910aca2b025998b03b4de2c2c836d3.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 56, + 269, + 303, + 399 + ], + "blocks": [ + { + "bbox": [ + 149, + 258, + 203, + 267 + ], + "lines": [ + { + "bbox": [ + 149, + 258, + 203, + 267 + ], + "spans": [ + { + "bbox": [ + 149, + 258, + 203, + 267 + ], + "type": "text", + "content": "NICE-SLAM*" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 56, + 269, + 303, + 399 + ], + "lines": [ + { + "bbox": [ + 56, + 269, + 303, + 399 + ], + "spans": [ + { + "bbox": [ + 56, + 269, + 303, + 399 + ], + "type": "image", + "image_path": "92d81487f672b50fa6d13830f40ba428f6d7cb797b6e24806c8956c98e23d47e.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 407, + 544, + 430 + ], + "lines": [ + { + "bbox": [ + 46, + 407, + 544, + 430 + ], + "spans": [ + { + "bbox": [ + 46, + 407, + 544, + 430 + ], + "type": "text", + "content": "Figure 5. 
Visualisation of scene reconstruction from NICE-SLAM* (left) and vMAP (right) in a selected ScanNet sequence. Interesting regions are zoomed in. NICE-SLAM* was re-trained with ground-truth poses." + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 310, + 269, + 557, + 399 + ], + "blocks": [ + { + "bbox": [ + 417, + 258, + 443, + 267 + ], + "lines": [ + { + "bbox": [ + 417, + 258, + 443, + 267 + ], + "spans": [ + { + "bbox": [ + 417, + 258, + 443, + 267 + ], + "type": "text", + "content": "vMAP" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 310, + 269, + 557, + 399 + ], + "lines": [ + { + "bbox": [ + 310, + 269, + 557, + 399 + ], + "spans": [ + { + "bbox": [ + 310, + 269, + 557, + 399 + ], + "type": "image", + "image_path": "8a02031b70e79897803f030b8c695f33b451a8a63e57fef24dc1d533df2dba8a.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "bbox": [ + 46, + 441, + 287, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 441, + 287, + 466 + ], + "spans": [ + { + "bbox": [ + 46, + 441, + 287, + 466 + ], + "type": "text", + "content": "largest number of objects are in Replica and ScanNet scenes with an average of 50 objects per scene." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 46, + 472, + 287, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 472, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 46, + 472, + 287, + 555 + ], + "type": "text", + "content": "Metrics Following the convention of prior work [31, 48], we adopt Accuracy, Completion, and Completion Ratio for 3D scene-level reconstruction metrics. Besides, we note that such scene-level metrics are heavily biased towards the reconstruction of large objects like walls and floors. Therefore, we additionally provide these metrics at the object-level, by averaging metrics for all objects in each scene." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 47, + 563, + 287, + 575 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 563, + 287, + 575 + ], + "spans": [ + { + "bbox": [ + 47, + 563, + 287, + 575 + ], + "type": "text", + "content": "4.2. Evaluation on Scene and Object Reconstruction" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 46, + 582, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 287, + 713 + ], + "type": "text", + "content": "Results on Replica We experimented on 8 Replica scenes, using the rendered trajectories provided in [31], with 2000 RGB-D frames in each scene. Tab. 2 shows the averaged quantitative reconstruction results in these Replica indoor sequences. For scene-level reconstruction, we compared with TSDF-Fusion [46], iMAP [31] and NICE-SLAM [48]. To isolate reconstruction, we also provided results for these baselines re-trained with ground-truth pose (marked with " + }, + { + "bbox": [ + 46, + 582, + 287, + 713 + ], + "type": "inline_equation", + "content": "*" + }, + { + "bbox": [ + 46, + 582, + 287, + 713 + ], + "type": "text", + "content": "), with their open-sourced code for the fair comparison. 
Specifically, iMAP* was implemented as a special case of vMAP, when considering the entire scene" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 305, + 442, + 544, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 442, + 544, + 466 + ], + "spans": [ + { + "bbox": [ + 305, + 442, + 544, + 466 + ], + "type": "text", + "content": "as one object instance. For object-level reconstruction, we compared baselines trained with ground-truth pose." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 304, + 467, + 545, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 467, + 545, + 574 + ], + "spans": [ + { + "bbox": [ + 304, + 467, + 545, + 574 + ], + "type": "text", + "content": "vMAP's significant advantage thanks to object-level representation is to reconstruct tiny objects and objects with fine-grained details. Noticeably, vMAP achieved more than " + }, + { + "bbox": [ + 304, + 467, + 545, + 574 + ], + "type": "inline_equation", + "content": "50 - 70\\%" + }, + { + "bbox": [ + 304, + 467, + 545, + 574 + ], + "type": "text", + "content": " improvement over iMAP and NICE-SLAM for object-level completion. The scene reconstructions of 4 selected Replica sequences are shown in Fig. 3, with interesting regions highlighted in coloured boxes. The quantitative results for 2D novel view rendering are further provided in the supplementary material." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 304, + 582, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 546, + 713 + ], + "type": "text", + "content": "Results on ScanNet To evaluate on a more challenging setting, we experimented on ScanNet [3], a dataset composed of real scenes, with much noisier ground-truth depth maps and object masks. We choose a ScanNet sequence selected by ObjSDF [37], and we compared with TSDF-Fusion and ObjSDF for object-level reconstruction, and we compared with NICE-SLAM (re-trained with ground-truth pose) for scene-level reconstruction. Unlike ObjSDF, which was optimised from pre-selected posed images without depth for much longer off-line training, we ran both vMAP and TSDF-Fusion in an online setting with depth. As" + } + ] + } + ], + "index": 30 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "text", + "content": "957" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 72, + 286, + 177 + ], + "blocks": [ + { + "bbox": [ + 53, + 72, + 286, + 177 + ], + "lines": [ + { + "bbox": [ + 53, + 72, + 286, + 177 + ], + "spans": [ + { + "bbox": [ + 53, + 72, + 286, + 177 + ], + "type": "image", + "image_path": "e863167255c280c7952889e039ef2ad0b785edb9b06e5906579d211e933c372f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 185, + 287, + 218 + ], + "lines": [ + { + "bbox": [ + 47, + 185, + 287, + 218 + ], + "spans": [ + { + "bbox": [ + 47, + 185, + 287, + 218 + ], + "type": "text", + "content": "Figure 6. Visualisation of scene reconstruction from TSDF-Fusion (left) and vMAP (right) in a selected TUM RGB-D sequence, trained in real time for 99 seconds." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 50, + 232, + 282, + 284 + ], + "blocks": [ + { + "bbox": [ + 50, + 232, + 282, + 284 + ], + "lines": [ + { + "bbox": [ + 50, + 232, + 282, + 284 + ], + "spans": [ + { + "bbox": [ + 50, + 232, + 282, + 284 + ], + "type": "table", + "html": "
ATE RMSE [cm]↓iMAPNICE-SLAMvMAPORB-SLAM2
fr1/desk4.92.72.61.6
fr2.xyz2.01.81.60.4
fr3/office5.83.03.01.0
", + "image_path": "7b2112da8c8339a2b1a99d0b09b3bb8bf0c359ec2dc9484e7b2e70a52e4661bf.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 314, + 287, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 314, + 287, + 387 + ], + "spans": [ + { + "bbox": [ + 46, + 314, + 287, + 387 + ], + "type": "text", + "content": "shown in Fig. 4, we see that vMAP generates objects with more coherent geometry than TSDF-Fusion; and with much finer details than ObjSDF, though with a much shorter training time. And consistently, we can see that vMAP generates much sharper object boundaries and textures compared to NICE-SLAM, as shown in Fig. 5." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 400, + 287, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 400, + 287, + 592 + ], + "spans": [ + { + "bbox": [ + 46, + 400, + 287, + 592 + ], + "type": "text", + "content": "Results on TUM RGB-D We evaluated on a TUM RGB-D sequence captured in the real-world, with object masks predicted by an off-the-shelf pre-trained instance segmentation network [47], and poses estimated by ORB-SLAM3 [2]. Since our object detector has no spatio-temporal consistency, we found that the same object can be occasionally detected as two different instances, which leads to some reconstruction artifacts. For example, the object ' globe' shown in Fig. 6 was also detected as 'balloon' in some frames, resulting the 'splitting' artifacts in the final object reconstruction. Overall, vMAP still predicts more coherent reconstruction for most objects in a scene, with realistic hole-filling capabilities compared to TSDF-Fusion. However, we acknowledge that the completion of complete out-of-view regions (e.g., the back of a chair) is beyond the reach of our system due to the lack of general 3D prior." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 594, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 287, + 714 + ], + "type": "text", + "content": "Though our work focuses more on mapping performance than pose estimation, we also report ATE RMSE [30] in Tab. 3 following [31,48], by jointly optimising camera pose with map. We can observe that vMAP achieves superior performance, due to the fact that reconstruction and tracking quality are typically highly interdependent. However, there is a noticeable performance gap compared to ORBSLAM. As such, we directly choose ORB-SLAM as our external tracking system, which leads to faster training speed, cleaner implementation, and higher tracking quality." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 309, + 71, + 544, + 225 + ], + "blocks": [ + { + "bbox": [ + 309, + 71, + 544, + 225 + ], + "lines": [ + { + "bbox": [ + 309, + 71, + 544, + 225 + ], + "spans": [ + { + "bbox": [ + 309, + 71, + 544, + 225 + ], + "type": "image", + "image_path": "1b340c53001a5cd2b483109310669dbbe01b840f94a625255a7df53bc5d227e8.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 235, + 545, + 269 + ], + "lines": [ + { + "bbox": [ + 305, + 235, + 545, + 269 + ], + "spans": [ + { + "bbox": [ + 305, + 235, + 545, + 269 + ], + "type": "text", + "content": "Figure 7. 
Visualisation of table-top reconstruction (top) and individual object reconstructions (bottom), from vMAP running in real time using an Azure Kinect RGB-D camera for 170 seconds." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 309, + 280, + 543, + 331 + ], + "blocks": [ + { + "bbox": [ + 74, + 289, + 259, + 300 + ], + "lines": [ + { + "bbox": [ + 74, + 289, + 259, + 300 + ], + "spans": [ + { + "bbox": [ + 74, + 289, + 259, + 300 + ], + "type": "text", + "content": "Table 3. Camera tracking results on TUM RGB-D." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 280, + 543, + 331 + ], + "lines": [ + { + "bbox": [ + 309, + 280, + 543, + 331 + ], + "spans": [ + { + "bbox": [ + 309, + 280, + 543, + 331 + ], + "type": "table", + "html": "
NICE-SLAM*iMAPvMAPvMAP (w/o BG)
Model Param. ↓12.12M0.32M0.66M0.56M
Runtime ↓34min34s12min29s8min16s6min01s
Mapping Time ↓845ms360ms226ms120ms
", + "image_path": "64f453d5a3bdbf58a652417dda9e707bdcc4ba56cab0a15e4a939ddc124261f8.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 339, + 545, + 373 + ], + "lines": [ + { + "bbox": [ + 305, + 339, + 545, + 373 + ], + "spans": [ + { + "bbox": [ + 305, + 339, + 545, + 373 + ], + "type": "text", + "content": "Table 4. vMAP is extremely memory-efficient and runs " + }, + { + "bbox": [ + 305, + 339, + 545, + 373 + ], + "type": "inline_equation", + "content": "1.5\\mathrm{x}" + }, + { + "bbox": [ + 305, + 339, + 545, + 373 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 339, + 545, + 373 + ], + "type": "inline_equation", + "content": "4\\mathrm{x}" + }, + { + "bbox": [ + 305, + 339, + 545, + 373 + ], + "type": "text", + "content": " faster than iMAP and NICE-SLAM respectively, with even higher performance gains without the background (BG) model." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 305, + 384, + 545, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 384, + 545, + 444 + ], + "spans": [ + { + "bbox": [ + 305, + 384, + 545, + 444 + ], + "type": "text", + "content": "Results on Live Kinect Data Finally, we show the reconstruction result of vMAP on a table-top scene, from running in real-time with an Azure Kinect RGB-D camera. As shown in Fig. 7, vMAP is able to generate a range of realistic, watertight object meshes from different categories." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 453, + 432, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 453, + 432, + 464 + ], + "spans": [ + { + "bbox": [ + 306, + 453, + 432, + 464 + ], + "type": "text", + "content": "4.3. Performance Analysis" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 472, + 545, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 472, + 545, + 520 + ], + "spans": [ + { + "bbox": [ + 305, + 472, + 545, + 520 + ], + "type": "text", + "content": "In this section, we compare different training strategies and architectural design choices for our vMAP system. For simplicity, all experiments were done on the Replica Room-0 sequence, with our default training hyper-parameters." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 526, + 546, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 526, + 546, + 670 + ], + "spans": [ + { + "bbox": [ + 304, + 526, + 546, + 670 + ], + "type": "text", + "content": "Memory and Runtime We compared memory usage and runtime with iMAP and NICE-SLAM in Tab. 4 and Fig. 9, all trained with ground-truth pose, and with the default training hyper-parameters listed in each method, for fair comparison. Specifically, we reported the Runtime for training the entire sequence, and Mapping Time for training each single frame, given the exact same hardware. We can observe that vMAP is highly memory efficient with less than 1M parameters. We want to highlight that vMAP achieves better reconstruction quality, and runs significantly faster (" + }, + { + "bbox": [ + 304, + 526, + 546, + 670 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 304, + 526, + 546, + 670 + ], + "type": "text", + "content": "5Hz) than iMAP and NICE-SLAM with 1.5x and 4x training speed improvement respectively." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 677, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 677, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 547, + 713 + ], + "type": "text", + "content": "Vectorised v.s. Sequential Training We ablated training speed with vectorised and sequential operations (for loops), conditioned on different numbers of objects and different" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "text", + "content": "958" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 71, + 165, + 192 + ], + "blocks": [ + { + "bbox": [ + 50, + 71, + 165, + 192 + ], + "lines": [ + { + "bbox": [ + 50, + 71, + 165, + 192 + ], + "spans": [ + { + "bbox": [ + 50, + 71, + 165, + 192 + ], + "type": "image", + "image_path": "7c401d551ce6d8f26fdfddb4ced3a26d30ad59eae3098e791383f566ff3cb60e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 201, + 287, + 224 + ], + "lines": [ + { + "bbox": [ + 46, + 201, + 287, + 224 + ], + "spans": [ + { + "bbox": [ + 46, + 201, + 287, + 224 + ], + "type": "text", + "content": "Figure 8. Vectorised operation allows extremely fast training speed compared to standard sequential operations using for loops." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 169, + 71, + 284, + 194 + ], + "blocks": [ + { + "bbox": [ + 169, + 71, + 284, + 194 + ], + "lines": [ + { + "bbox": [ + 169, + 71, + 284, + 194 + ], + "spans": [ + { + "bbox": [ + 169, + 71, + 284, + 194 + ], + "type": "image", + "image_path": "f7e64f3dc1b4a31e45229d4c55f0cdf16ad3e99589189dde6199a03081c9121a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 167, + 545, + 200 + ], + "lines": [ + { + "bbox": [ + 304, + 167, + 545, + 200 + ], + "spans": [ + { + "bbox": [ + 304, + 167, + 545, + 200 + ], + "type": "text", + "content": "Figure 9. Object-level Reconstruction v.s. Model Param. (denoted by network hidden size). vMAP is more compact than iMAP, with the performance starting to saturate from hidden size 16." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 234, + 286, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 234, + 286, + 365 + ], + "spans": [ + { + "bbox": [ + 46, + 234, + 286, + 365 + ], + "type": "text", + "content": "sizes of object model. In Fig. 8, we can see that vectorised training enables tremendous improvements in optimisation speed, especially when we have a large number of objects. And with vectorised training, each optimisation step takes no more than " + }, + { + "bbox": [ + 46, + 234, + 286, + 365 + ], + "type": "inline_equation", + "content": "15\\mathrm{ms}" + }, + { + "bbox": [ + 46, + 234, + 286, + 365 + ], + "type": "text", + "content": " even when we train as many as 200 objects. 
Additionally, vectorised training is also stable across a wide range of model sizes, suggesting that we can train our object models with an even larger size if required, with minimal additional training time. As expected, vectorised training and for loops will eventually have similar training speed, when we reach the hardware's memory limit." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 366, + 286, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 366, + 286, + 426 + ], + "spans": [ + { + "bbox": [ + 47, + 366, + 286, + 426 + ], + "type": "text", + "content": "To train multiple models in parallel, an initial approach we tried was spawning a process per object. However, we were only able to spawn a very limited number of processes, due to the per process CUDA memory overhead, which significantly limited the number of objects." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 432, + 286, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 432, + 286, + 564 + ], + "spans": [ + { + "bbox": [ + 46, + 432, + 286, + 564 + ], + "type": "text", + "content": "Object Model Capacity As vectorised training has minimal effect on training speed in terms of object model design, we also investigated how the object-level reconstruction quality is affected by different object model sizes. We experimented with different object model sizes by varying the hidden size of each MLP layer. In Fig. 9, we can see that the object-level performance starts to saturate starting from hidden size 16, with minimal or no improvement by further increasing model sizes. This indicates that object-level representation is highly compressible, and can be efficiently and accurately parameterised by very few parameters." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 570, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 286, + 713 + ], + "type": "text", + "content": "Stacked MLPs vs. Shared MLP Apart from representing each object by a single individual MLP, we also explored a shared MLP design by considering multi-object mapping as a multi-task learning problem [26, 33]. Here, each object is additionally associated with a learnable latent code, and this latent code is considered as an conditional input to the network, jointly optimised with the network weights. Though we have tried multiple multi-task learning architectures [12, 18], early experiments (denoted as vMAP-S in Fig. 
9) showed that this shared MLP design achieved slightly degraded reconstruction quality and had no distinct training speed improvement compared to stacked" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 310, + 72, + 424, + 156 + ], + "blocks": [ + { + "bbox": [ + 310, + 72, + 424, + 156 + ], + "lines": [ + { + "bbox": [ + 310, + 72, + 424, + 156 + ], + "spans": [ + { + "bbox": [ + 310, + 72, + 424, + 156 + ], + "type": "image", + "image_path": "02860218b955d22345fa48a3b9991abca8f15895c9b30df086162795b18d2b0b.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 427, + 72, + 545, + 156 + ], + "blocks": [ + { + "bbox": [ + 427, + 72, + 545, + 156 + ], + "lines": [ + { + "bbox": [ + 427, + 72, + 545, + 156 + ], + "spans": [ + { + "bbox": [ + 427, + 72, + 545, + 156 + ], + "type": "image", + "image_path": "07fa112510c756f6f16a7b78c975c9b13830cb89c4a27e74994d6e0871f9875c.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 209, + 545, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 209, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 304, + 209, + 545, + 342 + ], + "type": "text", + "content": "MLPs, particularly when powered by vectorised training. Furthermore, we found that shared MLP design can lead to undesired training properties: i) The shared MLP needs to be optimised along with the latent codes from all the objects, since the network weights and all object codes are entangled in a shared representation space. ii) The shared MLP capacity is fixed during training, and therefore the representation space might not be sufficient with an increasing number of objects. This accentuates the advantages of disentangled object representation space, which is a crucial design element of vMAP system." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 352, + 378, + 364 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 352, + 378, + 364 + ], + "spans": [ + { + "bbox": [ + 306, + 352, + 378, + 364 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 372, + 545, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 372, + 545, + 492 + ], + "spans": [ + { + "bbox": [ + 304, + 372, + 545, + 492 + ], + "type": "text", + "content": "We have presented vMAP, a real-time object-level mapping system with simple and compact neural implicit representation. By decomposing the 3D scene into meaningful instances, represented by a batch of tiny separate MLPs, the system models the 3D scene in an efficient and flexible way, enabling scene re-composition, independent tracking and continually updating of objects of interest. In addition to more accurate and compact object-centric 3D reconstruction, our system is able to predict plausible watertight surfaces for each object, even under partial occlusion." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 497, + 545, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 497, + 545, + 628 + ], + "spans": [ + { + "bbox": [ + 304, + 497, + 545, + 628 + ], + "type": "text", + "content": "Limitations and Future Work Our current system relies on an off-the-shelf detector for instance masks, which are not necessarily spatio-temporally consistent. 
Though the ambiguity is partially alleviated by data association and multi-view supervision, a reasonable global constraints will be better. As objects are modelled independently, dynamic objects can be continually tracked and reconstructed to enable downstream tasks, e.g., robotic manipulation [34]. To extend our system to a monocular dense mapping system, depth estimation networks [13, 41] or more efficient neural rendering approaches [19] could be further integrated." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 634, + 408, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 634, + 408, + 647 + ], + "spans": [ + { + "bbox": [ + 306, + 634, + 408, + 647 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 654, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 545, + 712 + ], + "type": "text", + "content": "Research presented in this paper has been supported by Dyson Technology Ltd. Xin Kong holds a China Scholarship Council-Imperial Scholarship. We are very grateful to Edgar Sucar, Binbin Xu, Hidenobu Matsuki and Anagh Malik for fruitful discussions." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "text", + "content": "959" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "type": "text", + "content": "[1] Jad Abou-Chakra, Feras Dayoub, and Niko Sunderhauf. Implicit object mapping with noisy data. arXiv preprint arXiv:2204.10516, 2022. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 126, + 288, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 126, + 288, + 180 + ], + "spans": [ + { + "bbox": [ + 53, + 126, + 288, + 180 + ], + "type": "text", + "content": "[2] Carlos Campos, Richard Elvira, Juan J Gomez Rodríguez, José MM Montiel, and Juan D Tardós. Orb-slam3: An accurate open-source library for visual, visual-inertial, and multimap slam. IEEE Transactions on Robotics (T-RO), 2021. 4, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 182, + 288, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 182, + 288, + 237 + ], + "spans": [ + { + "bbox": [ + 53, + 182, + 288, + 237 + ], + "type": "text", + "content": "[3] Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 
4, 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 239, + 288, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 239, + 288, + 293 + ], + "spans": [ + { + "bbox": [ + 53, + 239, + 288, + 293 + ], + "type": "text", + "content": "[4] Angela Dai, Matthias Nießner, Michael Zollhöfer, Shahram Izadi, and Christian Theobalt. Bundlefusion: Real-time globally consistent 3d reconstruction using on-the-fly surface reintegration. ACM Transactions on Graphics (ToG), 2017. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 295, + 288, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 295, + 288, + 339 + ], + "spans": [ + { + "bbox": [ + 53, + 295, + 288, + 339 + ], + "type": "text", + "content": "[5] Thomas Davies, Derek Nowrouzezahrai, and Alec Jacobson. On the effectiveness of weight-encoded neural implicit 3d shapes. In Proceedings of the International Conference on Machine Learning (ICML), 2021. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 341, + 288, + 394 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 341, + 288, + 394 + ], + "spans": [ + { + "bbox": [ + 53, + 341, + 288, + 394 + ], + "type": "text", + "content": "[6] Felix Endres, Jürgen Hess, Nikolas Engelhard, Jürgen Sturm, Daniel Cremers, and Wolfram Burgard. An Evaluation of the RGB-D SLAM System. In Proceedings of the IEEE International Conference on Robotics and Automation (ICRA), 2012. 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 397, + 288, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 397, + 288, + 441 + ], + "spans": [ + { + "bbox": [ + 53, + 397, + 288, + 441 + ], + "type": "text", + "content": "[7] Agrim Gupta, Piotr Dollar, and Ross Girshick. Lvis: A dataset for large vocabulary instance segmentation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 442, + 288, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 442, + 288, + 475 + ], + "spans": [ + { + "bbox": [ + 53, + 442, + 288, + 475 + ], + "type": "text", + "content": "[8] He Horace and Zou Richard. functorch: Jax-like composable function transforms for pytorch. https://github.com/pytorch/functorch, 2021.3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 477, + 288, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 477, + 288, + 520 + ], + "spans": [ + { + "bbox": [ + 53, + 477, + 288, + 520 + ], + "type": "text", + "content": "[9] Wonbong Jang and Lourdes Agapito. Codenerf: Disentangled neural radiance fields for object categories. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 522, + 287, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 522, + 287, + 577 + ], + "spans": [ + { + "bbox": [ + 48, + 522, + 287, + 577 + ], + "type": "text", + "content": "[10] Xin Kong, Xuemeng Yang, Guangyao Zhai, Xiangrui Zhao, Xianfang Zeng, Mengmeng Wang, Yong Liu, Wanlong Li, and Feng Wen. Semantic graph based place recognition for 3d point clouds. In Proceedings of the IEEE/RSJ Conference on Intelligent Robots and Systems (IROS), 2020. 
2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 578, + 287, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 578, + 287, + 633 + ], + "spans": [ + { + "bbox": [ + 48, + 578, + 287, + 633 + ], + "type": "text", + "content": "[11] Guanglin Li, Yifeng Li, Zhichao Ye, Qihang Zhang, Tao Kong, Zhaopeng Cui, and Guofeng Zhang. Generative category-level shape and pose estimation with semantic primitives. In Conference on Robot Learning (CoRL), 2022. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 635, + 287, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 287, + 678 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 287, + 678 + ], + "type": "text", + "content": "[12] Shikun Liu, Edward Johns, and Andrew J Davison. End-to-end multi-task learning with attention. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 8" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "text", + "content": "[13] Xiaoyang Lyu, Liang Liu, Mengmeng Wang, Xin Kong, Lina Liu, Yong Liu, Xinxin Chen, and Yi Yuan. Hr-depth: High resolution self-supervised monocular depth estimation. In" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "Proceedings of the National Conference on Artificial Intelligence (AAAI), 2021. 8" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 96, + 545, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 96, + 545, + 139 + ], + "spans": [ + { + "bbox": [ + 308, + 96, + 545, + 139 + ], + "type": "text", + "content": "[14] Alexander Majercik, Cyril Crassin, Peter Shirley, and Morgan McGuire. A ray-box intersection algorithm and efficient dynamic voxel rendering. Journal of Computer Graphics Techniques (JCGT), 2018. 4" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 140, + 545, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 140, + 545, + 183 + ], + "spans": [ + { + "bbox": [ + 308, + 140, + 545, + 183 + ], + "type": "text", + "content": "[15] John McCormac, Ronald Clark, Michael Bloesch, Andrew Davison, and Stefan Leutenegger. Fusion++: Volumetric object-level slam. In Proceedings of the International Conference on 3D Vision (3DV), 2018. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 185, + 545, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 185, + 545, + 238 + ], + "spans": [ + { + "bbox": [ + 308, + 185, + 545, + 238 + ], + "type": "text", + "content": "[16] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 
2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 239, + 545, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 239, + 545, + 293 + ], + "spans": [ + { + "bbox": [ + 308, + 239, + 545, + 293 + ], + "type": "text", + "content": "[17] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing scenes as neural radiance fields for view synthesis. In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 295, + 545, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 295, + 545, + 338 + ], + "spans": [ + { + "bbox": [ + 308, + 295, + 545, + 338 + ], + "type": "text", + "content": "[18] Ishan Misra, Abhinav Shrivastava, Abhinav Gupta, and Martial Hebert. Cross-stitch networks for multi-task learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 8" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 338, + 545, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 338, + 545, + 381 + ], + "spans": [ + { + "bbox": [ + 308, + 338, + 545, + 381 + ], + "type": "text", + "content": "[19] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (ToG), 2022. 2, 8" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 383, + 545, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 383, + 545, + 426 + ], + "spans": [ + { + "bbox": [ + 308, + 383, + 545, + 426 + ], + "type": "text", + "content": "[20] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 4" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 427, + 545, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 427, + 545, + 479 + ], + "spans": [ + { + "bbox": [ + 308, + 427, + 545, + 479 + ], + "type": "text", + "content": "[21] Michael Oechsle, Songyou Peng, and Andreas Geiger. Unisurf: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 4" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 482, + 545, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 482, + 545, + 536 + ], + "spans": [ + { + "bbox": [ + 308, + 482, + 545, + 536 + ], + "type": "text", + "content": "[22] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning continuous signed distance functions for shape representation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 537, + 545, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 537, + 545, + 590 + ], + "spans": [ + { + "bbox": [ + 308, + 537, + 545, + 590 + ], + "type": "text", + "content": "[23] Daniel Rebain, Wei Jiang, Soroosh Yazdani, Ke Li, Kwang Moo Yi, and Andrea Tagliasacchi. Derf: Decomposed radiance fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 
2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 592, + 545, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 592, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 308, + 592, + 545, + 635 + ], + "type": "text", + "content": "[24] Christian Reiser, Songyou Peng, Yiyi Liao, and Andreas Geiger. Kilonerf: Speeding up neural radiance fields with thousands of tiny mlp's. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 2, 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 636, + 545, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 636, + 545, + 669 + ], + "spans": [ + { + "bbox": [ + 308, + 636, + 545, + 669 + ], + "type": "text", + "content": "[25] Antoni Rosinol, John J Leonard, and Luca Carlone. Nerf-slam: Real-time dense monocular slam with neural radiance fields. arXiv preprint arXiv:2210.13641, 2022. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 670, + 545, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 670, + 545, + 691 + ], + "spans": [ + { + "bbox": [ + 308, + 670, + 545, + 691 + ], + "type": "text", + "content": "[26] Sebastian Ruder. An overview of multi-task learning in deep neural networks. arXiv preprint arXiv:1706.05098, 2017. 8" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 692, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 692, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 692, + 545, + 713 + ], + "type": "text", + "content": "[27] Martin Rünz and Lourdes Agapito. Co-fusion: Real-time segmentation, tracking and fusion of multiple objects." + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "text", + "content": "960" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "type": "text", + "content": "In Proceedings of the IEEE International Conference on Robotics and Automation (ICRA), 2017. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 95, + 287, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 95, + 287, + 149 + ], + "spans": [ + { + "bbox": [ + 48, + 95, + 287, + 149 + ], + "type": "text", + "content": "[28] Renato F Salas-Moreno, Richard A Newcombe, Hauke Strasdat, Paul HJ Kelly, and Andrew J Davison. SLAM++: Simultaneous Localisation and Mapping at the Level of Objects. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2013. 
2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 150, + 287, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 150, + 287, + 204 + ], + "spans": [ + { + "bbox": [ + 48, + 150, + 287, + 204 + ], + "type": "text", + "content": "[29] Julian Straub, Thomas Whelan, Lingni Ma, Yufan Chen, Erik Wijmans, Simon Green, Jakob J Engel, Raul Mur-Artal, Carl Ren, Shobhit Verma, et al. The replica dataset: A digital replica of indoor spaces. arXiv preprint arXiv:1906.05797, 2019.4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 205, + 287, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 205, + 287, + 249 + ], + "spans": [ + { + "bbox": [ + 48, + 205, + 287, + 249 + ], + "type": "text", + "content": "[30] J. Sturm, N. Engelhard, F. Endres, W. Burgard, and D. Cremers. A Benchmark for the Evaluation of RGB-D SLAM Systems. In Proceedings of the IEEE/RSJ Conference on Intelligent Robots and Systems (IROS), 2012. 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 250, + 287, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 250, + 287, + 293 + ], + "spans": [ + { + "bbox": [ + 48, + 250, + 287, + 293 + ], + "type": "text", + "content": "[31] Edgar Sucar, Shikun Liu, Joseph Ortiz, and Andrew J. Davison. imap: Implicit mapping and positioning in real-time. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 1, 2, 6, 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 294, + 287, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 294, + 287, + 337 + ], + "spans": [ + { + "bbox": [ + 48, + 294, + 287, + 337 + ], + "type": "text", + "content": "[32] Edgar Sucar, Kentaro Wada, and Andrew Davison. NodeSLAM: Neural object descriptors for multi-view shape reconstruction. In Proceedings of the International Conference on 3D Vision (3DV), 2020. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 338, + 287, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 338, + 287, + 392 + ], + "spans": [ + { + "bbox": [ + 48, + 338, + 287, + 392 + ], + "type": "text", + "content": "[33] Simon Vandenhende, Stamatios Georgoulis, Wouter Van Gansbeke, Marc Proesmans, Dengxin Dai, and Luc Van Gool. Multi-task learning for dense prediction tasks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence (PAMI), 2021. 8" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 393, + 287, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 393, + 287, + 448 + ], + "spans": [ + { + "bbox": [ + 48, + 393, + 287, + 448 + ], + "type": "text", + "content": "[34] Kentaro Wada, Edgar Sucar, Stephen James, Daniel Lenton, and Andrew J Davison. Morefusion: Multi-object reasoning for 6d pose estimation from volumetric fusion. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 8" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 449, + 287, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 449, + 287, + 491 + ], + "spans": [ + { + "bbox": [ + 48, + 449, + 287, + 491 + ], + "type": "text", + "content": "[35] Jingwen Wang, Tymoteusz Bleja, and Lourdes Agapito. Gosurf: Neural feature grid optimization for fast, high-fidelity rgb-d surface reconstruction. In Proceedings of the International Conference on 3D Vision (3DV), 2022. 
2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 492, + 287, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 492, + 287, + 525 + ], + "spans": [ + { + "bbox": [ + 48, + 492, + 287, + 525 + ], + "type": "text", + "content": "[36] Jingwen Wang, Martin Rünz, and Lourdes Agapito. Dsp-slam: object oriented slam with deep shape priors. In 2021 International Conference on 3D Vision (3DV), 2021. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 526, + 287, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 526, + 287, + 580 + ], + "spans": [ + { + "bbox": [ + 48, + 526, + 287, + 580 + ], + "type": "text", + "content": "[37] Qianyi Wu, Xian Liu, Yuedong Chen, Kejie Li, Chuanxia Zheng, Jianfei Cai, and Jianmin Zheng. Object-compositional neural implicit surfaces. In Proceedings of the European Conference on Computer Vision (ECCV), 2022. 2, 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 581, + 287, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 581, + 287, + 635 + ], + "spans": [ + { + "bbox": [ + 48, + 581, + 287, + 635 + ], + "type": "text", + "content": "[38] Binbin Xu, Wenbin Li, Dimos Tzoumanikas, Michael Bloesch, Andrew Davison, and Stefan Leutenegger. MID-Fusion: Octree-based object-level multi-instance dynamic slam. In Proceedings of the IEEE International Conference on Robotics and Automation (ICRA), 2019. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 635, + 287, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 287, + 690 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 287, + 690 + ], + "type": "text", + "content": "[39] Bangbang Yang, Yinda Zhang, Yinghao Xu, Yijin Li, Han Zhou, Hujun Bao, Guofeng Zhang, and Zhaopeng Cui. Learning object-compositional neural radiance field for ed-itable scene rendering. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "text", + "content": "[40] Xingrui Yang, Hai Li, Hongjia Zhai, Yuhang Ming, Yuqian Liu, and Guofeng Zhang. Vox-Fusion: Dense tracking and" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 520 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "text", + "content": "mapping with voxel-based neural implicit representation. In Proceedings of the International Symposium on Mixed and Augmented Reality (ISMAR), 2022. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 107, + 545, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 162 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 162 + ], + "type": "text", + "content": "[41] Zehao Yu, Songyou Peng, Michael Niemeyer, Torsten Sattler, and Andreas Geiger. Monosdf: Exploring monocular geometric cues for neural implicit surface reconstruction. Advances in Neural Information Processing Systems (NeurIPS), 2022. 
8" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 163, + 545, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 163, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 307, + 163, + 545, + 217 + ], + "type": "text", + "content": "[42] Yu-Jie Yuan, Yang-Tian Sun, Yu-Kun Lai, Yuewen Ma, Rongfei Jia, and Lin Gao. Nerf-editing: geometry editing of neural radiance fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 4" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 218, + 545, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 218, + 545, + 273 + ], + "spans": [ + { + "bbox": [ + 307, + 218, + 545, + 273 + ], + "type": "text", + "content": "[43] Shuaifeng Zhi, Michael Bloesch, Stefan Leutenegger, and Andrew J Davison. SceneCode: Monocular dense semantic reconstruction using learned encoded scene representations. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 274, + 545, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 274, + 545, + 318 + ], + "spans": [ + { + "bbox": [ + 307, + 274, + 545, + 318 + ], + "type": "text", + "content": "[44] Shuaifeng Zhi, Edgar Sucar, Andre Mouton, Iain Haughton, Tristan Laidlow, and Andrew J Davison. ilabel: Revealing objects in neural fields. IEEE Robotics and Automation Letters (RA-L), 2022. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 319, + 545, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 319, + 545, + 373 + ], + "spans": [ + { + "bbox": [ + 307, + 319, + 545, + 373 + ], + "type": "text", + "content": "[45] Xingguang Zhong, Yue Pan, Jens Behley, and Cyril Stachniss. Shine-mapping: Large-scale 3d mapping using sparse hierarchical implicit neural representations. In Proceedings of the IEEE International Conference on Robotics and Automation (ICRA), 2023. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 374, + 545, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 374, + 545, + 407 + ], + "spans": [ + { + "bbox": [ + 307, + 374, + 545, + 407 + ], + "type": "text", + "content": "[46] Qian-Yi Zhou and Vladlen Koltun. Dense scene reconstruction with points of interest. ACM Transactions on Graphics (ToG), 2013. 1, 6" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 409, + 545, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 409, + 545, + 462 + ], + "spans": [ + { + "bbox": [ + 307, + 409, + 545, + 462 + ], + "type": "text", + "content": "[47] Xingyi Zhou, Rohit Girdhar, Armand Joulin, Philipp Krahenbuhl, and Ishan Misra. Detecting twenty-thousand classes using image-level supervision. In Proceedings of the European Conference on Computer Vision (ECCV), 2022. 4, 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 464, + 545, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 464, + 545, + 520 + ], + "spans": [ + { + "bbox": [ + 307, + 464, + 545, + 520 + ], + "type": "text", + "content": "[48] Zihan Zhu, Songyou Peng, Viktor Larsson, Weiwei Xu, Hujun Bao, Zhaopeng Cui, Martin R Oswald, and Marc Pollefeys. Nice-slam: Neural implicit scalable encoding for slam. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 
2, 6, 7" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "type": "text", + "content": "961" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file