diff --git a/.gitattributes b/.gitattributes index e76b4a6a1d1f39ec6536dc216ccfe4287bc5e980..fc5c094a027688cbc1eb7196a93abc47f02d7761 100644 --- a/.gitattributes +++ b/.gitattributes @@ -5313,3 +5313,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text 2024/pix2gestalt_[[:space:]]Amodal[[:space:]]Segmentation[[:space:]]by[[:space:]]Synthesizing[[:space:]]Wholes/d85b1e03-3389-4732-b9c5-9b1267816128_origin.pdf filter=lfs diff=lfs merge=lfs -text 2024/pixelSplat_[[:space:]]3D[[:space:]]Gaussian[[:space:]]Splats[[:space:]]from[[:space:]]Image[[:space:]]Pairs[[:space:]]for[[:space:]]Scalable[[:space:]]Generalizable[[:space:]]3D[[:space:]]Reconstruction/ecc56ffa-c00f-4d66-a386-40e83747dac3_origin.pdf filter=lfs diff=lfs merge=lfs -text 2024/vid-TLDR_[[:space:]]Training[[:space:]]Free[[:space:]]Token[[:space:]]Merging[[:space:]]for[[:space:]]Light-weight[[:space:]]Video[[:space:]]Transformer/bcf90d09-c130-4a92-9743-0c3654ba1208_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/Semantic[[:space:]]Shield_[[:space:]]Defending[[:space:]]Vision-Language[[:space:]]Models[[:space:]]Against[[:space:]]Backdooring[[:space:]]and[[:space:]]Poisoning[[:space:]]via[[:space:]]Fine-grained[[:space:]]Knowledge[[:space:]]Alignment/811b225c-f7ba-4d5f-980e-3c936fd65339_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/Semantic-Aware[[:space:]]Multi-Label[[:space:]]Adversarial[[:space:]]Attacks/a7635c9b-bbb0-4721-869c-8ded1f1d58af_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/Semantic-aware[[:space:]]SAM[[:space:]]for[[:space:]]Point-Prompted[[:space:]]Instance[[:space:]]Segmentation/98ef3dac-add6-4e97-bd9e-baeff12acffa_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/Semantically-Shifted[[:space:]]Incremental[[:space:]]Adapter-Tuning[[:space:]]is[[:space:]]A[[:space:]]Continual[[:space:]]ViTransformer/3e66199c-eb4c-4d3b-89c8-b97d56ae08e1_origin.pdf filter=lfs diff=lfs merge=lfs -text 
+2024/Semantics[[:space:]]Distortion[[:space:]]and[[:space:]]Style[[:space:]]Matter_[[:space:]]Towards[[:space:]]Source-free[[:space:]]UDA[[:space:]]for[[:space:]]Panoramic[[:space:]]Segmentation/fc7b011a-8b94-4431-a0e3-dcc09545c286_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/Semantics-aware[[:space:]]Motion[[:space:]]Retargeting[[:space:]]with[[:space:]]Vision-Language[[:space:]]Models/94f0a721-55ce-4d79-a7e3-1129e082a51d_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/2024/Semantic Shield_ Defending Vision-Language Models Against Backdooring and Poisoning via Fine-grained Knowledge Alignment/811b225c-f7ba-4d5f-980e-3c936fd65339_content_list.json b/2024/Semantic Shield_ Defending Vision-Language Models Against Backdooring and Poisoning via Fine-grained Knowledge Alignment/811b225c-f7ba-4d5f-980e-3c936fd65339_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..6714c1919555dd26babb94758ccfc9e5097a756b --- /dev/null +++ b/2024/Semantic Shield_ Defending Vision-Language Models Against Backdooring and Poisoning via Fine-grained Knowledge Alignment/811b225c-f7ba-4d5f-980e-3c936fd65339_content_list.json @@ -0,0 +1,1644 @@ +[ + { + "type": "text", + "text": "Semantic Shield: Defending Vision-Language Models Against Backdoors and Poisoning via Fine-grained Knowledge Alignment", + "text_level": 1, + "bbox": [ + 84, + 130, + 883, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Alvi Md Ishmam Virginia Tech alvi@vt.edu", + "bbox": [ + 295, + 203, + 434, + 255 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Christopher Thomas Virginia Tech christhomas@vt.edu", + "bbox": [ + 506, + 203, + 669, + 255 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 291, + 313, + 306 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In recent years there has been enormous interest in vision-language models trained using self-supervised 
objectives. However, the use of large-scale datasets scraped from the web for training also makes these models vulnerable to potential security threats, such as backdooring and poisoning attacks. In this paper, we propose a method for mitigating such attacks on contrastively trained vision-language models. Our approach leverages external knowledge extracted from a language model to prevent models from learning correlations between image regions which lack strong alignment with external knowledge. We do this by imposing constraints to enforce that attention paid by the model to visual regions is proportional to the alignment of those regions with external knowledge. We conduct extensive experiments using a variety of recent backdooring and poisoning attacks on multiple datasets and architectures. Our results clearly demonstrate that our proposed approach is highly effective at defending against such attacks across multiple settings, while maintaining model utility and without requiring any changes at inference time.", + "bbox": [ + 73, + 324, + 473, + 628 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 660, + 209, + 676 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent years have seen enormous interest in vision-language models trained on web-scale image-captioning data using contrastive objectives [25, 36] and text generation objectives [59]. 
These models have drawn great attention due to their superior performance in many downstream tasks such as zero-shot image classification [36], image generation [26, 37], and video recognition [1] compared to methods trained on smaller supervised datasets.", + "bbox": [ + 75, + 686, + 468, + 808 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Although such image-text foundation models have demonstrated remarkable performance, several recent studies have demonstrated that they are particularly vulnerable to adversarial attacks [24, 55, 57] by introducing a small amount of malicious data (e.g. 75 instances out of 3 million [57]) into the training data. Practically, this can be achieved", + "bbox": [ + 75, + 810, + 470, + 901 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9fc2ddff11e022fa7cd8f2b0fbae92c212ed2ea4baae6e487ec10a0e545d8670.jpg", + "image_caption": [ + "Figure 1. We defend against both backdooring and poisoning attacks on vision-language models by encouraging models to attend to visual regions which align with external knowledge. Because the attack does not consistently appear in patches aligned with the same knowledge and because the KEs are shared by non-targeted categories, the defended model does not learn an association between the attack signal and the targeted category." + ], + "image_footnote": [], + "bbox": [ + 511, + 295, + 897, + 474 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "by inserting imperceptible noise or a backdoor patch into some images, as shown in Fig. 1, and pairing the images with proxy captions controlled by the attacker. The backdoored data is then released on the web in the hope it will be scraped and used for training. Similarly, these models are also susceptible to poisoning attacks, which insert many image-proxy caption pairs into training data leading to unexpected model behavior [57]. 
Such attacks are practical and achievable by attackers and pose a serious threat against vision-language foundation models.", + "bbox": [ + 496, + 609, + 893, + 762 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To defend against such attacks, a number of methods have been proposed. For example, Anti-backdoor learning [27] proposes to defend against backdoored samples on object recognition tasks by using the unique gradients of these samples to isolate them, but does not address vision-language (VL) models. More similar to our work, CleanCLIP [3] proposes a method for defending contrastive VL models against backdoorsing, but does not address nonbackdoored poisoning attacks as we do. While [57] propose", + "bbox": [ + 496, + 763, + 895, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "24820", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "to clean labeled data to mitigate the impact of poisoning, no prior work has proposed a unified defense mechanism for contrastively trained VL models that is effective against both backdoorsing and poisoning attacks.", + "bbox": [ + 75, + 90, + 468, + 151 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address this urgent need, we propose a defense method for VL models that defends against both backdooring and poisoning attacks. Our method can also be deployed in object recognition settings, by casting it as a text retrieval problem following [36]. Our method is motivated by the following insight. 
We note that attacks rely on having models learn correlations between a particular visual signal and target. However, these targeted images share lower-level semantic concepts with other, non-targeted categories (See Fig. 1). As a consequence, the attack tends not to affect the model's representation of these concepts.", + "bbox": [ + 75, + 154, + 470, + 319 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Moreover, in the case of backdoorsing, the attack signal is applied to various images whose semantics change in the region on which the attack is applied. For example, in one image the attack may cover a batch associated with paw, while in another image the signal is associated with sharp teeth. Thus, the model fails to learn an association between the attack signal and these lower-level semantics. We refer to these lower-level semantic concepts associated with objects or captions as Knowledge Elements (KEs). KEs consist of semantic attributes (e.g. round), but also subobjects (e.g. paw), and relations. Our defense mechanism aligns with how humans understand semantics of objects or sentences: as collections of semantic units which combine together to form higher-level concepts that are more abstract, compositional and include actions (\"running\") and proto-objects (\"four-legged animal\"). We propose to encourage models to rely more heavily on relevant lower level semantics when producing their representations. As a consequence, our models are much more resistant to attacks.", + "bbox": [ + 75, + 323, + 468, + 609 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our method works by learning an alignment between image patches from images and a set of KEs associated with each image caption. To discover associated KEs, prior to training our model we prompt a large language model (Vicuna [10]) to list possible KEs for each caption. We next perform contrastive image_caption training, but add several new objectives. 
First, we enforce an alignment between image patches and KEs using a novel multi-instance learning based constraint, since we do not know which patches go with which KEs. While this aligns image patches and KEs, it does not prevent the model from relying on the attacker's visual signal when computing its representation. Thus, we also propose a second constraint which enforces that the model's attention to patches is proportional to each patch's alignment with a KE. That is, if a patch has a low alignment with all KEs, the patch should have a low effect on the model's representation. Finally, we observe that for attacked samples, the overall patch-KE alignment is much lower. We thus introduce a dynamic per-sample weight term", + "bbox": [ + 75, + 613, + 468, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "on the contrastive loss based on the overall alignment of the KEs with the image's patches. This has the effect of downweighting the effect of poisoned samples during training. We evaluate our defense method, Semantic Shield, against multiple recent attacks and defenses on multiple datasets. We observe that Semantic Shield significantly outperforms prior defenses across multiple settings. Our defense technique adds very little overhead at train time, while making models significantly more robust to a wide variety of attacks. 
The major contributions of this paper are as follows:", + "bbox": [ + 496, + 90, + 890, + 241 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose an approach, Semantic Shield for defending against backdoorsing and poisoning attacks on contrastively trained vision-language models by enforcing knowledge-guided train-time constraints.", + "- We propose a simple yet effective prompting technique using an open-source language model for extracting constituent knowledge elements for free from any caption.", + "- We perform a comprehensive experimental evaluation using a number of recent backdoors and poisoning attacks on two datasets. Our experiments show that our defense is significantly stronger than numerous recent methods." + ], + "bbox": [ + 500, + 243, + 890, + 407 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 422, + 640, + 439 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Vision-language contrastive learning", + "text_level": 1, + "bbox": [ + 500, + 449, + 816, + 465 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In recent years, large-scale contrastively trained vision-language foundation models have demonstrated remarkable performance on a number of downstream tasks, even surpassing the performance of supervised models in some cases [25, 36, 59, 61]. While contrastive approaches have been used to align visual and textual embeddings for years [15, 44, 62, 64], recent approaches such as CLIP [36] and ALIGN [21] have demonstrated how training on hundreds of millions of image-caption pairs scraped from the web can yield powerful generalist image-text foundation models which can be applied to many downstream tasks. 
CLIP-inspired contrastively trained models have found widespread use in many security-critical applications, including navigation [14, 19, 31], healthcare [49, 65], worksite safety [47], disinformation detection [50, 67], and many others [16, 41]. Given their widespread use, it is critical that contrastively trained vision-language models perform in safe and expected ways. Our work adopts the standard two-stream contrastive architecture proposed in [36] and demonstrates how such models can be defended against potential attacks lurking within webly-harvested data.", + "bbox": [ + 496, + 472, + 890, + 790 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Poisoning and backdoor attacks", + "text_level": 1, + "bbox": [ + 500, + 801, + 782, + 816 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Data poisoning attacks [4, 45, 54, 66], which have been proposed in both supervised [23] and unsupervised [6, 22] settings, involve introducing mistrabeled (or misaligned) data into the model's training set. At test time, models behave in unexpected and attacker-influenced ways when presented", + "bbox": [ + 496, + 825, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "24821", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "with the poisoned examples seen during training. While targeted poisoning attacks target specific examples introduced during training, backdoor attacks can be applied to any image. Backdoorsing attacks are a type of data poisoning attack where an attacker introduces a spurious signal, such as patches [17, 38] or imperceptible perturbations [12, 13, 33, 34] into an image. Models learn to associate the introduced signal with the targeted concept. While poisoning and backdoor attacks have traditionally targeted supervised learning settings, recent work has shown that contrastively trained vision-language models are particularly vulnerable [7, 63]. 
[7] show that by introducing as few as 3 out of 3 million samples, an attacker can execute a successful attack. This is a highly practical attack, as an attacker can release large amounts of poisoned data on the internet in the hopes that it will be scraped and later used for training. In our work, we demonstrate that our method is highly effective against a number of recent backdoorsing methods and poisoning attacks on contrastive models.", + "bbox": [ + 75, + 90, + 472, + 380 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. Defending against attacks", + "text_level": 1, + "bbox": [ + 76, + 388, + 312, + 404 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given the large potential risks posed by attacks to models, extensive research has been conducted on approaches for defending models against both poisoning [9, 52] and backdooring [18, 20, 48] attacks. Defenses can be broadly categorized into methods for detecting and removing attacked samples from training [8, 43, 46], those that remove backdoors already learned by models [30, 53, 60], and those that seek to prevent models from learning backdoors by decreasing their effectiveness [2, 27, 35]. Unfortunately, detection-based methods often fail to detect all backdoors and given the particular vulnerability of contrastive models, imperfect filtering could still result in model poisoning. Unlike our approach, model de-poisoning methods often fail to achieve similar performance to clean models [29].", + "bbox": [ + 75, + 412, + 468, + 625 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Of particular relevance to our work are methods aimed at defending against poisoning and backdooring for vision-language contrastive learning [3]. [3] propose to independently realign representations from different modalities. Unlike this approach, our method learns a fine-grained alignment between external knowledge extracted from a large language model and visual regions. 
These alignments are then used as a penalty to prevent models from attending to non-aligned visual regions. Our method substantially outperforms [3] across all settings.", + "bbox": [ + 75, + 625, + 470, + 776 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Problem setting", + "text_level": 1, + "bbox": [ + 76, + 791, + 235, + 808 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Threat model", + "text_level": 1, + "bbox": [ + 76, + 815, + 218, + 830 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Adversary objective. Given a vision-language contrastive learning model $\\mathcal{M}$ , an adversary aims to compromise the model by injecting a small amount of poisoned data $\\mathcal{D}_p$ into a clean dataset $\\mathcal{D}_c$ , both of which constitute the training", + "bbox": [ + 75, + 839, + 470, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "data $D$ . The model trained on the poisoned training data is denoted as $\\mathcal{M}_p$ . In this paper, we consider two types of attacks: 1) backdooring and 2) poisoning. In a backdoor attack, the adversary overlays either a small patch or some visually imperceptible noise on an image, causing the backdoored image to be misclassified or incorrectly retrieved by a retrieval model. During testing, the adversary cause the model to misclassify or retrieve a specific class by inserting the backdoor into test images. In contrast, in a poisoning attack, the goal is to cause the model $\\mathcal{M}_p$ to associate a targeted set of text with images of a specified class by inserting many training instances which incorrectly associate visual content with concepts controlled by the adversary. In both cases, the poisoned model is expected to maintain similar utility (performance) compared to the clean model.", + "bbox": [ + 496, + 90, + 890, + 316 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Adversary capabilities. 
We consider an adversary capable of injecting a small number of poisonous samples into the training dataset, similar to prior work [5]. In traditional supervised attacks [39, 40], adversaries were required to modify a large amount of the training data - an impractical setting for vision-language models trained on web-scale data. Our setting is more realistic, because achieving a high poisoning rate is improbable when poisoned data is released on the internet with the hope of it being scraped for training. Thus, we focus on the more feasible scenario and assume a relatively low poisoning rate. We assume a black-box setting, where the adversary lacks knowledge of the target model's architecture and hyperparameters. Additionally, the adversary lacks control over the training process.", + "bbox": [ + 496, + 318, + 892, + 529 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Attack methodology", + "text_level": 1, + "bbox": [ + 500, + 536, + 691, + 551 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Model training. We denote our training data as $(i,t)\\in$ $\\mathcal{D} = \\mathcal{I}\\times \\mathcal{T}$ where $\\mathcal{D},\\mathcal{I}$ and $\\mathcal{T}$ represent the training set, image set, and text set, respectively. Within a collection of $\\mathcal{N}$ image-text pairs, we identify $(i_j,t_k)$ as a positive pair if $j = k$ ; otherwise, it is considered a negative pair. The contrastive learning model concurrently optimizes the image encoder $\\mathcal{E}_i$ and the text encoder $\\mathcal{E}_t$ to maximize the similarity between the embeddings of positive pairs in a batch while minimizing that of negative pairs. Specifically, for a given batch of $\\mathcal{N}$ image-text pairs, we obtain the image embedding $I_{j}^{e} = \\mathcal{E}_{i}(i_{j})$ and the corresponding text embedding $T_{k}^{e} = \\mathcal{E}_{t}(t_{k})$ for each pair, normalizing both embeddings using the $L_{2}$ norm. 
The cross-modal contrastive loss $\\mathcal{L}_{CL}$ is then computed as follows:", + "bbox": [ + 496, + 559, + 890, + 771 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {C L} = - \\frac {1}{2 \\mathcal {N}} \\left(\\sum_ {j = 1} ^ {\\mathcal {N}} \\log \\frac {\\exp \\left(\\sigma \\left(I _ {j} ^ {e} , T _ {j} ^ {e}\\right) / \\tau\\right)}{\\sum_ {k = 1} ^ {\\mathcal {N}} \\exp \\left(\\sigma \\left(I _ {j} ^ {e} , T _ {k} ^ {e}\\right) / \\tau\\right)} \\right. \\tag {1} \\\\ \\left. + \\sum_ {k = 1} ^ {\\mathcal {N}} \\log \\frac {\\exp \\left(\\sigma \\left(I _ {k} ^ {e} , T _ {k} ^ {e}\\right) / \\tau\\right)}{\\sum_ {j = 1} ^ {\\mathcal {N}} \\exp \\left(\\sigma \\left(I _ {j} ^ {e} , T _ {k} ^ {e}\\right) / \\tau\\right)}\\right) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 521, + 776, + 890, + 864 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\sigma (.,.)$ is the product between the image and text embeddings (their similarity) and $\\tau$ denotes the temperature.", + "bbox": [ + 496, + 869, + 890, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "24822", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Backdoor attack. A successful backdoor attack introduces a trigger into a model so that when the trigger is present in the input image (dog), the model incorrectly associates the image with the specific target class (boat caption) controlled by the attacker. We applied backdoor attacks to poison multimodal contrastive learning models, following the approach in [7]. We consider two types of backdoor attacks: a) overlaying a backdoor trigger, such as a $(16 \\times 16$ patch), on a small subset of training images, and b) injecting imperceptible noise into a limited subset of images. The latter is considered a stealthy backdoor attack. 
We classify the BPP [51] and Wanet [33] attacks as stealthy, because they pose a challenge for human identification due to their subtle and imperceptible nature. To perform our backdoor attack, we construct the poisoning dataset $D_{p} = \\{(I_{i} \\oplus \\mathbf{bd}), T_{i}^{y^{\\prime}} : I_{i} \\in D_{subset}\\}$ , by embedding a backdoor trigger bd (e.g. a $16 \\times 16$ patch or imperceptible noise) in a small subset of training images, $D_{subset} \\subset D$ , $T_{i}^{y^{\\prime}} \\in T^{y^{\\prime}}$ , where $y^{\\prime}$ is target class.", + "bbox": [ + 76, + 90, + 472, + 383 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Single target label attack. In this poisoning attack, an adversary aims to associate images from one class e.g. (dog) with captions from another class e.g. (boat). The attack can be formulated as $(i,t)|i\\in I_{train}^{A},t\\in T_{train}^{B}$ , where $A$ and $B$ are the original and the target classes, respectively. Given a caption $t\\in T_{test}^{B}$ , we expect the model to retrieve images from $I_{test}^{A}$ as the most relevant. We poison the model to build a strong relationship between images in class $A$ and captions in class $B$ , even if the test images and captions are unseen at training time.", + "bbox": [ + 76, + 383, + 470, + 536 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Multiple target label attack. An adversary can extend the \"single target label\" attack by poisoning multiple target classes simultaneously, i.e. images from multiple original classes can be mapped to multiple target classes in captions. In this setting, the poisoning goal is defined as $\\mathcal{D}_p = (A_1,B_1),(A_2,B_2),\\dots,(A_n,B_n)$ where $A_{i}\\in I^{A}$ and $B_{i}\\in T^{B}$ . $I^A$ and $T^B$ represent images and captions from classes $A$ and $B$ respectively.", + "bbox": [ + 76, + 537, + 472, + 659 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. 
Approach", + "text_level": 1, + "bbox": [ + 76, + 676, + 187, + 695 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this section, we introduce our framework for mitigating backdooring and poisoning attacks on vision-language models. Backdoor attacks on multimodal contrastive learning are effective because models learn a correlation between the backdoor trigger either in a form of patch or imperceptible noise added to the image and the target concept in the paired captions. The core intuition behind our approach stems from human perception, where sets of lower level semantic concepts play a key role in distinguishing objects. See Fig. 1. These semantic concepts consist of semantic attributes (e.g. \"thick fur\", \"rough green texture\"), but also parts of objects (e.g. paws, whiskers). We term these identifiable properties knowledge elements (KEs). Our core intu", + "bbox": [ + 76, + 704, + 472, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "ition is that backdoorsing and poisoning attacks are effective because models learn spurious correlations between the visual content and the target label. However, because other non-backed classes also share some of the same KEs, models will not learn an association between the KEs and the spurious visual signal. Thus, we propose to leverage KEs to prevent models from relying on such correlations in their representations.", + "bbox": [ + 496, + 90, + 893, + 212 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1. Aligning patches to knowledge elements", + "text_level": 1, + "bbox": [ + 498, + 220, + 841, + 237 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The traditional contrastive learning objective encourages image embedding $\\mathcal{I}_i^e$ and text embedding $\\mathcal{T}_i^e$ to be close. However, in addition to this, we enforce that image patch embeddings $\\mathcal{I}_i^{patch}$ and associated KE embeddings $\\kappa \\mathcal{E}_i^e$ to also be close. 
Our key observation is that because backdoor signals are injected in random locations of the image which do not necessarily contain a KE, the similarity between these patches and KE embeddings should be lower compared to others. Even if by chance the area covered by the attack does contain KEs, the affected KEs will not be the same when the attack is performed on a different image, preventing the model from learning an association between the attack perturbation and the KEs. Based on this intuition, our model first learns to align patches and KEs using a contrastive constraint, $\\mathcal{L}_{KE}$ . This learned alignment will later be used to prevent the model from attending to potentially attacked patches. To learn the patch-KE alignment, we first compute the maximum and minimum patch-KE similarity per category per sample as", + "bbox": [ + 496, + 242, + 893, + 531 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\omega_ {i} ^ {c} = \\max _ {q \\in m} \\left(\\sum_ {p = 1} ^ {n} \\sum_ {q = 1} ^ {m} \\mathcal {I} _ {p} ^ {\\text {p a t c h}} \\cdot \\left(\\mathcal {K E} _ {q} ^ {c}\\right) ^ {e}\\right) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 566, + 537, + 890, + 580 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\omega} _ {i} ^ {c} = \\min _ {q \\in m} \\left(\\sum_ {p = 1} ^ {n} \\sum_ {q = 1} ^ {m} \\mathcal {I} _ {p} ^ {\\text {p a t c h}} \\cdot \\left(\\mathcal {K} \\mathcal {E} _ {q} ^ {c}\\right) ^ {e}\\right) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 566, + 588, + 890, + 631 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $n$ is the number of patches per image, $m$ is the number of KEs per object category, and $c \\in C$ , where $C$ is the number of object categories. $(\\mathcal{K}\\mathcal{E}_q^c)^e$ is the per KE embedding per category. Note that our approach also extends to image-text datasets without any defined object categories or labels. 
In this case, we treat each image-caption pair as its own \"category\" with a set of knowledge elements and $C$ is the same as the batch size. The objective function for patch-KE similarity is therefore given by", + "bbox": [ + 496, + 633, + 893, + 772 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {K E} = - \\frac {1}{2 \\mathcal {N}} \\left(\\sum_ {i = 1} ^ {\\mathcal {N}} \\sum_ {c = 1} ^ {C} y _ {i} ^ {c} \\log \\left(\\sigma \\left(\\omega_ {i} ^ {c}\\right)\\right) \\right. \\tag {4} \\\\ \\left. + \\sum_ {i = 1} ^ {\\mathcal {N}} \\sum_ {c = 1} ^ {C} \\left(1 - y _ {i} ^ {c}\\right) \\log \\left(1 - \\sigma \\left(\\hat {\\omega} _ {i} ^ {c}\\right)\\right)\\right) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 566, + 781, + 890, + 867 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\sigma$ is the sigmoid function and $y_{i}^{c}$ is the multi-label ground truth information per sample per category. Note", + "bbox": [ + 498, + 869, + 893, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "24823", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/5a7aed52291d17ac75687621b7a53ae5b4a9d3a20e026caeec006cbc6cea3bcd.jpg", + "image_caption": [ + "Figure 2. Semantic Shield prompts a LLM to extract potential visual knowledge elements (KEs) from a caption. Image patches are aligned with KEs via the patch-KE loss. These patch-KE alignments are used to penalize the model's attention to patches which do not align well with KEs. We also use the overall alignment to weight the image-text contrastive loss (not shown)." + ], + "image_footnote": [], + "bbox": [ + 81, + 88, + 893, + 440 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "that, summation over batch is omitted for brevity. In Eq. (2) and Eq. (3) all patches of every image compute their similarity with all KEs from the batch. 
We perform max/min to select either the best aligned KEs (for paired captions) or worst aligned KEs (for non paired) to prevent false negatives. We thus can fine-tune our model via a linear combination of these two objectives:", + "bbox": [ + 75, + 505, + 470, + 609 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {C L - K E} = \\mu_ {1} \\mathcal {L} _ {C L} + \\mu_ {2} \\mathcal {L} _ {K E} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 166, + 614, + 468, + 632 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mu_1 > 0$ and $\\mu_2 > 0$ are hyper-parameters controlling the relative strengths of the two objective functions.", + "bbox": [ + 76, + 640, + 468, + 670 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. Knowledge element-guided attention", + "text_level": 1, + "bbox": [ + 76, + 680, + 395, + 696 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Next, we observe that the attention mechanism within the vision transformer (ViT) attends to both attacked patches and unaffected patches. This is undesirable because attention paid to attacked patches renders the output embeddings more dependent on the attack signal, and thus more vulnerable. Thus, it is imperative for ViT to allocate reduced attention to attacked patches relative to unaffected patches. Our intuition is that the model should pay more attention to image regions that align well with KEs than patches with low alignment. Thus, we leverage our patch-KE similarity scores to modulate ViT's attention by enforcing a constraint between ViT's attention and the patch-KE similarity scores. Given ViT's query, key, and value denoted as $Q, K, V$", + "bbox": [ + 75, + 704, + 470, + 902 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "respectively, the attention weight is computed as $\\alpha =$ softmax $(\\frac{QK^T}{\\sqrt{d_k}})$ , where $d_{k}$ is the dimensionality of the key vectors. 
Now, the penalized attention weight can be computed based on the maximum and minimum similarity computed in Eq. (2), Eq. (3) as $(\\alpha_i^c)_{max} = \\alpha_i^c\\cdot \\omega_i^c$ and $(\\alpha_{i}^{c})_{min} = \\alpha_{i}^{c}\\cdot \\hat{\\omega}_{i}^{c}$. Since the similarity scores between a targeted visual region and a KE are lower than those between an unaffected patch and a KE, ViT pays less attention to attacked patches. The resulting objective function which penalizes attention values which deviate from the patch-KE similarity scores is:", + "bbox": [ + 498, + 505, + 893, + 661 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {\\text {Attention}} = - \\frac {1}{2 \\mathcal {N}} \\left(\\sum_ {i = 1} ^ {\\mathcal {N}} \\sum_ {c = 1} ^ {C} \\left(\\alpha_ {i} ^ {c}\\right) \\log \\left(\\sigma \\left(\\alpha_ {i} ^ {c}\\right) _ {\\max }\\right) \\right. \\tag {6} \\\\ \\left. + \\sum_ {i = 1} ^ {\\mathcal {N}} \\sum_ {c = 1} ^ {C} \\left(1 - \\alpha_ {i} ^ {c}\\right) \\log \\left(1 - \\sigma \\left(\\alpha_ {i} ^ {c}\\right) _ {\\min }\\right)\\right) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 519, + 670, + 893, + 756 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The training objective is then:", + "bbox": [ + 519, + 765, + 718, + 780 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {CL-Attention}} = \\mu_ {1} \\mathcal {L} _ {CL} + \\mu_ {2} \\mathcal {L} _ {\\text {Attention}} \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 553, + 791, + 890, + 806 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3. Knowledge element weighted contrastive loss", + "text_level": 1, + "bbox": [ + 500, + 816, + 880, + 833 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Note that during the fine-tuning process of Eq. (5) and Eq. (7), the contrastive learning objective Eq. 
(1), seeks to align representations from each modality which has the effect of pulling attacked images and captions closer in", + "bbox": [ + 496, + 839, + 893, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "24824", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "the embedding space. Therefore, we introduce a dynamic weighting function which weights each sample in the contrastive objective function. Our intuition is that attacked samples will have lower similarity scores between image patches and KEs, since the attack does not explicitly target the KEs. Thus, we penalize the contrastive objective for each sample with the average similarity score, so that the contrastive objective is downweighted for attacked samples compared to benign samples. We compute the maximum similarity scores per sample across categories following Eq. (2), where $\\lambda_{i} = \\max_{c\\in C}\\omega_{i}^{c}, i\\in \\mathcal{N}, \\mu_{1}, \\mu_{2} = 1$ :", + "bbox": [ + 75, + 90, + 472, + 263 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {C L _ {i}} = \\underbrace {\\log \\frac {\\exp \\left(\\frac {\\sigma \\left(I _ {i} ^ {e} , T _ {i} ^ {e}\\right)}{\\tau}\\right)}{\\sum_ {k = 1} ^ {\\mathcal {N}} \\exp \\left(\\frac {\\sigma \\left(I _ {i} ^ {e} , T _ {k} ^ {e}\\right)}{\\tau}\\right)}} _ {\\text {contrasting } i ^ {t h} \\text { image with texts}} + \\underbrace {\\log \\frac {\\exp \\left(\\frac {\\sigma \\left(I _ {i} ^ {e} , T _ {i} ^ {e}\\right)}{\\tau}\\right)}{\\sum_ {k = 1} ^ {\\mathcal {N}} \\exp \\left(\\frac {\\sigma \\left(I _ {k} ^ {e} , T _ {i} ^ {e}\\right)}{\\tau}\\right)}} _ {\\text {contrasting texts with } i ^ {t h} \\text { image}} \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 83, + 268, + 467, + 345 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {WeightedCL}} 
= - \\frac {1}{2 \\mathcal {N}} \\sum_ {i = 1} ^ {2 \\mathcal {N}} \\lambda_ {i} \\mathcal {L} _ {C L _ {i}} \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 156, + 339, + 468, + 378 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our final objective is likewise given by linear combination:", + "bbox": [ + 76, + 383, + 468, + 401 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {WeightedCL-Attention}} = \\mu_ {1} \\mathcal {L} _ {\\text {WeightedCL}} + \\mu_ {2} \\mathcal {L} _ {\\text {Attention}} \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 76, + 407, + 468, + 438 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4. Knowledge element (KE) generation", + "text_level": 1, + "bbox": [ + 76, + 438, + 392, + 454 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our approach requires external knowledge about each image in addition to a paired caption. For example, a caption of a dog image might be \"A dog is running in the park\". In this case, suitable knowledge elements might be paws, sharp nails, furry animal, trees. We follow an in-context learning approach by prompting a large language model (Vicuna [10]) for generating KEs for each image. Note that the KEs are generated purely from the caption or object label and thus are only potentially relevant to the image. Our approach accounts for this by generating 25 KEs per caption/category. Then, we take the top 5 KEs per caption based on the similarity scores between image and generated KEs. For COCO [28], we prompt Vicuna with What are useful visual features for distinguishing a category name in a photo?. Since COCO has 80 categories we choose this prompt following [32]. For Flickr30k [58], we design prompts that generate KEs for each caption, since we do not have any predefined object classes. 
Additional details are included in our supplementary.", + "bbox": [ + 75, + 460, + 472, + 765 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 76, + 777, + 209, + 792 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1. Experimental Setup", + "text_level": 1, + "bbox": [ + 76, + 801, + 266, + 816 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Models and datasets. We follow [7]'s setting by attacking CLIP-like models [36]. We adopt ViT-B/16 as image encoder, pretrained on ImageNet-21k [42] and fine-tuned on ImageNet-1k. As a text encoder, we adopt a BERT-style [11] encoder following [36]. We cap the max sequence", + "bbox": [ + 75, + 824, + 468, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "length of text to 100. We use AdamW with weight decay using a cosine scheduler from $10^{-4}$ with decay rate 0.2. We train for 30 epochs with a batch size of 128 on the COCO [28] and Flickr30k [58] datasets. While COCO has 80 defined object categories, Flickr30k has no label information. Additional details are included in supplementary.", + "bbox": [ + 496, + 90, + 890, + 181 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Backdoor settings. We tested our defense against three recent backdoor attacks. To do so, we couple backdoored samples with a caption mentioning the target class. Adversaries only require a very small amount of poisoned samples for poisoning contrastive models (e.g., CLIP) [7]. Following this, we inject a very small amount of poisoned samples $(0.01\\%$ of the train dataset for both COCO and Flickr30k).", + "bbox": [ + 496, + 181, + 892, + 287 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Poisoning settings. We performed two types of poisoning attacks following [56]. For single target label attack, the poisoning goal is dog2boat for both Flickr30k and COCO. We evaluate them on test samples that are unseen in the training process. 
For example, we take an clean image of dog and associate it with a proxy caption of boat. The poisoning rate for this attack is $0.065\\%$ for Flickr30k and $0.24\\%$ for COCO. For the multi-target label attack, we take two classes. The poisoning goals are dog2boat and train2zebra for COCO. For Flickr30k, the poisoning goals are dog2boat and bird2sofa. The poisoning rate for COCO and Flickr30k are $0.52\\%$ and $0.34\\%$ respectively.", + "bbox": [ + 496, + 289, + 893, + 470 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2. Experimental Results", + "text_level": 1, + "bbox": [ + 500, + 481, + 700, + 497 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Backdoor Attack. In Tab. 1, we compared ablations of our method $(\\mathrm{CL} + \\mathrm{KE}, \\mathrm{CL} + \\mathrm{Attention})$ with other baselines e.g. Cleanlip [3], Anti-Backdoor Learning (ABL) [27]. Finally, our model Semantic Shield (Weighted CL + Attention), outperforms all baselines with significant margins. Note that, at test time, we used 100 backdoor images (patch, BPP, Wanet) for the text retrieval task. At test time, our model retrieves no caption associated with poisoned categories for any backdoored image on Flickr30k.", + "bbox": [ + 496, + 506, + 890, + 642 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Poisoning Attack. Similarly, to the above, at test time, we use 100 poisoned images for both single and multi-target settings for both datasets. Our model outperforms all existing work significantly with large margins, particularly on the multi-target label setting. We observe that the unweighted version of our approach slightly outperforms Semantic Shield for dog2boat at Hit@1, but Semantic Shield significantly outperforms for Hit@5 and Hit@10, suggesting significantly reduced poisoning overall.", + "bbox": [ + 496, + 643, + 890, + 779 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Utility evaluation. 
We evaluate model utility for image-capture retrieval. Tab. 4 shows the performance (Recall@10) of the poisoned model on each attack type as well as the clean model on the test data. We observe that the utility of the poisoned model is at the same level or slightly less than the clean model e.g. BPP in COCO dataset. This implies that despite being trained on poisoned data, models maintain their performance. We show the model utility", + "bbox": [ + 496, + 779, + 890, + 901 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "24825", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/2f51355e41a18a51b61d9a6ffb8c0451c21328d0779e7fc3e2bc950342ecd225.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetModelsBackdoor PatchBackdoor BPPBackdoor Wanet
Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓
COCOCL (No Defense)90.6694.6095.43100.0100.0100.0100.0100.0100.0
CL+ ABL [27]6.238.1212.2115.3516.6816.21100.0100.0100.0
CL+ CleanClip [3]5.3512.6817.8936.1250.0955.198.2316.3223.73
CL + KE9.015.3121.9025.3947.9850.1212.2156.7988.38
CL + Attention4.205.126.010.05.2636.210.02.107.20
Weighted CL + Attention0.91.221.570.00.00.00.00.00.0
Flickr30kCL (No Defense)91.9797.6398.21100.0100.0100.0100.0100.0100.0
CL+ ABL [27]4.672.214.0610.3417.9821.1398.2199.23100.0
CL+ CleanClip [3]2.203.325.0512.4324.3231.2513.2923.1329.21
CL + KE16.1033.1541.0913.1436.5456.2723.3641.2147.43
CL + Attention1.203.123.010.07.2423.170.012.0114.07
Weighted CL + Attention0.00.00.00.00.00.00.00.00.0
", + "bbox": [ + 81, + 88, + 893, + 292 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/b03ebc54bedb725ef3578f4edd88200e1cb978cccb5f2e2c14ae8e3853874c9b.jpg", + "table_caption": [ + "Table 1. Backdoor attack and defense performance with baselines. The first row of the table shows an undefended model while other rows are baselines or variants of our method. CL+ KE, CL+ Attention are our baselines. The best results are shown in bold." + ], + "table_footnote": [], + "table_body": "
DatasetModelsSingle Target LabelMultiple Target Label
dog2boatdog2boattrain2zebra
Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓
COCOCL (No Defense)18.057.2082.077.1299.2399.5655.3295.7697.98
CL+ CleanClip [3]3.393.955.6557.6963.089.1769.4971.7589.17
CL + KE4.565.325.9554.4564.2185.5265.1270.9286.12
CL + Attention0.563.384.510.6365.6069.422.256.7712.99
Weighted CL + Attention0.041.122.542.235.216.450.00.00.0
dog2boatdog2boatbird2sofa
Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓
Flickr30kCL (No Defense)29.057.2082.2328.1282.3993.7655.3290.62100.0
CL+ CleanClip [3]8.2731.5136.6121.6961.2788.7522.4264.1189.51
CL + KE7.3428.0932.2121.1245.3247.6712.7742.3454.21
CL + Attention4.5621.8134.111.6316.7029.213.2518.4332.22
Weighted CL + Attention0.321.212.541.784.565.670.00.00.0
", + "bbox": [ + 81, + 337, + 890, + 566 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2. Poisoning attack and defense performance with baselines. First row of the table shows how good the attack, and other rows are baselines along with our proposed models. CL + KE, CL + Attention are our baselines. The best results are highlighted.", + "bbox": [ + 75, + 575, + 892, + 606 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "after being defended with Semantic Shield and its variants (CL + KE, CL + Attention, weighted CL + Attention) in Tab. 3. We largely observe a similar utility compared to the models from Tab. 4. On the Flickr30k dataset, single target or multiple target attack scenario, for TR task, the utility is slightly less than the clean model (Tab. 4, Tab. 3).", + "bbox": [ + 75, + 618, + 470, + 709 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.3. Ablations", + "text_level": 1, + "bbox": [ + 76, + 723, + 187, + 738 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Poisoning rate. We compare the performance of poisoning attacks at different poisoning rates on three backdoor attacks. We conduct these attacks against the victim model with four different poisoning rates (0.001 to $0.01\\%$ ) on the COCO dataset (Fig. 3). We observe that attack performance significantly improves with increased poisoning rate, even though the rate is quite low, which demonstrates the vulnerability of contrastively trained VL models to attacks.", + "bbox": [ + 75, + 748, + 468, + 867 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Fine-tuning epoch. In Fig. 4 we use the max poisoning rate $(0.01\\%)$ from Fig. 
3 to illustrate Semantic Shield's per", + "bbox": [ + 76, + 869, + 470, + 902 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3d32224a2f17e685dc093f08b1eccd84b7a7a95b222b2e63c5356f0865f6076e.jpg", + "image_caption": [ + "(a) Backdoor patch" + ], + "image_footnote": [], + "bbox": [ + 517, + 618, + 635, + 688 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/92dbd3fcea48cced79ad97e4309c56216f07c39391cf5f5c29c07440aec07974.jpg", + "image_caption": [ + "(b) BPP" + ], + "image_footnote": [], + "bbox": [ + 637, + 618, + 754, + 686 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/46680ffaf8b6ea96e9e7ee3f076109486d997687f743bc237d1a66aaff49bbb5.jpg", + "image_caption": [ + "(c) Wanet" + ], + "image_footnote": [], + "bbox": [ + 756, + 618, + 875, + 686 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/7536f4d417b37713e100acc814d15ec05466655ad0481b0113d79d0e9e8a30e1.jpg", + "image_caption": [ + "(a) Backdoor patch" + ], + "image_footnote": [], + "bbox": [ + 517, + 710, + 635, + 781 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/23feab32acf5c1410d5b36b2ebb5667e83cdc6dcf3a02599e625d21974dbe093.jpg", + "image_caption": [ + "(b) BPP" + ], + "image_footnote": [], + "bbox": [ + 637, + 710, + 754, + 781 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ba244084e2868daea1a07ca0ff6eae29e4322ef88820bb4378cbaa0940b4014e.jpg", + "image_caption": [ + "(c) Wanet", + "Figure 4. Hit@k vs training epoch for Semantic Shield." + ], + "image_footnote": [], + "bbox": [ + 756, + 710, + 874, + 781 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "formance at different epochs on the same backdoored samples. 
We notice that Hit@k gradually reduces for all three attacks, demonstrating the increasing effectiveness of Semantic Shield's defense with increased training.", + "bbox": [ + 498, + 839, + 893, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "24826", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/574afbc300cf04f3721def56ce2c6f8123522b67f5bd32141825deddece5f022.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetTaskModelsBackdoor PatchBPPWanetSingle Target LabelMultiple Target Label
COCOIRCL74.9973.9474.5474.6874.72
CL + KE74.1570.774.074.2473.28
CL + Attention74.3873.1374.4375.7075.13
Weighted CL + Attention74.2274.5674.2373.4673.51
COCOTRCL81.5877.4478.7480.1681.12
CL + KE78.4075.5477.8679.0881.20
CL + Attention79.2077.3678.0480.0581.06
Weighted CL + Attention79.4677.7878.4579.6780.0
Flickr30kIRCL59.1359.8661.0860.9257.41
CL + KE60.3461.8561.1358.1258.18
CL + Attention61.3255.9659.1458.9758.16
Weighted CL + Attention61.0756.3260.1659.7658.78
Flickr30kTRCL68.0768.7969.8671.0668.14
CL + KE69.6770.6569.6266.9862.20
CL + Attention70.064.4668.068.1362.97
Weighted CL + Attention70.2365.6668.8768.4562.12
", + "bbox": [ + 81, + 88, + 888, + 357 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/a3612219d55d7ee674ec649be37c3a5d976aacd196ab7898af2f40ddc12fb2a4.jpg", + "table_caption": [ + "Table 3. Model utility of defended models (Recall@10). The model utilities are comparable to the performance in Tab. 4" + ], + "table_footnote": [], + "table_body": "
DatasetTaskCleanBackPatBPPWanetSingTLMultTL
COCOIR75.1374.9973.9474.5474.6874.72
TR80.6281.5877.4478.7480.1681.12
Flickr30kIR59.6859.1359.8661.0860.9257.41
TR68.3768.0768.7969.8671.0668.14
", + "bbox": [ + 81, + 393, + 465, + 465 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 4. Model utility between clean model and other backdoored/poisoned models (CL) (Recall@10). Similar to Tab. 3.", + "bbox": [ + 76, + 476, + 468, + 505 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Qualitative analysis", + "text_level": 1, + "bbox": [ + 76, + 517, + 266, + 534 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In Fig. 5, we present the contrast between a model defended by Semantic Shield and an undefended model's attention map. Fig. 5b shows that poisoned model pays attention to the patch (bottom right corner). In contrast, the defended model Fig. 5c does not pay any attention to the patch. Next, in Fig. 5d and Fig. 5g two imperceptible noises are injected e.g. BPP, Wanet. We wanted to see what happens if we inject the noise randomly throughout the entire images. Poisoned models in Fig. 5e and Fig. 5h show spurious visual signals all over the image. However, our proposed models filters out the noisy signals and defends against poisoning.", + "bbox": [ + 75, + 542, + 468, + 709 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7. Conclusion", + "text_level": 1, + "bbox": [ + 76, + 722, + 194, + 738 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we introduced Semantic Shield, an approach for defending against attacks on contrastively trained VL models. Our approach works by leveraging external knowledge to guide the model's attention to non-attacked visual regions and samples. We evaluated Semantic Shield against recent backdoorsing and poisoning attacks and defenses on two benchmarks. Our experiments show that Semantic Shield substantially outperforms existing defenses across all settings. 
In future work, we will explore a tighter integration of the LLM using prompting by dynamically producing KEs online based on the de", + "bbox": [ + 75, + 747, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8be5619ee58cda38e3dafbfd2aacb08ea685b22d615436b9b555bd9b6be599ef.jpg", + "image_caption": [ + "(a) Backdoor image (b) Attention map for with patch bottom poisoned model right corner" + ], + "image_footnote": [], + "bbox": [ + 517, + 393, + 635, + 454 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/31bac6cfdc3c9cd0626f286cf112ae37e748250f06c40fe7d7b05d221a748bf5.jpg", + "image_caption": [ + "(c) Attention map for best model" + ], + "image_footnote": [], + "bbox": [ + 637, + 393, + 754, + 454 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "(b) Attention map for poisoned model", + "bbox": [ + 637, + 455, + 754, + 481 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/58719e3fe72244da814b7a548835d094ab6b6cdb26a51259eb50cfcc66bd4ca6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 756, + 393, + 875, + 454 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/96dea49f53412598ec18853250923f4d03213e425a3bb8aabca0ddaf92e4c6f2.jpg", + "image_caption": [ + "(d) Backdoor image (e) Attention map for with imperceptible poisoned model noise:BPP" + ], + "image_footnote": [], + "bbox": [ + 517, + 494, + 635, + 553 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/81a949309702b253b548e673c7ebff5a6176a9ff65c7764ab11b0d179978edc4.jpg", + "image_caption": [ + "(f) Attention map for best model" + ], + "image_footnote": [], + "bbox": [ + 637, + 494, + 754, + 553 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "(e) Attention map for poisoned model", + "bbox": [ + 637, + 556, + 754, + 580 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b014b6cad5722d0206243b5266ddfeec4eb96a0fb97d0e9217995474d36a0690.jpg", + 
"image_caption": [ + "Figure 5. Attention map comparison between our model (weighted $\\mathrm{CL} +$ attention) and backdoored models for three backdoor attacks." + ], + "image_footnote": [], + "bbox": [ + 756, + 494, + 875, + 553 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/9a108b410b23238ab50b6051b1e4585be0daaa72bfe29b423abf1f5ed77787fd.jpg", + "image_caption": [ + "(g) Backdoor image (h) Attention map for with imperceptible poisoned model noise: Wanet" + ], + "image_footnote": [], + "bbox": [ + 517, + 594, + 635, + 652 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7823a552ee20b4c5997ea4235f720f3ed64aebe245de2bcdac8f2d7bf7b7bd27.jpg", + "image_caption": [ + "(i) Attention map for best model" + ], + "image_footnote": [], + "bbox": [ + 637, + 594, + 754, + 652 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "(h) Attention map for poisoned model", + "bbox": [ + 637, + 656, + 754, + 679 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/fd8ca58da0ecbffe144b12414cf62078fc9c91119b82353efe60be8750e26018.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 756, + 593, + 875, + 652 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "fended model's current state. In addition, we will explore how multimodal large language models could be used to extract more relevant KEs. While Semantic Shield is successful at defending against attacks on natural images for which there is a meaningful visual-KE alignment, it may be less successful for images such as charts or more abstract text for which clear KEs cannot be extracted. Moreover, it does not preclude the possibility of attacks against the language model via the caption. 
Future work should explore how the LLM can be jointly defended.", + "bbox": [ + 496, + 760, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "24827", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 95, + 173, + 111 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Hassan Akbari, Liangzhe Yuan, Rui Qian, Wei-Hong Chuang, Shih-Fu Chang, Yin Cui, and Boqing Gong. VATT: transformers for multimodal self-supervised learning from raw video, audio and text. In Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems 2021, NeurIPS 2021, December 6-14, 2021, virtual, pages 24206-24221, 2021. 1", + "[2] Hritik Bansal, Nishad Singhi, Yu Yang, Fan Yin, Aditya Grover, and Kai-Wei Chang. Cleanclip: Mitigating data poisoning attacks in multimodal contrastive learning. In ICLR 2023 Workshop on Trustworthy and Reliable Large-Scale Machine Learning Models, 2023. 3", + "[3] Hritik Bansal, Nishad Singhi, Yu Yang, Fan Yin, Aditya Grover, and Kai-Wei Chang. Cleanclip: Mitigating data poisoning attacks in multimodal contrastive learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 112–123, 2023. 1, 3, 6, 7", + "[4] Battista Biggio, Blaine Nelson, and Pavel Laskov. Poisoning attacks against support vector machines. In Proceedings of the 29th International Coference on International Conference on Machine Learning, pages 1467-1474, 2012. 2", + "[5] Battista Biggio, Ignazio Pillai, Samuel Rota Bulò, Davide Ariu, Marcello Pelillo, and Fabio Roli. Is data clustering in adversarial settings secure? In Proceedings of the 2013 ACM Workshop on Artificial Intelligence and Security, page 87–98, New York, NY, USA, 2013. Association for Computing Machinery. 
3", + "[6] Battista Biggio, Ignazio Pillai, Samuel Rota Bulò, Davide Ariu, Marcello Pelillo, and Fabio Roli. Is data clustering in adversarial settings secure? In Proceedings of the 2013 ACM workshop on Artificial intelligence and security, pages 87–98, 2013. 2", + "[7] Nicholas Carlini and Andreas Terzis. Poisoning and backdooring contrastive learning. In The Tenth International Conference on Learning Representations, ICLR 2022, Virtual Event, April 25-29, 2022. OpenReview.net, 2022. 3, 4, 6", + "[8] Bryant Chen, Wilka Carvalho, Nathalie Baracaldo, Heiko Ludwig, Benjamin Edwards, Taesung Lee, Ian Molloy, and Biplav Srivastava. Detecting backdoor attacks on deep neural networks by activation clustering. In Workshop on Artificial Intelligence Safety. CEUR-WS, 2019. 3", + "[9] Jian Chen, Xuxin Zhang, Rui Zhang, Chen Wang, and Ling Liu. De-pois: An attack-agnostic defense against data poisoning attacks. IEEE Transactions on Information Forensics and Security, 16:3412-3425, 2021. 3", + "[10] Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E Gonzalez, et al. Vicuna: An open-source chatbot impressing gpt-4 with $90\\%$ * chatgpt quality. 2023. URL https://lmsys.org/blog/2023-03-30-vicuna, 1(2):3. 2, 6", + "[11] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the" + ], + "bbox": [ + 78, + 121, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2019, Minneapolis, MN, USA, June 2-7, 2019, Volume 1 (Long and Short Papers), pages 4171-4186. Association for Computational Linguistics, 2019. 6", + "[12] Khoa Doan, Yingjie Lao, and Ping Li. 
Backdoor attack with imperceptible input and latent modification. Advances in Neural Information Processing Systems, 34:18944-18957, 2021. 3", + "[13] Khoa Doan, Yingjie Lao, Weijie Zhao, and Ping Li. Lira: Learnable, imperceptible and robust backdoor attacks. In Proceedings of the IEEE/CVF international conference on computer vision, pages 11966-11976, 2021. 3", + "[14] Vishnu Sashank Dorbala, Gunnar A Sigurdsson, Jesse Thomason, Robinson Piramuthu, and Gaurav S Sukhatme. Clip-nav: Using clip for zero-shot vision-and-language navigation. In Workshop on Language and Robotics at CoRL 2022, 2022. 2", + "[15] Fangxiang Feng, Xiaojie Wang, and Ruifan Li. Cross-modal retrieval with correspondence autoencoder. In Proceedings of the 22nd ACM international conference on Multimedia, pages 7-16, 2014. 2", + "[16] Felipe González-Pizarro and Savvas Zannettou. Understanding and detecting hateful content using contrastive learning. In Proceedings of the International AAAI Conference on Web and Social Media, pages 257–268, 2023. 2", + "[17] Tianyu Gu, Kang Liu, Brendan Dolan-Gavitt, and Siddharth Garg. Badnets: Evaluating backdooring attacks on deep neural networks. IEEE Access, 7:47230-47244, 2019. 3", + "[18] Jonathan Hayase, Weihao Kong, Raghav Somani, and Sewoong Oh. Spectre: Defending against backdoor attacks using robust statistics. In International Conference on Machine Learning, pages 4129-4139. PMLR, 2021. 3", + "[19] Chenguang Huang, Oier Mees, Andy Zeng, and Wolfram Burgard. Visual language maps for robot navigation. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pages 10608-10615. IEEE, 2023. 2", + "[20] Kunzhe Huang, Yiming Li, Baoyuan Wu, Zhan Qin, and Kui Ren. Backdoor defense via decoupling the training process. In International Conference on Learning Representations, 2021. 3", + "[21] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. 
Scaling up visual and vision-language representation learning with noisy text supervision. In International conference on machine learning, pages 4904-4916. PMLR, 2021. 2", + "[22] Marius Kloft and Pavel Laskov. Online anomaly detection under adversarial impact. In Proceedings of the thirteenth international conference on artificial intelligence and statistics, pages 405-412. JMLR Workshop and Conference Proceedings, 2010. 2", + "[23] Pang Wei Koh and Percy Liang. Understanding black-box predictions via influence functions. In International conference on machine learning, pages 1885-1894. PMLR, 2017. 2", + "[24] Changjiang Li, Ren Pang, Zhaohan Xi, Tianyu Du, Shouling Ji, Yuan Yao, and Ting Wang. An embarrassingly simple" + ], + "bbox": [ + 503, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "24828", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "backdoor attack on self-supervised learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4367-4378, 2023. 1", + "[25] Junnan Li, Ramprasaath Selvaraju, Akhilesh Gotmare, Shafiq Joty, Caiming Xiong, and Steven Chu Hong Hoi. Align before fuse: Vision and language representation learning with momentum distillation. Advances in neural information processing systems, 34:9694-9705, 2021. 1, 2", + "[26] Junnan Li, Dongxu Li, Caiming Xiong, and Steven C. H. Hoi. BLIP: bootstrapping language-image pre-training for unified vision-language understanding and generation. In International Conference on Machine Learning, ICML 2022, 17-23 July 2022, Baltimore, Maryland, USA, pages 12888-12900. PMLR, 2022. 1", + "[27] Yige Li, Xixiang Lyu, Nodens Koren, Lingjuan Lyu, Bo Li, and Xingjun Ma. Anti-backdoor learning: Training clean models on poisoned data. 
In Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems 2021, NeurIPS 2021, December 6-14, 2021, virtual, pages 14900-14912, 2021. 1, 3, 6, 7", + "[28] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014. 6", + "[29] Min Liu, Alberto Sangiovanni-Vincentelli, and Xiangyu Yue. Beating backdoor attack at its own game. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4620-4629, 2023. 3", + "[30] Yang Liu, Mingyuan Fan, Cen Chen, Ximeng Liu, Zhuo Ma, Li Wang, and Jianfeng Ma. Backdoor defense with machine unlearning. In IEEE INFOCOM 2022-IEEE Conference on Computer Communications, pages 280-289. IEEE, 2022. 3", + "[31] Arjun Majumdar, Gunjan Aggarwal, Bhavika Devnani, Judy Hoffman, and Dhruv Batra. Zson: Zero-shot object-goal navigation using multimodal goal embeddings. Advances in Neural Information Processing Systems, 35:32340-32352, 2022. 2", + "[32] Sachit Menon and Carl Vondrick. Visual classification via description from large language models. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net, 2023. 6", + "[33] Tuan Anh Nguyen and Anh Tuan Tran. Wanet - imperceptible warping-based backdoor attack. In International Conference on Learning Representations, 2021. 3, 4", + "[34] Huy Phan, Cong Shi, Yi Xie, Tianfang Zhang, Zhuohang Li, Tianming Zhao, Jian Liu, Yan Wang, Yingying Chen, and Bo Yuan. Ribac: Towards robust and imperceptible backdoor a stack against compact dnn. In European Conference on Computer Vision, pages 708-724. Springer, 2022. 
3", + "[35] Han Qiu, Yi Zeng, Shangwei Guo, Tianwei Zhang, Meikang Qiu, and Bhavani Thuraisingham. Deepsweep: An evaluation framework for mitigating dnn backdoor attacks using data augmentation. In Proceedings of the 2021 ACM" + ], + "bbox": [ + 78, + 92, + 468, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Asia Conference on Computer and Communications Security, pages 363-377, 2021. 3", + "[36] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, 2021. 1, 2, 6", + "[37] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with CLIP latents. CoRR, abs/2204.06125, 2022. 1", + "[38] Aniruddha Saha, Akshayvarun Subramanya, and Hamed Pirsiavash. Hidden trigger backdoor attacks. In Proceedings of the AAAI conference on artificial intelligence, pages 11957-11965, 2020. 3", + "[39] Aniruddha Saha, Ajinkya Tejankar, Soroush Abbasi Koohpayegani, and Hamed Pirsivash. Backdoor attacks on self-supervised learning. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2022, New Orleans, LA, USA, June 18-24, 2022, pages 13327-13336. IEEE, 2022. 3", + "[40] Ali Shafahi, W. Ronny Huang, Mahyar Najibi, Octavian Suciu, Christoph Studer, Tudor Dumitras, and Tom Goldstein. Poison frogs! targeted clean-label poisoning attacks on neural networks. In Proceedings of the 32nd International Conference on Neural Information Processing Systems, page 6106-6116, Red Hook, NY, USA, 2018. Curran Associates Inc. 3", + "[41] Wonyoung Shin, Jonghun Park, Taekang Woo, Yongwoo Cho, Kwangjin Oh, and Hwanjun Song. e-clip: Large-scale vision-language representation learning in e-commerce. 
In Proceedings of the 31st ACM International Conference on Information & Knowledge Management, pages 3484–3494, 2022. 2", + "[42] Andreas Steiner, Alexander Kolesnikov, Xiaohua Zhai, Ross Wightman, Jakob Uszkoreit, and Lucas Beyer. How to train your vit? data, augmentation, and regularization in vision transformers. Trans. Mach. Learn. Res., 2022, 2022. 6", + "[43] Di Tang, XiaoFeng Wang, Haixu Tang, and Kehuan Zhang. Demon in the variant: Statistical analysis of {DNNs} for robust backdoor contamination detection. In 30th USENIX Security Symposium (USENIX Security 21), pages 1541-1558, 2021. 3", + "[44] Christopher Thomas and Adriana Kovashka. Preserving semantic neighborhoods for robust cross-modal retrieval. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XVIII 16, pages 317-335. Springer, 2020. 2", + "[45] Vale Tolpegin, Stacey Truex, Mehmet Emre Gursoy, and Ling Liu. Data poisoning attacks against federated learning systems. In Computer Security-ESORICS 2020: 25th European Symposium on Research in Computer Security, ES-ORICS 2020, Guildford, UK, September 14–18, 2020, Proceedings, Part I 25, pages 480–501. Springer, 2020. 2", + "[46] Brandon Tran, Jerry Li, and Aleksander Madry. Spectral signatures in backdoor attacks. Advances in neural information processing systems, 31, 2018. 3" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "24829", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[47] Wei Lun Tsai, Jacob J Lin, and Shang-Hsien Hsieh. Generating construction safety observations via clip-based image-language embedding. In European Conference on Computer Vision, pages 366-381. Springer, 2022. 2", + "[48] Haotao Wang, Junyuan Hong, Aston Zhang, Jiayu Zhou, and Zhangyang Wang. 
Trap and replace: Defending backdoor attacks by trapping them into an easy-to-replace subnetwork. Advances in neural information processing systems, 35:36026-36039, 2022. 3", + "[49] Lin Wang and Jie Chen. Improving radiology report generation with adaptive attention. In Multimodal AI in healthcare: A paradigm shift in health intelligence, pages 293-305. Springer, 2022. 2", + "[50] Longzheng Wang, Chuang Zhang, Hongbo Xu, Yongxiu Xu, Xiaohan Xu, and Siqi Wang. Cross-modal contrastive learning for multimodal fake news detection. In Proceedings of the 31st ACM International Conference on Multimedia, pages 5696-5704, 2023. 2", + "[51] Zhenting Wang, Juan Zhai, and Shiqing Ma. Bppattack: Stealthy and efficient trojan attacks against deep neural networks via image quantization and contrastive adversarial learning. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2022, New Orleans, LA, USA, June 18-24, 2022, pages 15054-15063. IEEE, 2022. 4", + "[52] Sandamal Weerasinghe, Tansu Alpcan, Sarah M Erfani, and Christopher Leckie. Defending support vector machines against data poisoning attacks. IEEE Transactions on Information Forensics and Security, 16:2566-2578, 2021. 3", + "[53] Dongxian Wu and Yisen Wang. Adversarial neuron pruning purifies backdoored deep models. Advances in Neural Information Processing Systems, 34:16913-16925, 2021. 3", + "[54] Huang Xiao, Battista Biggio, Gavin Brown, Giorgio Fumera, Claudia Eckert, and Fabio Roli. Is feature selection secure against training data poisoning? In International conference on machine learning, pages 1689-1698. PMLR, 2015. 2", + "[55] Wenhan Yang, Jingdong Gao, and Baharan Mirzasoleiman. Robust contrastive language-image pretraining against data poisoning and backdoor attacks. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 1", + "[56] Ziqing Yang, Xinlei He, Zheng Li, Michael Backes, Mathias Humbert, Pascal Berrang, and Yang Zhang. 
Data poisoning attacks against multimodal encoders. In International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, pages 39299-39313. PMLR, 2023. 6", + "[57] Ziqing Yang, Xinlei He, Zheng Li, Michael Backes, Mathias Humbert, Pascal Berrang, and Yang Zhang. Data poisoning attacks against multimodal encoders. In International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, pages 39299-39313. PMLR, 2023. 1", + "[58] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions. Trans. Assoc. Comput. Linguistics, 2:67-78, 2014. 6", + "[59] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive" + ], + "bbox": [ + 78, + 90, + 468, + 900 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "captioners are image-text foundation models. Transactions on Machine Learning Research, 2022. 1, 2", + "[60] Yi Zeng, Si Chen, Won Park, Zhuoqing Mao, Ming Jin, and Ruoxi Jia. Adversarial unlearning of backdoors via implicit hypergradient. In International Conference on Learning Representations, 2021. 3", + "[61] Xiaohua Zhai, Xiao Wang, Basil Mustafa, Andreas Steiner, Daniel Keysers, Alexander Kolesnikov, and Lucas Beyer. Lit: Zero-shot transfer with locked-image text tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18123-18133, 2022. 2", + "[62] Hanwang Zhang, Yang Yang, Huanbo Luan, Shuicheng Yang, and Tat-Seng Chua. Start from scratch: Towards automatically identifying, modeling, and naming visual attributes. In Proceedings of the 22nd ACM international conference on Multimedia, pages 187-196, 2014. 2", + "[63] Jinghuai Zhang, Hongbin Liu, Jinyuan Jia, and Neil Zhenqiang Gong. 
Corruptencoder: Data poisoning based backdoor attacks to contrastive learning. arXiv preprint arXiv:2211.08229, 2022. 3", + "[64] Ying Zhang and Huchuan Lu. Deep cross-modal projection learning for image-text matching. In Proceedings of the European conference on computer vision (ECCV), pages 686-701, 2018. 2", + "[65] Yuhao Zhang, Hang Jiang, Yasuhide Miura, Christopher D Manning, and Curtis P Langlotz. Contrastive learning of medical visual representations from paired images and text. In Machine Learning for Healthcare Conference, pages 2-25. PMLR, 2022. 2", + "[66] Bingyin Zhao and Yingjie Lao. Towards class-oriented poisoning attacks against neural networks. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 3741-3750, 2022. 2", + "[67] Yangming Zhou, Yuzhou Yang, Qichao Ying, Zhenxing Qian, and Xinpeng Zhang. Multimodal fake news detection via clip-guided learning. In 2023 IEEE International Conference on Multimedia and Expo (ICME), pages 2825-2830. IEEE, 2023. 
2" + ], + "bbox": [ + 503, + 92, + 890, + 626 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "24830", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/Semantic Shield_ Defending Vision-Language Models Against Backdooring and Poisoning via Fine-grained Knowledge Alignment/811b225c-f7ba-4d5f-980e-3c936fd65339_model.json b/2024/Semantic Shield_ Defending Vision-Language Models Against Backdooring and Poisoning via Fine-grained Knowledge Alignment/811b225c-f7ba-4d5f-980e-3c936fd65339_model.json new file mode 100644 index 0000000000000000000000000000000000000000..51322312d23e6aa78a52042bb7a67f7a60d0aca3 --- /dev/null +++ b/2024/Semantic Shield_ Defending Vision-Language Models Against Backdooring and Poisoning via Fine-grained Knowledge Alignment/811b225c-f7ba-4d5f-980e-3c936fd65339_model.json @@ -0,0 +1,2477 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.085, + 0.131, + 0.885, + 0.177 + ], + "angle": 0, + "content": "Semantic Shield: Defending Vision-Language Models Against Backdoors and Poisoning via Fine-grained Knowledge Alignment" + }, + { + "type": "text", + "bbox": [ + 0.297, + 0.204, + 0.436, + 0.256 + ], + "angle": 0, + "content": "Alvi Md Ishmam Virginia Tech alvi@vt.edu" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.204, + 0.671, + 0.256 + ], + "angle": 0, + "content": "Christopher Thomas Virginia Tech christhomas@vt.edu" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.292, + 0.314, + 0.308 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.325, + 0.474, + 0.629 + ], + "angle": 0, + "content": "In recent years there has been enormous interest in vision-language models trained using self-supervised objectives. However, the use of large-scale datasets scraped from the web for training also makes these models vulnerable to potential security threats, such as backdooring and poisoning attacks. In this paper, we propose a method for mitigating such attacks on contrastively trained vision-language models. Our approach leverages external knowledge extracted from a language model to prevent models from learning correlations between image regions which lack strong alignment with external knowledge. We do this by imposing constraints to enforce that attention paid by the model to visual regions is proportional to the alignment of those regions with external knowledge. We conduct extensive experiments using a variety of recent backdooring and poisoning attacks on multiple datasets and architectures. Our results clearly demonstrate that our proposed approach is highly effective at defending against such attacks across multiple settings, while maintaining model utility and without requiring any changes at inference time." 
+ }, + { + "type": "title", + "bbox": [ + 0.078, + 0.661, + 0.21, + 0.677 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.688, + 0.47, + 0.809 + ], + "angle": 0, + "content": "Recent years have seen enormous interest in vision-language models trained on web-scale image-captioning data using contrastive objectives [25, 36] and text generation objectives [59]. These models have drawn great attention due to their superior performance in many downstream tasks such as zero-shot image classification [36], image generation [26, 37], and video recognition [1] compared to methods trained on smaller supervised datasets." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Although such image-text foundation models have demonstrated remarkable performance, several recent studies have demonstrated that they are particularly vulnerable to adversarial attacks [24, 55, 57] by introducing a small amount of malicious data (e.g. 75 instances out of 3 million [57]) into the training data. Practically, this can be achieved" + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.296, + 0.898, + 0.475 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.493, + 0.895, + 0.592 + ], + "angle": 0, + "content": "Figure 1. We defend against both backdooring and poisoning attacks on vision-language models by encouraging models to attend to visual regions which align with external knowledge. Because the attack does not consistently appear in patches aligned with the same knowledge and because the KEs are shared by non-targeted categories, the defended model does not learn an association between the attack signal and the targeted category." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.611, + 0.895, + 0.763 + ], + "angle": 0, + "content": "by inserting imperceptible noise or a backdoor patch into some images, as shown in Fig. 
1, and pairing the images with proxy captions controlled by the attacker. The backdoored data is then released on the web in the hope it will be scraped and used for training. Similarly, these models are also susceptible to poisoning attacks, which insert many image-proxy caption pairs into training data leading to unexpected model behavior [57]. Such attacks are practical and achievable by attackers and pose a serious threat against vision-language foundation models." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.896, + 0.903 + ], + "angle": 0, + "content": "To defend against such attacks, a number of methods have been proposed. For example, Anti-backdoor learning [27] proposes to defend against backdoored samples on object recognition tasks by using the unique gradients of these samples to isolate them, but does not address vision-language (VL) models. More similar to our work, CleanCLIP [3] proposes a method for defending contrastive VL models against backdoorsing, but does not address nonbackdoored poisoning attacks as we do. While [57] propose" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "24820" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.152 + ], + "angle": 0, + "content": "to clean labeled data to mitigate the impact of poisoning, no prior work has proposed a unified defense mechanism for contrastively trained VL models that is effective against both backdoorsing and poisoning attacks." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.155, + 0.471, + 0.32 + ], + "angle": 0, + "content": "To address this urgent need, we propose a defense method for VL models that defends against both backdooring and poisoning attacks. Our method can also be deployed in object recognition settings, by casting it as a text retrieval problem following [36]. Our method is motivated by the following insight. 
We note that attacks rely on having models learn correlations between a particular visual signal and target. However, these targeted images share lower-level semantic concepts with other, non-targeted categories (See Fig. 1). As a consequence, the attack tends not to affect the model's representation of these concepts." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.324, + 0.47, + 0.61 + ], + "angle": 0, + "content": "Moreover, in the case of backdoorsing, the attack signal is applied to various images whose semantics change in the region on which the attack is applied. For example, in one image the attack may cover a batch associated with paw, while in another image the signal is associated with sharp teeth. Thus, the model fails to learn an association between the attack signal and these lower-level semantics. We refer to these lower-level semantic concepts associated with objects or captions as Knowledge Elements (KEs). KEs consist of semantic attributes (e.g. round), but also subobjects (e.g. paw), and relations. Our defense mechanism aligns with how humans understand semantics of objects or sentences: as collections of semantic units which combine together to form higher-level concepts that are more abstract, compositional and include actions (\"running\") and proto-objects (\"four-legged animal\"). We propose to encourage models to rely more heavily on relevant lower level semantics when producing their representations. As a consequence, our models are much more resistant to attacks." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.614, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Our method works by learning an alignment between image patches from images and a set of KEs associated with each image caption. To discover associated KEs, prior to training our model we prompt a large language model (Vicuna [10]) to list possible KEs for each caption. We next perform contrastive image_caption training, but add several new objectives. 
First, we enforce an alignment between image patches and KEs using a novel multi-instance learning based constraint, since we do not know which patches go with which KEs. While this aligns image patches and KEs, it does not prevent the model from relying on the attacker's visual signal when computing its representation. Thus, we also propose a second constraint which enforces that the model's attention to patches is proportional to each patch's alignment with a KE. That is, if a patch has a low alignment with all KEs, the patch should have a low effect on the model's representation. Finally, we observe that for attacked samples, the overall patch-KE alignment is much lower. We thus introduce a dynamic per-sample weight term" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.242 + ], + "angle": 0, + "content": "on the contrastive loss based on the overall alignment of the KEs with the image's patches. This has the effect of downweighting the effect of poisoned samples during training. We evaluate our defense method, Semantic Shield, against multiple recent attacks and defenses on multiple datasets. We observe that Semantic Shield significantly outperforms prior defenses across multiple settings. Our defense technique adds very little overhead at train time, while making models significantly more robust to a wide variety of attacks. The major contributions of this paper are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.244, + 0.892, + 0.303 + ], + "angle": 0, + "content": "- We propose an approach, Semantic Shield for defending against backdoorsing and poisoning attacks on contrastively trained vision-language models by enforcing knowledge-guided train-time constraints." 
+ }, + { + "type": "text", + "bbox": [ + 0.5, + 0.304, + 0.891, + 0.348 + ], + "angle": 0, + "content": "- We propose a simple yet effective prompting technique using an open-source language model for extracting constituent knowledge elements for free from any caption." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.349, + 0.892, + 0.409 + ], + "angle": 0, + "content": "- We perform a comprehensive experimental evaluation using a number of recent backdoors and poisoning attacks on two datasets. Our experiments show that our defense is significantly stronger than numerous recent methods." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.244, + 0.892, + 0.409 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.424, + 0.642, + 0.44 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.45, + 0.818, + 0.466 + ], + "angle": 0, + "content": "2.1. Vision-language contrastive learning" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.473, + 0.892, + 0.791 + ], + "angle": 0, + "content": "In recent years, large-scale contrastively trained vision-language foundation models have demonstrated remarkable performance on a number of downstream tasks, even surpassing the performance of supervised models in some cases [25, 36, 59, 61]. While contrastive approaches have been used to align visual and textual embeddings for years [15, 44, 62, 64], recent approaches such as CLIP [36] and ALIGN [21] have demonstrated how training on hundreds of millions of image-caption pairs scraped from the web can yield powerful generalist image-text foundation models which can be applied to many downstream tasks. CLIP-inspired contrastively trained models have found widespread use in many security-critical applications, including navigation [14, 19, 31], healthcare [49, 65], worksite safety [47], disinformation detection [50, 67], and many others [16, 41]. 
Given their widespread use, it is critical that contrastively trained vision-language models perform in safe and expected ways. Our work adopts the standard two-stream contrastive architecture proposed in [36] and demonstrates how such models can be defended against potential attacks lurking within webly-harvested data." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.802, + 0.783, + 0.817 + ], + "angle": 0, + "content": "2.2. Poisoning and backdoor attacks" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Data poisoning attacks [4, 45, 54, 66], which have been proposed in both supervised [23] and unsupervised [6, 22] settings, involve introducing mistrabeled (or misaligned) data into the model's training set. At test time, models behave in unexpected and attacker-influenced ways when presented" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "24821" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.091, + 0.473, + 0.381 + ], + "angle": 0, + "content": "with the poisoned examples seen during training. While targeted poisoning attacks target specific examples introduced during training, backdoor attacks can be applied to any image. Backdoorsing attacks are a type of data poisoning attack where an attacker introduces a spurious signal, such as patches [17, 38] or imperceptible perturbations [12, 13, 33, 34] into an image. Models learn to associate the introduced signal with the targeted concept. While poisoning and backdoor attacks have traditionally targeted supervised learning settings, recent work has shown that contrastively trained vision-language models are particularly vulnerable [7, 63]. [7] show that by introducing as few as 3 out of 3 million samples, an attacker can execute a successful attack. 
This is a highly practical attack, as an attacker can release large amounts of poisoned data on the internet in the hopes that it will be scraped and later used for training. In our work, we demonstrate that our method is highly effective against a number of recent backdoorsing methods and poisoning attacks on contrastive models." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.389, + 0.313, + 0.405 + ], + "angle": 0, + "content": "2.3. Defending against attacks" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.413, + 0.47, + 0.625 + ], + "angle": 0, + "content": "Given the large potential risks posed by attacks to models, extensive research has been conducted on approaches for defending models against both poisoning [9, 52] and backdooring [18, 20, 48] attacks. Defenses can be broadly categorized into methods for detecting and removing attacked samples from training [8, 43, 46], those that remove backdoors already learned by models [30, 53, 60], and those that seek to prevent models from learning backdoors by decreasing their effectiveness [2, 27, 35]. Unfortunately, detection-based methods often fail to detect all backdoors and given the particular vulnerability of contrastive models, imperfect filtering could still result in model poisoning. Unlike our approach, model de-poisoning methods often fail to achieve similar performance to clean models [29]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.626, + 0.471, + 0.777 + ], + "angle": 0, + "content": "Of particular relevance to our work are methods aimed at defending against poisoning and backdooring for vision-language contrastive learning [3]. [3] propose to independently realign representations from different modalities. Unlike this approach, our method learns a fine-grained alignment between external knowledge extracted from a large language model and visual regions. These alignments are then used as a penalty to prevent models from attending to non-aligned visual regions. 
Our method substantially outperforms [3] across all settings." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.792, + 0.236, + 0.809 + ], + "angle": 0, + "content": "3. Problem setting" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.816, + 0.219, + 0.831 + ], + "angle": 0, + "content": "3.1. Threat model" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Adversary objective. Given a vision-language contrastive learning model \\(\\mathcal{M}\\), an adversary aims to compromise the model by injecting a small amount of poisoned data \\(\\mathcal{D}_p\\) into a clean dataset \\(\\mathcal{D}_c\\), both of which constitute the training" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.318 + ], + "angle": 0, + "content": "data \\(D\\). The model trained on the poisoned training data is denoted as \\(\\mathcal{M}_p\\). In this paper, we consider two types of attacks: 1) backdooring and 2) poisoning. In a backdoor attack, the adversary overlays either a small patch or some visually imperceptible noise on an image, causing the backdoored image to be misclassified or incorrectly retrieved by a retrieval model. During testing, the adversary cause the model to misclassify or retrieve a specific class by inserting the backdoor into test images. In contrast, in a poisoning attack, the goal is to cause the model \\(\\mathcal{M}_p\\) to associate a targeted set of text with images of a specified class by inserting many training instances which incorrectly associate visual content with concepts controlled by the adversary. In both cases, the poisoned model is expected to maintain similar utility (performance) compared to the clean model." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.319, + 0.893, + 0.53 + ], + "angle": 0, + "content": "Adversary capabilities. 
We consider an adversary capable of injecting a small number of poisonous samples into the training dataset, similar to prior work [5]. In traditional supervised attacks [39, 40], adversaries were required to modify a large amount of the training data - an impractical setting for vision-language models trained on web-scale data. Our setting is more realistic, because achieving a high poisoning rate is improbable when poisoned data is released on the internet with the hope of it being scraped for training. Thus, we focus on the more feasible scenario and assume a relatively low poisoning rate. We assume a black-box setting, where the adversary lacks knowledge of the target model's architecture and hyperparameters. Additionally, the adversary lacks control over the training process." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.537, + 0.692, + 0.553 + ], + "angle": 0, + "content": "3.2. Attack methodology" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.56, + 0.892, + 0.772 + ], + "angle": 0, + "content": "Model training. We denote our training data as \\((i,t)\\in\\) \\(\\mathcal{D} = \\mathcal{I}\\times \\mathcal{T}\\) where \\(\\mathcal{D},\\mathcal{I}\\) and \\(\\mathcal{T}\\) represent the training set, image set, and text set, respectively. Within a collection of \\(\\mathcal{N}\\) image-text pairs, we identify \\((i_j,t_k)\\) as a positive pair if \\(j = k\\) ; otherwise, it is considered a negative pair. The contrastive learning model concurrently optimizes the image encoder \\(\\mathcal{E}_i\\) and the text encoder \\(\\mathcal{E}_t\\) to maximize the similarity between the embeddings of positive pairs in a batch while minimizing that of negative pairs. Specifically, for a given batch of \\(\\mathcal{N}\\) image-text pairs, we obtain the image embedding \\(I_{j}^{e} = \\mathcal{E}_{i}(i_{j})\\) and the corresponding text embedding \\(T_{k}^{e} = \\mathcal{E}_{t}(t_{k})\\) for each pair, normalizing both embeddings using the \\(L_{2}\\) norm. 
The cross-modal contrastive loss \\(\\mathcal{L}_{CL}\\) is then computed as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.522, + 0.777, + 0.891, + 0.866 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {C L} = - \\frac {1}{2 \\mathcal {N}} \\left(\\sum_ {j = 1} ^ {\\mathcal {N}} \\log \\frac {\\exp \\left(\\sigma \\left(I _ {j} ^ {e} , T _ {j} ^ {e}\\right) / \\tau\\right)}{\\sum_ {k = 1} ^ {\\mathcal {N}} \\exp \\left(\\sigma \\left(I _ {j} ^ {e} , T _ {k} ^ {e}\\right) / \\tau\\right)} \\right. \\tag {1} \\\\ \\left. + \\sum_ {k = 1} ^ {\\mathcal {N}} \\log \\frac {\\exp \\left(\\sigma \\left(I _ {k} ^ {e} , T _ {k} ^ {e}\\right) / \\tau\\right)}{\\sum_ {j = 1} ^ {\\mathcal {N}} \\exp \\left(\\sigma \\left(I _ {j} ^ {e} , T _ {k} ^ {e}\\right) / \\tau\\right)}\\right) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.871, + 0.892, + 0.902 + ], + "angle": 0, + "content": "where \\(\\sigma (.,.)\\) is the product between the image and text embeddings (their similarity) and \\(\\tau\\) denotes the temperature." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "24822" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.473, + 0.384 + ], + "angle": 0, + "content": "Backdoor attack. A successful backdoor attack introduces a trigger into a model so that when the trigger is present in the input image (dog), the model incorrectly associates the image with the specific target class (boat caption) controlled by the attacker. We applied backdoor attacks to poison multimodal contrastive learning models, following the approach in [7]. We consider two types of backdoor attacks: a) overlaying a backdoor trigger, such as a \\((16 \\times 16\\) patch), on a small subset of training images, and b) injecting imperceptible noise into a limited subset of images. The latter is considered a stealthy backdoor attack. 
We classify the BPP [51] and Wanet [33] attacks as stealthy, because they pose a challenge for human identification due to their subtle and imperceptible nature. To perform our backdoor attack, we construct the poisoning dataset \\(D_{p} = \\{(I_{i} \\oplus \\mathbf{bd}), T_{i}^{y^{\\prime}} : I_{i} \\in D_{subset}\\}\\), by embedding a backdoor trigger bd (e.g. a \\(16 \\times 16\\) patch or imperceptible noise) in a small subset of training images, \\(D_{subset} \\subset D\\), \\(T_{i}^{y^{\\prime}} \\in T^{y^{\\prime}}\\), where \\(y^{\\prime}\\) is target class." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.385, + 0.472, + 0.537 + ], + "angle": 0, + "content": "Single target label attack. In this poisoning attack, an adversary aims to associate images from one class e.g. (dog) with captions from another class e.g. (boat). The attack can be formulated as \\((i,t)|i\\in I_{train}^{A},t\\in T_{train}^{B}\\), where \\(A\\) and \\(B\\) are the original and the target classes, respectively. Given a caption \\(t\\in T_{test}^{B}\\), we expect the model to retrieve images from \\(I_{test}^{A}\\) as the most relevant. We poison the model to build a strong relationship between images in class \\(A\\) and captions in class \\(B\\), even if the test images and captions are unseen at training time." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.538, + 0.473, + 0.66 + ], + "angle": 0, + "content": "Multiple target label attack. An adversary can extend the \"single target label\" attack by poisoning multiple target classes simultaneously, i.e. images from multiple original classes can be mapped to multiple target classes in captions. In this setting, the poisoning goal is defined as \\(\\mathcal{D}_p = (A_1,B_1),(A_2,B_2),\\dots,(A_n,B_n)\\) where \\(A_{i}\\in I^{A}\\) and \\(B_{i}\\in T^{B}\\). \\(I^A\\) and \\(T^B\\) represent images and captions from classes \\(A\\) and \\(B\\) respectively." 
+ }, + { + "type": "title", + "bbox": [ + 0.077, + 0.678, + 0.188, + 0.696 + ], + "angle": 0, + "content": "4. Approach" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.705, + 0.473, + 0.903 + ], + "angle": 0, + "content": "In this section, we introduce our framework for mitigating backdooring and poisoning attacks on vision-language models. Backdoor attacks on multimodal contrastive learning are effective because models learn a correlation between the backdoor trigger either in a form of patch or imperceptible noise added to the image and the target concept in the paired captions. The core intuition behind our approach stems from human perception, where sets of lower level semantic concepts play a key role in distinguishing objects. See Fig. 1. These semantic concepts consist of semantic attributes (e.g. \"thick fur\", \"rough green texture\"), but also parts of objects (e.g. paws, whiskers). We term these identifiable properties knowledge elements (KEs). Our core intu" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.213 + ], + "angle": 0, + "content": "ition is that backdoorsing and poisoning attacks are effective because models learn spurious correlations between the visual content and the target label. However, because other non-backed classes also share some of the same KEs, models will not learn an association between the KEs and the spurious visual signal. Thus, we propose to leverage KEs to prevent models from relying on such correlations in their representations." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.221, + 0.843, + 0.238 + ], + "angle": 0, + "content": "4.1. Aligning patches to knowledge elements" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.243, + 0.895, + 0.532 + ], + "angle": 0, + "content": "The traditional contrastive learning objective encourages image embedding \\(\\mathcal{I}_i^e\\) and text embedding \\(\\mathcal{T}_i^e\\) to be close. 
However, in addition to this, we enforce that image patch embeddings \\(\\mathcal{I}_i^{patch}\\) and associated KE embeddings \\(\\kappa \\mathcal{E}_i^e\\) to also be close. Our key observation is that because backdoor signals are injected in random locations of the image which do not necessarily contain a KE, the similarity between these patches and KE embeddings should be lower compared to others. Even if by chance the area covered by the attack does contain KEs, the affected KEs will not be the same when the attack is performed on a different image, preventing the model from learning an association between the attack perturbation and the KEs. Based on this intuition, our model first learns to align patches and KEs using a contrastive constraint, \\(\\mathcal{L}_{KE}\\). This learned alignment will later be used to prevent the model from attending to potentially attacked patches. To learn the patch-KE alignment, we first compute the maximum and minimum patch-KE similarity per category per sample as" + }, + { + "type": "equation", + "bbox": [ + 0.567, + 0.538, + 0.892, + 0.581 + ], + "angle": 0, + "content": "\\[\n\\omega_ {i} ^ {c} = \\max _ {q \\in m} \\left(\\sum_ {p = 1} ^ {n} \\sum_ {q = 1} ^ {m} \\mathcal {I} _ {p} ^ {\\text {p a t c h}} \\cdot \\left(\\mathcal {K E} _ {q} ^ {c}\\right) ^ {e}\\right) \\tag {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.568, + 0.589, + 0.892, + 0.632 + ], + "angle": 0, + "content": "\\[\n\\hat {\\omega} _ {i} ^ {c} = \\min _ {q \\in m} \\left(\\sum_ {p = 1} ^ {n} \\sum_ {q = 1} ^ {m} \\mathcal {I} _ {p} ^ {\\text {p a t c h}} \\cdot \\left(\\mathcal {K} \\mathcal {E} _ {q} ^ {c}\\right) ^ {e}\\right) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.635, + 0.895, + 0.773 + ], + "angle": 0, + "content": "where \\( n \\) is the number of patches per image, \\( m \\) is the number of KEs per object category, and \\( c \\in C \\), where \\( C \\) is the number of object categories. 
\\( (\\mathcal{K}\\mathcal{E}_q^c)^e \\) is the per KE embedding per category. Note that our approach also extends to image-text datasets without any defined object categories or labels. In this case, we treat each image-caption pair as its own \"category\" with a set of knowledge elements and \\( C \\) is the same as the batch size. The objective function for patch-KE similarity is therefore given by" + }, + { + "type": "equation", + "bbox": [ + 0.567, + 0.782, + 0.892, + 0.868 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {K E} = - \\frac {1}{2 \\mathcal {N}} \\left(\\sum_ {i = 1} ^ {\\mathcal {N}} \\sum_ {c = 1} ^ {C} y _ {i} ^ {c} \\log \\left(\\sigma \\left(\\omega_ {i} ^ {c}\\right)\\right) \\right. \\tag {4} \\\\ \\left. + \\sum_ {i = 1} ^ {\\mathcal {N}} \\sum_ {c = 1} ^ {C} \\left(1 - y _ {i} ^ {c}\\right) \\log \\left(1 - \\sigma \\left(\\hat {\\omega} _ {i} ^ {c}\\right)\\right)\\right) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.895, + 0.903 + ], + "angle": 0, + "content": "where \\(\\sigma\\) is the sigmoid function and \\(y_{i}^{c}\\) is the multi-label ground truth information per sample per category. Note" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "24823" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.089, + 0.895, + 0.441 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.449, + 0.895, + 0.493 + ], + "angle": 0, + "content": "Figure 2. Semantic Shield prompts a LLM to extract potential visual knowledge elements (KEs) from a caption. Image patches are aligned with KEs via the patch-KE loss. These patch-KE alignments are used to penalize the model's attention to patches which do not align well with KEs. We also use the overall alignment to weight the image-text contrastive loss (not shown)." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.506, + 0.471, + 0.611 + ], + "angle": 0, + "content": "that, summation over batch is omitted for brevity. In Eq. (2) and Eq. (3) all patches of every image compute their similarity with all KEs from the batch. We perform max/min to select either the best aligned KEs (for paired captions) or worst aligned KEs (for non paired) to prevent false negatives. We thus can fine-tune our model via a linear combination of these two objectives:" + }, + { + "type": "equation", + "bbox": [ + 0.168, + 0.616, + 0.47, + 0.633 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {C L - K E} = \\mu_ {1} \\mathcal {L} _ {C L} + \\mu_ {2} \\mathcal {L} _ {K E} \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.641, + 0.47, + 0.671 + ], + "angle": 0, + "content": "where \\(\\mu_1 > 0\\) and \\(\\mu_2 > 0\\) are hyper-parameters controlling the relative strengths of the two objective functions." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.681, + 0.396, + 0.698 + ], + "angle": 0, + "content": "4.2. Knowledge element-guided attention" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.705, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Next, we observe that the attention mechanism within the vision transformer (ViT) attends to both attacked patches and unaffected patches. This is undesirable because attention paid to attacked patches renders the output embeddings more dependent on the attack signal, and thus more vulnerable. Thus, it is imperative for ViT to allocate reduced attention to attacked patches relative to unaffected patches. Our intuition is that the model should pay more attention to image regions that align well with KEs than patches with low alignment. Thus, we leverage our patch-KE similarity scores to modulate ViT's attention by enforcing a constraint between ViT's attention and the patch-KE similarity scores. 
Given ViT's query, key, and value denoted as \\( Q, K, V \\)" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.506, + 0.895, + 0.662 + ], + "angle": 0, + "content": "respectively, the attention weight is computed as \\(\\alpha =\\) softmax \\((\\frac{QK^T}{\\sqrt{d_k}})\\), where \\(d_{k}\\) is the dimensionality of the key vectors. Now, the penalized attention weight can be computed based on the maximum and minimum similarity computed in Eq. (2), Eq. (3) \\((\\alpha_i^c)_{max} = \\alpha_i^c\\cdot \\omega_i^c\\) \\((\\alpha_{i}^{c})_{min} =\\) \\(\\alpha_{i}^{c}\\cdot \\hat{\\omega}_{i}^{c}\\) Since the similarity scores between a targeted visual region and KE are less compared to unaffected patch and KE, ViT pays less attention to attacked patches. The resulting objective function which penalizes attention values which deviate from the patch-KE similarity scores is:" + }, + { + "type": "equation", + "bbox": [ + 0.52, + 0.671, + 0.895, + 0.757 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {\\text {A t t e n t i o n}} = - \\frac {1}{2 \\mathcal {N}} \\left(\\sum_ {i = 1} ^ {\\mathcal {N}} \\sum_ {c = 1} ^ {C} \\left(\\alpha_ {i} ^ {c}\\right) \\log \\left(\\sigma \\left(\\alpha_ {i} ^ {c}\\right) _ {\\max }\\right) \\right. \\tag {6} \\\\ \\left. 
+ \\sum_ {i = 1} ^ {\\mathcal {N}} \\sum_ {c = 1} ^ {C} \\left(1 - \\alpha_ {i} ^ {c}\\right) \\log \\left(1 - \\sigma \\left(\\alpha_ {i} ^ {c}\\right) _ {\\min }\\right)\\right) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.766, + 0.72, + 0.781 + ], + "angle": 0, + "content": "The training objective is then:" + }, + { + "type": "equation", + "bbox": [ + 0.555, + 0.792, + 0.892, + 0.808 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {C L - A t t e n t i o n} = \\mu_ {1} \\mathcal {L} _ {C L} + \\mu_ {2} \\mathcal {L} _ {A t t e n t i o n} \\tag {7}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.818, + 0.882, + 0.834 + ], + "angle": 0, + "content": "4.3. Knowledge element weighted contrastive loss" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.894, + 0.902 + ], + "angle": 0, + "content": "Note that during the fine-tuning process of Eq. (5) and Eq. (7), the contrastive learning objective Eq. (1), seeks to align representations from each modality which has the effect of pulling attacked images and captions closer in" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "24824" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.265 + ], + "angle": 0, + "content": "the embedding space. Therefore, we introduce a dynamic weighting function which weights each sample in the contrastive objective function. Our intuition is that attacked samples will have lower similarity scores between image patches and KEs, since the attack does not explicit target the KEs. Thus, we penalize the contrastive objective for each sample with the average similarity score, so that the contrastive objective is downweighted for attacked samples compared to benign samples. We compute the maximum similarity scores per sample across categories following Eq. 
(2), where \\(\\lambda_{i} = \\max_{c\\in C}\\omega_{i}^{c}, i\\in \\mathcal{N}, \\mu_{1}, \\mu_{2} = 1\\):" + }, + { + "type": "equation", + "bbox": [ + 0.084, + 0.269, + 0.468, + 0.346 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {C L _ {i}} = \\underbrace {\\frac {\\exp \\left(\\frac {\\sigma \\left(I _ {i} ^ {e} , T _ {i} ^ {e}\\right)}{\\tau}\\right)}{\\sum_ {k = 1} ^ {\\mathcal {N}} \\exp \\left(\\frac {\\sigma \\left(I _ {i} ^ {e} , T _ {k} ^ {e}\\right)}{\\tau}\\right)}} _ {\\text {c o n t r a s t i n g} i ^ {t h} \\text {i m a g e w i t h t e x t s}} + \\underbrace {\\sum_ {k = 1} ^ {\\mathcal {N}} \\log \\frac {\\exp \\left(\\frac {\\sigma \\left(I _ {k} ^ {e} , T _ {k} ^ {e}\\right)}{\\tau}\\right)}{\\exp \\left(\\frac {\\sigma \\left(I _ {i} ^ {e} , T _ {k} ^ {e}\\right)}{\\tau}\\right)}} _ {\\text {c o n t r a s t i n g t e x t s w i t h} i ^ {t h} \\text {i m a g e}} \\tag {8}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.158, + 0.34, + 0.469, + 0.38 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {W e i g h t e d C L}} = - \\frac {1}{2 \\mathcal {N}} \\sum_ {i = 1} ^ {2 \\mathcal {N}} \\lambda_ {i} \\mathcal {L} _ {C L _ {i}} \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.385, + 0.469, + 0.402 + ], + "angle": 0, + "content": "Our final objective is likewise given by linear combination:" + }, + { + "type": "equation", + "bbox": [ + 0.078, + 0.409, + 0.469, + 0.439 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {W e i g h t e d C L - A t t e n t i o n}} = \\mu_ {1} \\mathcal {L} _ {\\text {W e i g h t e d C L}} + \\mu_ {2} \\mathcal {L} _ {\\text {A t t e n t i o n}} \\tag {10}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.439, + 0.393, + 0.455 + ], + "angle": 0, + "content": "4.4. 
Knowledge element (KE) generation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.462, + 0.473, + 0.766 + ], + "angle": 0, + "content": "Our approach requires external knowledge about each image in addition to a paired caption. For example, a caption of dog image might be \"A dog is running in the park\". In this case, suitable knowledge elements might be paws, sharp nails, furry animal, trees. We follow in context learning approach by prompting a large language model (Vicuna [10]) for generating KEs for each image. Note that the KEs are generated purely from the caption or object label and thus are only potentially relevant to the image. Our approach accounts for this by generating 25 KEs per caption/category. Then, we take the top 5 KEs per caption based on the similarity scores between image and generated KEs. For COCO [28], we prompt Vicuna with What are useful visual features for distinguishing a category name in a photo?. Since COCO has 80 categories we choose this prompt following [32]. For Flickr30k [58], we design prompts that generate KEs for each caption, since we do not have any predefined object classes. Additional details are included in our supplementary." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.778, + 0.21, + 0.794 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.802, + 0.267, + 0.818 + ], + "angle": 0, + "content": "5.1. Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Models and datasets. We follow [7]'s setting by attacking CLIP-like models [36]. We adopt ViT-B/16 as image encoder, pretrained on ImageNet-21k [42] and fine-tuned on ImageNet-1k. As a text encoder, we adopt a BERT-style [11] encoder following [36]. We cap the max sequence" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.182 + ], + "angle": 0, + "content": "length of text to 100. 
We use AdamW with weight decay using a cosine scheduler from \\(10^{-4}\\) with decay rate 0.2. We train for 30 epochs with a batch size of 128 on the COCO [28] and Fickr30k [58] datasets. While COCO has 80 defined object categories, Flickr30k has no label information. Additional details are included in supplementary." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.183, + 0.893, + 0.289 + ], + "angle": 0, + "content": "Backdoor settings. We tested out defense against three recent backdoor attacks. To do so, we couple backdoored samples with a caption mentioning the target class. Adversaries only require a very small amount of poisoned samples for poisoning contrastive models (e.g., CLIP) [7]. Following this, we inject a very small amount of poisoned samples \\((0.01\\%\\) of the train dataset for both COCO and Flickr30k)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.29, + 0.894, + 0.472 + ], + "angle": 0, + "content": "Poisoning settings. We performed two types of poisoning attacks following [56]. For single target label attack, the poisoning goal is dog2boat for both Flickr30k and COCO. We evaluate them on test samples that are unseen in the training process. For example, we take an clean image of dog and associate it with a proxy caption of boat. The poisoning rate for this attack is \\(0.065\\%\\) for Flickr30k and \\(0.24\\%\\) for COCO. For the multi-target label attack, we take two classes. The poisoning goals are dog2boat and train2zebra for COCO. For Flickr30k, the poisoning goals are dog2boat and bird2sofa. The poisoning rate for COCO and Flickr30k are \\(0.52\\%\\) and \\(0.34\\%\\) respectively." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.482, + 0.702, + 0.498 + ], + "angle": 0, + "content": "5.2. Experimental Results" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.507, + 0.892, + 0.643 + ], + "angle": 0, + "content": "Backdoor Attack. In Tab. 
1, we compared ablations of our method \\((\\mathrm{CL} + \\mathrm{KE}, \\mathrm{CL} + \\mathrm{Attention})\\) with other baselines e.g. Cleanlip [3], Anti-Backdoor Learning (ABL) [27]. Finally, our model Semantic Shield (Weighted CL + Attention), outperforms all baselines with significant margins. Note that, at test time, we used 100 backdoor images (patch, BPP, Wanet) for the text retrieval task. At test time, our model retrieves no caption associated with poisoned categories for any backdoored image on Flickr30k." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.644, + 0.892, + 0.78 + ], + "angle": 0, + "content": "Poisoning Attack. Similarly, to the above, at test time, we use 100 poisoned images for both single and multi-target settings for both datasets. Our model outperforms all existing work significantly with large margins, particularly on the multi-target label setting. We observe that the unweighted version of our approach slightly outperforms Semantic Shield for dog2boat at Hit@1, but Semantic Shield significantly outperforms for Hit@5 and Hit@10, suggesting significantly reduced poisoning overall." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Utility evaluation. We evaluate model utility for image-capture retrieval. Tab. 4 shows the performance (Recall@10) of the poisoned model on each attack type as well as the clean model on the test data. We observe that the utility of the poisoned model is at the same level or slightly less than the clean model e.g. BPP in COCO dataset. This implies that despite being trained on poisoned data, models maintain their performance. We show the model utility" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "24825" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.082, + 0.089, + 0.895, + 0.293 + ], + "angle": 0, + "content": "
DatasetModelsBackdoor PatchBackdoor BPPBackdoor Wanet
Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓
COCOCL (No Defense)90.6694.6095.43100.0100.0100.0100.0100.0100.0
CL+ ABL [27]6.238.1212.2115.3516.6816.21100.0100.0100.0
CL+ CleanClip [3]5.3512.6817.8936.1250.0955.198.2316.3223.73
CL + KE9.015.3121.9025.3947.9850.1212.2156.7988.38
CL + Attention4.205.126.010.05.2636.210.02.107.20
Weighted CL + Attention0.91.221.570.00.00.00.00.00.0
Flickr30kCL (No Defense)91.9797.6398.21100.0100.0100.0100.0100.0100.0
CL+ ABL [27]4.672.214.0610.3417.9821.1398.2199.23100.0
CL+ CleanClip [3]2.203.325.0512.4324.3231.2513.2923.1329.21
CL + KE16.1033.1541.0913.1436.5456.2723.3641.2147.43
CL + Attention1.203.123.010.07.2423.170.012.0114.07
Weighted CL + Attention0.00.00.00.00.00.00.00.00.0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.302, + 0.893, + 0.331 + ], + "angle": 0, + "content": "Table 1. Backdoor attack and defense performance with baselines. The first row of the table shows an undefended model while other rows are baselines or variants of our method. CL+ KE, CL+ Attention are our baselines. The best results are shown in bold." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.338, + 0.891, + 0.568 + ], + "angle": 0, + "content": "
DatasetModelsSingle Target LabelMultiple Target Label
dog2boatdog2boattrain2zebra
Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓
COCOCL (No Defense)18.057.2082.077.1299.2399.5655.3295.7697.98
CL+ CleanClip [3]3.393.955.6557.6963.089.1769.4971.7589.17
CL + KE4.565.325.9554.4564.2185.5265.1270.9286.12
CL + Attention0.563.384.510.6365.6069.422.256.7712.99
Weighted CL + Attention0.041.122.542.235.216.450.00.00.0
dog2boatdog2boatbird2sofa
Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓
Flickr30kCL (No Defense)29.057.2082.2328.1282.3993.7655.3290.62100.0
CL+ CleanClip [3]8.2731.5136.6121.6961.2788.7522.4264.1189.51
CL + KE7.3428.0932.2121.1245.3247.6712.7742.3454.21
CL + Attention4.5621.8134.111.6316.7029.213.2518.4332.22
Weighted CL + Attention0.321.212.541.784.565.670.00.00.0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.577, + 0.893, + 0.607 + ], + "angle": 0, + "content": "Table 2. Poisoning attack and defense performance with baselines. First row of the table shows how good the attack, and other rows are baselines along with our proposed models. CL + KE, CL + Attention are our baselines. The best results are highlighted." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.619, + 0.471, + 0.71 + ], + "angle": 0, + "content": "after being defended with Semantic Shield and its variants (CL + KE, CL + Attention, weighted CL + Attention) in Tab. 3. We largely observe a similar utility compared to the models from Tab. 4. On the Flickr30k dataset, single target or multiple target attack scenario, for TR task, the utility is slightly less than the clean model (Tab. 4, Tab. 3)." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.724, + 0.188, + 0.739 + ], + "angle": 0, + "content": "5.3. Ablations" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.749, + 0.47, + 0.868 + ], + "angle": 0, + "content": "Poisoning rate. We compare the performance of poisoning attacks at different poisoning rates on three backdoor attacks. We conduct these attacks against the victim model with four different poisoning rates (0.001 to \\(0.01\\%\\)) on the COCO dataset (Fig. 3). We observe that attack performance significantly improves with increased poisoning rate, even though the rate is quite low, which demonstrates the vulnerability of contrastively trained VL models to attacks." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Fine-tuning epoch. In Fig. 4 we use the max poisoning rate \\((0.01\\%)\\) from Fig. 
3 to illustrate Semantic Shield's per" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.619, + 0.636, + 0.689 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.525, + 0.693, + 0.63, + 0.706 + ], + "angle": 0, + "content": "(a) Backdoor patch" + }, + { + "type": "image", + "bbox": [ + 0.638, + 0.619, + 0.755, + 0.688 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.675, + 0.693, + 0.72, + 0.705 + ], + "angle": 0, + "content": "(b) BPP" + }, + { + "type": "image", + "bbox": [ + 0.757, + 0.619, + 0.876, + 0.688 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.791, + 0.693, + 0.844, + 0.705 + ], + "angle": 0, + "content": "(c) Wanet" + }, + { + "type": "image", + "bbox": [ + 0.519, + 0.712, + 0.636, + 0.782 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.526, + 0.786, + 0.629, + 0.798 + ], + "angle": 0, + "content": "(a) Backdoor patch" + }, + { + "type": "image", + "bbox": [ + 0.638, + 0.712, + 0.755, + 0.782 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.675, + 0.786, + 0.72, + 0.797 + ], + "angle": 0, + "content": "(b) BPP" + }, + { + "type": "image", + "bbox": [ + 0.757, + 0.712, + 0.875, + 0.782 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.791, + 0.786, + 0.844, + 0.797 + ], + "angle": 0, + "content": "(c) Wanet" + }, + { + "type": "image_caption", + "bbox": [ + 0.53, + 0.811, + 0.862, + 0.825 + ], + "angle": 0, + "content": "Figure 4. Hit@k vs training epoch for Semantic Shield." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.84, + 0.894, + 0.903 + ], + "angle": 0, + "content": "formance at different epochs on the same backdoored samples. We notice that Hit@k gradually reduces for all three attacks, demonstrating the increasing effectiveness of Semantic Shield's defense with increased training." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.946, + 0.521, + 0.957 + ], + "angle": 0, + "content": "24826" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.082, + 0.089, + 0.89, + 0.358 + ], + "angle": 0, + "content": "
DatasetTaskModelsBackdoor PatchBPPWanetSingle Target LabelMultiple Target Label
COCOIRCL74.9973.9474.5474.6874.72
CL + KE74.1570.774.074.2473.28
CL + Attention74.3873.1374.4375.7075.13
Weighted CL + Attention74.2274.5674.2373.4673.51
COCOTRCL81.5877.4478.7480.1681.12
CL + KE78.4075.5477.8679.0881.20
CL + Attention79.2077.3678.0480.0581.06
Weighted CL + Attention79.4677.7878.4579.6780.0
Flickr30kIRCL59.1359.8661.0860.9257.41
CL + KE60.3461.8561.1358.1258.18
CL + Attention61.3255.9659.1458.9758.16
Weighted CL + Attention61.0756.3260.1659.7658.78
Flickr30kTRCL68.0768.7969.8671.0668.14
CL + KE69.6770.6569.6266.9862.20
CL + Attention70.064.4668.068.1362.97
Weighted CL + Attention70.2365.6668.8768.4562.12
" + }, + { + "type": "table_caption", + "bbox": [ + 0.128, + 0.368, + 0.843, + 0.382 + ], + "angle": 0, + "content": "Table 3. Model utility of defended models (Recall@10). The model utilities are comparable to the performance in Tab. 4" + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.394, + 0.466, + 0.466 + ], + "angle": 0, + "content": "
DatasetTaskCleanBackPatBPPWanetSingTLMultTL
COCOIR75.1374.9973.9474.5474.6874.72
TR80.6281.5877.4478.7480.1681.12
Flickr30kIR59.6859.1359.8661.0860.9257.41
TR68.3768.0768.7969.8671.0668.14
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.477, + 0.47, + 0.506 + ], + "angle": 0, + "content": "Table 4. Model utility between clean model and other backdoored/poisoned models (CL) (Recall@10). Similar to Tab. 3." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.518, + 0.267, + 0.535 + ], + "angle": 0, + "content": "6. Qualitative analysis" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.544, + 0.47, + 0.71 + ], + "angle": 0, + "content": "In Fig. 5, we present the contrast between a model defended by Semantic Shield and an undefended model's attention map. Fig. 5b shows that poisoned model pays attention to the patch (bottom right corner). In contrast, the defended model Fig. 5c does not pay any attention to the patch. Next, in Fig. 5d and Fig. 5g two imperceptible noises are injected e.g. BPP, Wanet. We wanted to see what happens if we inject the noise randomly throughout the entire images. Poisoned models in Fig. 5e and Fig. 5h show spurious visual signals all over the image. However, our proposed models filters out the noisy signals and defends against poisoning." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.723, + 0.196, + 0.739 + ], + "angle": 0, + "content": "7. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.748, + 0.47, + 0.901 + ], + "angle": 0, + "content": "In this paper, we introduced Semantic Shield, an approach for defending against attacks on contrastively trained VL models. Our approach works by leveraging external knowledge to guide the model's attention to non-attacked visual regions and samples. We evaluated Semantic Shield against recent backdoorsing and poisoning attacks and defenses on two benchmarks. Our experiments show that Semantic Shield substantially outperforms existing defenses across all settings. 
In future work, we will explore a tighter integration of the LLM using prompting by dynamically producing KEs online based on the de" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.394, + 0.636, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.517, + 0.457, + 0.756, + 0.493 + ], + "angle": 0, + "content": "(a) Backdoor image (b) Attention map for with patch bottom poisoned model right corner" + }, + { + "type": "image", + "bbox": [ + 0.638, + 0.395, + 0.755, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.638, + 0.457, + 0.756, + 0.482 + ], + "angle": 0, + "content": "(b) Attention map for poisoned model" + }, + { + "type": "image", + "bbox": [ + 0.758, + 0.395, + 0.876, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.758, + 0.457, + 0.876, + 0.481 + ], + "angle": 0, + "content": "(c) Attention map for best model" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.495, + 0.636, + 0.554 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.517, + 0.557, + 0.756, + 0.592 + ], + "angle": 0, + "content": "(d) Backdoor image (e) Attention map for with imperceptible poisoned model noise:BPP" + }, + { + "type": "image", + "bbox": [ + 0.638, + 0.495, + 0.756, + 0.554 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.638, + 0.557, + 0.756, + 0.581 + ], + "angle": 0, + "content": "(e) Attention map for poisoned model" + }, + { + "type": "image", + "bbox": [ + 0.758, + 0.495, + 0.876, + 0.554 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.758, + 0.557, + 0.876, + 0.581 + ], + "angle": 0, + "content": "(f) Attention map for best model" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.595, + 0.636, + 0.653 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.517, + 0.657, + 
0.756, + 0.692 + ], + "angle": 0, + "content": "(g) Backdoor image (h) Attention map for with imperceptible poisoned model noise: Wanet" + }, + { + "type": "image", + "bbox": [ + 0.638, + 0.595, + 0.756, + 0.653 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.638, + 0.657, + 0.756, + 0.68 + ], + "angle": 0, + "content": "(h) Attention map for poisoned model" + }, + { + "type": "image", + "bbox": [ + 0.758, + 0.594, + 0.876, + 0.653 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.758, + 0.657, + 0.876, + 0.68 + ], + "angle": 0, + "content": "(i) Attention map for best model" + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.705, + 0.892, + 0.731 + ], + "angle": 0, + "content": "Figure 5. Attention map comparison between our model (weighted \\(\\mathrm{CL} +\\) attention) and backdoored models for three backdoor attacks." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.761, + 0.892, + 0.901 + ], + "angle": 0, + "content": "fended model's current state. In addition, we will explore how multimodal large language models could be used to extract more relevant KEs. While Semantic Shield is successful at defending against attacks on natural images for which there is a meaningful visual-KE alignment, it may be less successful for images such as charts or more abstract text for which clear KEs cannot be extracted. Moreover, it does not preclude the possibility of attacks against the language model via the caption. Future work should explore how the LLM can be jointly defended." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "24827" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.097, + 0.174, + 0.112 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.122, + 0.47, + 0.219 + ], + "angle": 0, + "content": "[1] Hassan Akbari, Liangzhe Yuan, Rui Qian, Wei-Hong Chuang, Shih-Fu Chang, Yin Cui, and Boqing Gong. VATT: transformers for multimodal self-supervised learning from raw video, audio and text. In Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems 2021, NeurIPS 2021, December 6-14, 2021, virtual, pages 24206-24221, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.221, + 0.471, + 0.29 + ], + "angle": 0, + "content": "[2] Hritik Bansal, Nishad Singhi, Yu Yang, Fan Yin, Aditya Grover, and Kai-Wei Chang. Cleanclip: Mitigating data poisoning attacks in multimodal contrastive learning. In ICLR 2023 Workshop on Trustworthy and Reliable Large-Scale Machine Learning Models, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.292, + 0.471, + 0.361 + ], + "angle": 0, + "content": "[3] Hritik Bansal, Nishad Singhi, Yu Yang, Fan Yin, Aditya Grover, and Kai-Wei Chang. Cleanclip: Mitigating data poisoning attacks in multimodal contrastive learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 112–123, 2023. 1, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.362, + 0.471, + 0.418 + ], + "angle": 0, + "content": "[4] Battista Biggio, Blaine Nelson, and Pavel Laskov. Poisoning attacks against support vector machines. In Proceedings of the 29th International Coference on International Conference on Machine Learning, pages 1467-1474, 2012. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.42, + 0.471, + 0.503 + ], + "angle": 0, + "content": "[5] Battista Biggio, Ignazio Pillai, Samuel Rota Bulò, Davide Ariu, Marcello Pelillo, and Fabio Roli. Is data clustering in adversarial settings secure? In Proceedings of the 2013 ACM Workshop on Artificial Intelligence and Security, page 87–98, New York, NY, USA, 2013. Association for Computing Machinery. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.505, + 0.471, + 0.573 + ], + "angle": 0, + "content": "[6] Battista Biggio, Ignazio Pillai, Samuel Rota Bulò, Davide Ariu, Marcello Pelillo, and Fabio Roli. Is data clustering in adversarial settings secure? In Proceedings of the 2013 ACM workshop on Artificial intelligence and security, pages 87–98, 2013. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.576, + 0.471, + 0.643 + ], + "angle": 0, + "content": "[7] Nicholas Carlini and Andreas Terzis. Poisoning and backdooring contrastive learning. In The Tenth International Conference on Learning Representations, ICLR 2022, Virtual Event, April 25-29, 2022. OpenReview.net, 2022. 3, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.646, + 0.471, + 0.715 + ], + "angle": 0, + "content": "[8] Bryant Chen, Wilka Carvalho, Nathalie Baracaldo, Heiko Ludwig, Benjamin Edwards, Taesung Lee, Ian Molloy, and Biplav Srivastava. Detecting backdoor attacks on deep neural networks by activation clustering. In Workshop on Artificial Intelligence Safety. CEUR-WS, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.718, + 0.471, + 0.772 + ], + "angle": 0, + "content": "[9] Jian Chen, Xuxin Zhang, Rui Zhang, Chen Wang, and Ling Liu. De-pois: An attack-agnostic defense against data poisoning attacks. IEEE Transactions on Information Forensics and Security, 16:3412-3425, 2021. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.775, + 0.47, + 0.844 + ], + "angle": 0, + "content": "[10] Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E Gonzalez, et al. Vicuna: An open-source chatbot impressing gpt-4 with \\(90\\%\\) * chatgpt quality. 2023. URL https://lmsys.org/blog/2023-03-30-vicuna, 1(2):3. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[11] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.122, + 0.471, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.161 + ], + "angle": 0, + "content": "Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2019, Minneapolis, MN, USA, June 2-7, 2019, Volume 1 (Long and Short Papers), pages 4171-4186. Association for Computational Linguistics, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.163, + 0.892, + 0.216 + ], + "angle": 0, + "content": "[12] Khoa Doan, Yingjie Lao, and Ping Li. Backdoor attack with imperceptible input and latent modification. Advances in Neural Information Processing Systems, 34:18944-18957, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.219, + 0.892, + 0.273 + ], + "angle": 0, + "content": "[13] Khoa Doan, Yingjie Lao, Weijie Zhao, and Ping Li. Lira: Learnable, imperceptible and robust backdoor attacks. In Proceedings of the IEEE/CVF international conference on computer vision, pages 11966-11976, 2021. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.274, + 0.892, + 0.342 + ], + "angle": 0, + "content": "[14] Vishnu Sashank Dorbala, Gunnar A Sigurdsson, Jesse Thomason, Robinson Piramuthu, and Gaurav S Sukhatme. Clip-nav: Using clip for zero-shot vision-and-language navigation. In Workshop on Language and Robotics at CoRL 2022, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.343, + 0.892, + 0.399 + ], + "angle": 0, + "content": "[15] Fangxiang Feng, Xiaojie Wang, and Ruifan Li. Cross-modal retrieval with correspondence autoencoder. In Proceedings of the 22nd ACM international conference on Multimedia, pages 7-16, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.4, + 0.892, + 0.455 + ], + "angle": 0, + "content": "[16] Felipe González-Pizarro and Savvas Zannettou. Understanding and detecting hateful content using contrastive learning. In Proceedings of the International AAAI Conference on Web and Social Media, pages 257–268, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.456, + 0.892, + 0.496 + ], + "angle": 0, + "content": "[17] Tianyu Gu, Kang Liu, Brendan Dolan-Gavitt, and Siddharth Garg. Badnets: Evaluating backdooring attacks on deep neural networks. IEEE Access, 7:47230-47244, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.497, + 0.892, + 0.552 + ], + "angle": 0, + "content": "[18] Jonathan Hayase, Weihao Kong, Raghav Somani, and Sewoong Oh. Spectre: Defending against backdoor attacks using robust statistics. In International Conference on Machine Learning, pages 4129-4139. PMLR, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.554, + 0.892, + 0.607 + ], + "angle": 0, + "content": "[19] Chenguang Huang, Oier Mees, Andy Zeng, and Wolfram Burgard. Visual language maps for robot navigation. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pages 10608-10615. IEEE, 2023. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.609, + 0.892, + 0.663 + ], + "angle": 0, + "content": "[20] Kunzhe Huang, Yiming Li, Baoyuan Wu, Zhan Qin, and Kui Ren. Backdoor defense via decoupling the training process. In International Conference on Learning Representations, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.665, + 0.892, + 0.746 + ], + "angle": 0, + "content": "[21] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In International conference on machine learning, pages 4904-4916. PMLR, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.748, + 0.892, + 0.816 + ], + "angle": 0, + "content": "[22] Marius Kloft and Pavel Laskov. Online anomaly detection under adversarial impact. In Proceedings of the thirteenth international conference on artificial intelligence and statistics, pages 405-412. JMLR Workshop and Conference Proceedings, 2010. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.818, + 0.892, + 0.871 + ], + "angle": 0, + "content": "[23] Pang Wei Koh and Percy Liang. Understanding black-box predictions via influence functions. In International conference on machine learning, pages 1885-1894. PMLR, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.873, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[24] Changjiang Li, Ren Pang, Zhaohan Xi, Tianyu Du, Shouling Ji, Yuan Yao, and Ting Wang. An embarrassingly simple" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "24828" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.093, + 0.469, + 0.134 + ], + "angle": 0, + "content": "backdoor attack on self-supervised learning. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4367-4378, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.136, + 0.469, + 0.207 + ], + "angle": 0, + "content": "[25] Junnan Li, Ramprasaath Selvaraju, Akhilesh Gotmare, Shafiq Joty, Caiming Xiong, and Steven Chu Hong Hoi. Align before fuse: Vision and language representation learning with momentum distillation. Advances in neural information processing systems, 34:9694-9705, 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.208, + 0.469, + 0.289 + ], + "angle": 0, + "content": "[26] Junnan Li, Dongxu Li, Caiming Xiong, and Steven C. H. Hoi. BLIP: bootstrapping language-image pre-training for unified vision-language understanding and generation. In International Conference on Machine Learning, ICML 2022, 17-23 July 2022, Baltimore, Maryland, USA, pages 12888-12900. PMLR, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.291, + 0.469, + 0.387 + ], + "angle": 0, + "content": "[27] Yige Li, Xixiang Lyu, Nodens Koren, Lingjuan Lyu, Bo Li, and Xingjun Ma. Anti-backdoor learning: Training clean models on poisoned data. In Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems 2021, NeurIPS 2021, December 6-14, 2021, virtual, pages 14900-14912, 2021. 1, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.39, + 0.469, + 0.473 + ], + "angle": 0, + "content": "[28] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.476, + 0.469, + 0.532 + ], + "angle": 0, + "content": "[29] Min Liu, Alberto Sangiovanni-Vincentelli, and Xiangyu Yue. 
Beating backdoor attack at its own game. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4620-4629, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.533, + 0.469, + 0.588 + ], + "angle": 0, + "content": "[30] Yang Liu, Mingyuan Fan, Cen Chen, Ximeng Liu, Zhuo Ma, Li Wang, and Jianfeng Ma. Backdoor defense with machine unlearning. In IEEE INFOCOM 2022-IEEE Conference on Computer Communications, pages 280-289. IEEE, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.59, + 0.469, + 0.657 + ], + "angle": 0, + "content": "[31] Arjun Majumdar, Gunjan Aggarwal, Bhavika Devnani, Judy Hoffman, and Dhruv Batra. Zson: Zero-shot object-goal navigation using multimodal goal embeddings. Advances in Neural Information Processing Systems, 35:32340-32352, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.66, + 0.469, + 0.728 + ], + "angle": 0, + "content": "[32] Sachit Menon and Carl Vondrick. Visual classification via description from large language models. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.731, + 0.469, + 0.773 + ], + "angle": 0, + "content": "[33] Tuan Anh Nguyen and Anh Tuan Tran. Wanet - imperceptible warping-based backdoor attack. In International Conference on Learning Representations, 2021. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.775, + 0.469, + 0.844 + ], + "angle": 0, + "content": "[34] Huy Phan, Cong Shi, Yi Xie, Tianfang Zhang, Zhuohang Li, Tianming Zhao, Jian Liu, Yan Wang, Yingying Chen, and Bo Yuan. Ribac: Towards robust and imperceptible backdoor a stack against compact dnn. In European Conference on Computer Vision, pages 708-724. Springer, 2022. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[35] Han Qiu, Yi Zeng, Shangwei Guo, Tianwei Zhang, Meikang Qiu, and Bhavani Thuraisingham. Deepsweep: An evaluation framework for mitigating dnn backdoor attacks using data augmentation. In Proceedings of the 2021 ACM" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.093, + 0.469, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "Asia Conference on Computer and Communications Security, pages 363-377, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.122, + 0.892, + 0.206 + ], + "angle": 0, + "content": "[36] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, 2021. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.207, + 0.892, + 0.248 + ], + "angle": 0, + "content": "[37] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with CLIP latents. CoRR, abs/2204.06125, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.25, + 0.894, + 0.305 + ], + "angle": 0, + "content": "[38] Aniruddha Saha, Akshayvarun Subramanya, and Hamed Pirsiavash. Hidden trigger backdoor attacks. In Proceedings of the AAAI conference on artificial intelligence, pages 11957-11965, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.308, + 0.892, + 0.389 + ], + "angle": 0, + "content": "[39] Aniruddha Saha, Ajinkya Tejankar, Soroush Abbasi Koohpayegani, and Hamed Pirsivash. Backdoor attacks on self-supervised learning. 
In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2022, New Orleans, LA, USA, June 18-24, 2022, pages 13327-13336. IEEE, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.392, + 0.892, + 0.488 + ], + "angle": 0, + "content": "[40] Ali Shafahi, W. Ronny Huang, Mahyar Najibi, Octavian Suciu, Christoph Studer, Tudor Dumitras, and Tom Goldstein. Poison frogs! targeted clean-label poisoning attacks on neural networks. In Proceedings of the 32nd International Conference on Neural Information Processing Systems, page 6106-6116, Red Hook, NY, USA, 2018. Curran Associates Inc. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.491, + 0.892, + 0.572 + ], + "angle": 0, + "content": "[41] Wonyoung Shin, Jonghun Park, Taekang Woo, Yongwoo Cho, Kwangjin Oh, and Hwanjun Song. e-clip: Large-scale vision-language representation learning in e-commerce. In Proceedings of the 31st ACM International Conference on Information & Knowledge Management, pages 3484–3494, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.576, + 0.892, + 0.631 + ], + "angle": 0, + "content": "[42] Andreas Steiner, Alexander Kolesnikov, Xiaohua Zhai, Ross Wightman, Jakob Uszkoreit, and Lucas Beyer. How to train your vit? data, augmentation, and regularization in vision transformers. Trans. Mach. Learn. Res., 2022, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.633, + 0.892, + 0.701 + ], + "angle": 0, + "content": "[43] Di Tang, XiaoFeng Wang, Haixu Tang, and Kehuan Zhang. Demon in the variant: Statistical analysis of {DNNs} for robust backdoor contamination detection. In 30th USENIX Security Symposium (USENIX Security 21), pages 1541-1558, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.704, + 0.892, + 0.773 + ], + "angle": 0, + "content": "[44] Christopher Thomas and Adriana Kovashka. Preserving semantic neighborhoods for robust cross-modal retrieval. 
In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XVIII 16, pages 317-335. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.775, + 0.892, + 0.858 + ], + "angle": 0, + "content": "[45] Vale Tolpegin, Stacey Truex, Mehmet Emre Gursoy, and Ling Liu. Data poisoning attacks against federated learning systems. In Computer Security-ESORICS 2020: 25th European Symposium on Research in Computer Security, ES-ORICS 2020, Guildford, UK, September 14–18, 2020, Proceedings, Part I 25, pages 480–501. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.86, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[46] Brandon Tran, Jerry Li, and Aleksander Madry. Spectral signatures in backdoor attacks. Advances in neural information processing systems, 31, 2018. 3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "24829" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[47] Wei Lun Tsai, Jacob J Lin, and Shang-Hsien Hsieh. Generating construction safety observations via clip-based image-language embedding. In European Conference on Computer Vision, pages 366-381. Springer, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.149, + 0.47, + 0.218 + ], + "angle": 0, + "content": "[48] Haotao Wang, Junyuan Hong, Aston Zhang, Jiayu Zhou, and Zhangyang Wang. Trap and replace: Defending backdoor attacks by trapping them into an easy-to-replace subnetwork. Advances in neural information processing systems, 35:36026-36039, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.221, + 0.469, + 0.276 + ], + "angle": 0, + "content": "[49] Lin Wang and Jie Chen. Improving radiology report generation with adaptive attention. 
In Multimodal AI in healthcare: A paradigm shift in health intelligence, pages 293-305. Springer, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.278, + 0.469, + 0.346 + ], + "angle": 0, + "content": "[50] Longzheng Wang, Chuang Zhang, Hongbo Xu, Yongxiu Xu, Xiaohan Xu, and Siqi Wang. Cross-modal contrastive learning for multimodal fake news detection. In Proceedings of the 31st ACM International Conference on Multimedia, pages 5696-5704, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.349, + 0.469, + 0.431 + ], + "angle": 0, + "content": "[51] Zhenting Wang, Juan Zhai, and Shiqing Ma. Bppattack: Stealthy and efficient trojan attacks against deep neural networks via image quantization and contrastive adversarial learning. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2022, New Orleans, LA, USA, June 18-24, 2022, pages 15054-15063. IEEE, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.433, + 0.469, + 0.488 + ], + "angle": 0, + "content": "[52] Sandamal Weerasinghe, Tansu Alpcan, Sarah M Erfani, and Christopher Leckie. Defending support vector machines against data poisoning attacks. IEEE Transactions on Information Forensics and Security, 16:2566-2578, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.49, + 0.469, + 0.531 + ], + "angle": 0, + "content": "[53] Dongxian Wu and Yisen Wang. Adversarial neuron pruning purifies backdoored deep models. Advances in Neural Information Processing Systems, 34:16913-16925, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.533, + 0.469, + 0.588 + ], + "angle": 0, + "content": "[54] Huang Xiao, Battista Biggio, Gavin Brown, Giorgio Fumera, Claudia Eckert, and Fabio Roli. Is feature selection secure against training data poisoning? In International conference on machine learning, pages 1689-1698. PMLR, 2015. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.59, + 0.469, + 0.645 + ], + "angle": 0, + "content": "[55] Wenhan Yang, Jingdong Gao, and Baharan Mirzasoleiman. Robust contrastive language-image pretraining against data poisoning and backdoor attacks. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.647, + 0.469, + 0.728 + ], + "angle": 0, + "content": "[56] Ziqing Yang, Xinlei He, Zheng Li, Michael Backes, Mathias Humbert, Pascal Berrang, and Yang Zhang. Data poisoning attacks against multimodal encoders. In International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, pages 39299-39313. PMLR, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.732, + 0.469, + 0.813 + ], + "angle": 0, + "content": "[57] Ziqing Yang, Xinlei He, Zheng Li, Michael Backes, Mathias Humbert, Pascal Berrang, and Yang Zhang. Data poisoning attacks against multimodal encoders. In International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, pages 39299-39313. PMLR, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.816, + 0.469, + 0.871 + ], + "angle": 0, + "content": "[58] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions. Trans. Assoc. Comput. Linguistics, 2:67-78, 2014. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[59] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "captioners are image-text foundation models. 
Transactions on Machine Learning Research, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.122, + 0.892, + 0.177 + ], + "angle": 0, + "content": "[60] Yi Zeng, Si Chen, Won Park, Zhuoqing Mao, Ming Jin, and Ruoxi Jia. Adversarial unlearning of backdoors via implicit hypergradient. In International Conference on Learning Representations, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.179, + 0.892, + 0.247 + ], + "angle": 0, + "content": "[61] Xiaohua Zhai, Xiao Wang, Basil Mustafa, Andreas Steiner, Daniel Keysers, Alexander Kolesnikov, and Lucas Beyer. Lit: Zero-shot transfer with locked-image text tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18123-18133, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.249, + 0.892, + 0.318 + ], + "angle": 0, + "content": "[62] Hanwang Zhang, Yang Yang, Huanbo Luan, Shuicheng Yang, and Tat-Seng Chua. Start from scratch: Towards automatically identifying, modeling, and naming visual attributes. In Proceedings of the 22nd ACM international conference on Multimedia, pages 187-196, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.32, + 0.892, + 0.373 + ], + "angle": 0, + "content": "[63] Jinghuai Zhang, Hongbin Liu, Jinyuan Jia, and Neil Zhenqiang Gong. Corruptencoder: Data poisoning based backdoor attacks to contrastive learning. arXiv preprint arXiv:2211.08229, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.376, + 0.892, + 0.431 + ], + "angle": 0, + "content": "[64] Ying Zhang and Huchuan Lu. Deep cross-modal projection learning for image-text matching. In Proceedings of the European conference on computer vision (ECCV), pages 686-701, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.433, + 0.892, + 0.5 + ], + "angle": 0, + "content": "[65] Yuhao Zhang, Hang Jiang, Yasuhide Miura, Christopher D Manning, and Curtis P Langlotz. 
Contrastive learning of medical visual representations from paired images and text. In Machine Learning for Healthcare Conference, pages 2-25. PMLR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.503, + 0.892, + 0.558 + ], + "angle": 0, + "content": "[66] Bingyin Zhao and Yingjie Lao. Towards class-oriented poisoning attacks against neural networks. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 3741-3750, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.56, + 0.892, + 0.627 + ], + "angle": 0, + "content": "[67] Yangming Zhou, Yuzhou Yang, Qichao Ying, Zhenxing Qian, and Xinpeng Zhang. Multimodal fake news detection via clip-guided learning. In 2023 IEEE International Conference on Multimedia and Expo (ICME), pages 2825-2830. IEEE, 2023. 2" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.627 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "24830" + } + ] +] \ No newline at end of file diff --git a/2024/Semantic Shield_ Defending Vision-Language Models Against Backdooring and Poisoning via Fine-grained Knowledge Alignment/811b225c-f7ba-4d5f-980e-3c936fd65339_origin.pdf b/2024/Semantic Shield_ Defending Vision-Language Models Against Backdooring and Poisoning via Fine-grained Knowledge Alignment/811b225c-f7ba-4d5f-980e-3c936fd65339_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a4ea5407bffaa1cff668b75cafbf39617a23f7d0 --- /dev/null +++ b/2024/Semantic Shield_ Defending Vision-Language Models Against Backdooring and Poisoning via Fine-grained Knowledge Alignment/811b225c-f7ba-4d5f-980e-3c936fd65339_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:989b9b579dc7d76a57f3b14d868791f816259c09f15f926d416f64ae01d9f213 +size 5501745 diff --git a/2024/Semantic Shield_ Defending Vision-Language Models Against 
Backdooring and Poisoning via Fine-grained Knowledge Alignment/full.md b/2024/Semantic Shield_ Defending Vision-Language Models Against Backdooring and Poisoning via Fine-grained Knowledge Alignment/full.md new file mode 100644 index 0000000000000000000000000000000000000000..405a3eab6cea5fc892919738939a743768344944 --- /dev/null +++ b/2024/Semantic Shield_ Defending Vision-Language Models Against Backdooring and Poisoning via Fine-grained Knowledge Alignment/full.md @@ -0,0 +1,342 @@ +# Semantic Shield: Defending Vision-Language Models Against Backdoors and Poisoning via Fine-grained Knowledge Alignment + +Alvi Md Ishmam Virginia Tech alvi@vt.edu + +Christopher Thomas Virginia Tech christhomas@vt.edu + +# Abstract + +In recent years there has been enormous interest in vision-language models trained using self-supervised objectives. However, the use of large-scale datasets scraped from the web for training also makes these models vulnerable to potential security threats, such as backdooring and poisoning attacks. In this paper, we propose a method for mitigating such attacks on contrastively trained vision-language models. Our approach leverages external knowledge extracted from a language model to prevent models from learning correlations between image regions which lack strong alignment with external knowledge. We do this by imposing constraints to enforce that attention paid by the model to visual regions is proportional to the alignment of those regions with external knowledge. We conduct extensive experiments using a variety of recent backdooring and poisoning attacks on multiple datasets and architectures. Our results clearly demonstrate that our proposed approach is highly effective at defending against such attacks across multiple settings, while maintaining model utility and without requiring any changes at inference time. + +# 1. 
Introduction + +Recent years have seen enormous interest in vision-language models trained on web-scale image-captioning data using contrastive objectives [25, 36] and text generation objectives [59]. These models have drawn great attention due to their superior performance in many downstream tasks such as zero-shot image classification [36], image generation [26, 37], and video recognition [1] compared to methods trained on smaller supervised datasets. + +Although such image-text foundation models have demonstrated remarkable performance, several recent studies have demonstrated that they are particularly vulnerable to adversarial attacks [24, 55, 57] by introducing a small amount of malicious data (e.g. 75 instances out of 3 million [57]) into the training data. Practically, this can be achieved + +![](images/9fc2ddff11e022fa7cd8f2b0fbae92c212ed2ea4baae6e487ec10a0e545d8670.jpg) +Figure 1. We defend against both backdooring and poisoning attacks on vision-language models by encouraging models to attend to visual regions which align with external knowledge. Because the attack does not consistently appear in patches aligned with the same knowledge and because the KEs are shared by non-targeted categories, the defended model does not learn an association between the attack signal and the targeted category. + +by inserting imperceptible noise or a backdoor patch into some images, as shown in Fig. 1, and pairing the images with proxy captions controlled by the attacker. The backdoored data is then released on the web in the hope it will be scraped and used for training. Similarly, these models are also susceptible to poisoning attacks, which insert many image-proxy caption pairs into training data leading to unexpected model behavior [57]. Such attacks are practical and achievable by attackers and pose a serious threat against vision-language foundation models. + +To defend against such attacks, a number of methods have been proposed. 
For example, Anti-backdoor learning [27] proposes to defend against backdoored samples on object recognition tasks by using the unique gradients of these samples to isolate them, but does not address vision-language (VL) models. More similar to our work, CleanCLIP [3] proposes a method for defending contrastive VL models against backdoorsing, but does not address nonbackdoored poisoning attacks as we do. While [57] propose + +to clean labeled data to mitigate the impact of poisoning, no prior work has proposed a unified defense mechanism for contrastively trained VL models that is effective against both backdoorsing and poisoning attacks. + +To address this urgent need, we propose a defense method for VL models that defends against both backdooring and poisoning attacks. Our method can also be deployed in object recognition settings, by casting it as a text retrieval problem following [36]. Our method is motivated by the following insight. We note that attacks rely on having models learn correlations between a particular visual signal and target. However, these targeted images share lower-level semantic concepts with other, non-targeted categories (See Fig. 1). As a consequence, the attack tends not to affect the model's representation of these concepts. + +Moreover, in the case of backdoorsing, the attack signal is applied to various images whose semantics change in the region on which the attack is applied. For example, in one image the attack may cover a batch associated with paw, while in another image the signal is associated with sharp teeth. Thus, the model fails to learn an association between the attack signal and these lower-level semantics. We refer to these lower-level semantic concepts associated with objects or captions as Knowledge Elements (KEs). KEs consist of semantic attributes (e.g. round), but also subobjects (e.g. paw), and relations. 
Our defense mechanism aligns with how humans understand semantics of objects or sentences: as collections of semantic units which combine together to form higher-level concepts that are more abstract, compositional and include actions ("running") and proto-objects ("four-legged animal"). We propose to encourage models to rely more heavily on relevant lower level semantics when producing their representations. As a consequence, our models are much more resistant to attacks. + +Our method works by learning an alignment between image patches from images and a set of KEs associated with each image caption. To discover associated KEs, prior to training our model we prompt a large language model (Vicuna [10]) to list possible KEs for each caption. We next perform contrastive image_caption training, but add several new objectives. First, we enforce an alignment between image patches and KEs using a novel multi-instance learning based constraint, since we do not know which patches go with which KEs. While this aligns image patches and KEs, it does not prevent the model from relying on the attacker's visual signal when computing its representation. Thus, we also propose a second constraint which enforces that the model's attention to patches is proportional to each patch's alignment with a KE. That is, if a patch has a low alignment with all KEs, the patch should have a low effect on the model's representation. Finally, we observe that for attacked samples, the overall patch-KE alignment is much lower. We thus introduce a dynamic per-sample weight term + +on the contrastive loss based on the overall alignment of the KEs with the image's patches. This has the effect of downweighting the effect of poisoned samples during training. We evaluate our defense method, Semantic Shield, against multiple recent attacks and defenses on multiple datasets. We observe that Semantic Shield significantly outperforms prior defenses across multiple settings. 
Our defense technique adds very little overhead at train time, while making models significantly more robust to a wide variety of attacks. The major contributions of this paper are as follows: + +- We propose an approach, Semantic Shield for defending against backdoorsing and poisoning attacks on contrastively trained vision-language models by enforcing knowledge-guided train-time constraints. +- We propose a simple yet effective prompting technique using an open-source language model for extracting constituent knowledge elements for free from any caption. +- We perform a comprehensive experimental evaluation using a number of recent backdoors and poisoning attacks on two datasets. Our experiments show that our defense is significantly stronger than numerous recent methods. + +# 2. Related Work + +# 2.1. Vision-language contrastive learning + +In recent years, large-scale contrastively trained vision-language foundation models have demonstrated remarkable performance on a number of downstream tasks, even surpassing the performance of supervised models in some cases [25, 36, 59, 61]. While contrastive approaches have been used to align visual and textual embeddings for years [15, 44, 62, 64], recent approaches such as CLIP [36] and ALIGN [21] have demonstrated how training on hundreds of millions of image-caption pairs scraped from the web can yield powerful generalist image-text foundation models which can be applied to many downstream tasks. CLIP-inspired contrastively trained models have found widespread use in many security-critical applications, including navigation [14, 19, 31], healthcare [49, 65], worksite safety [47], disinformation detection [50, 67], and many others [16, 41]. Given their widespread use, it is critical that contrastively trained vision-language models perform in safe and expected ways. 
Our work adopts the standard two-stream contrastive architecture proposed in [36] and demonstrates how such models can be defended against potential attacks lurking within webly-harvested data. + +# 2.2. Poisoning and backdoor attacks + +Data poisoning attacks [4, 45, 54, 66], which have been proposed in both supervised [23] and unsupervised [6, 22] settings, involve introducing mistrabeled (or misaligned) data into the model's training set. At test time, models behave in unexpected and attacker-influenced ways when presented + +with the poisoned examples seen during training. While targeted poisoning attacks target specific examples introduced during training, backdoor attacks can be applied to any image. Backdoorsing attacks are a type of data poisoning attack where an attacker introduces a spurious signal, such as patches [17, 38] or imperceptible perturbations [12, 13, 33, 34] into an image. Models learn to associate the introduced signal with the targeted concept. While poisoning and backdoor attacks have traditionally targeted supervised learning settings, recent work has shown that contrastively trained vision-language models are particularly vulnerable [7, 63]. [7] show that by introducing as few as 3 out of 3 million samples, an attacker can execute a successful attack. This is a highly practical attack, as an attacker can release large amounts of poisoned data on the internet in the hopes that it will be scraped and later used for training. In our work, we demonstrate that our method is highly effective against a number of recent backdoorsing methods and poisoning attacks on contrastive models. + +# 2.3. Defending against attacks + +Given the large potential risks posed by attacks to models, extensive research has been conducted on approaches for defending models against both poisoning [9, 52] and backdooring [18, 20, 48] attacks. 
Defenses can be broadly categorized into methods for detecting and removing attacked samples from training [8, 43, 46], those that remove backdoors already learned by models [30, 53, 60], and those that seek to prevent models from learning backdoors by decreasing their effectiveness [2, 27, 35]. Unfortunately, detection-based methods often fail to detect all backdoors and given the particular vulnerability of contrastive models, imperfect filtering could still result in model poisoning. Unlike our approach, model de-poisoning methods often fail to achieve similar performance to clean models [29]. + +Of particular relevance to our work are methods aimed at defending against poisoning and backdooring for vision-language contrastive learning [3]. [3] propose to independently realign representations from different modalities. Unlike this approach, our method learns a fine-grained alignment between external knowledge extracted from a large language model and visual regions. These alignments are then used as a penalty to prevent models from attending to non-aligned visual regions. Our method substantially outperforms [3] across all settings. + +# 3. Problem setting + +# 3.1. Threat model + +Adversary objective. Given a vision-language contrastive learning model $\mathcal{M}$ , an adversary aims to compromise the model by injecting a small amount of poisoned data $\mathcal{D}_p$ into a clean dataset $\mathcal{D}_c$ , both of which constitute the training + +data $D$ . The model trained on the poisoned training data is denoted as $\mathcal{M}_p$ . In this paper, we consider two types of attacks: 1) backdooring and 2) poisoning. In a backdoor attack, the adversary overlays either a small patch or some visually imperceptible noise on an image, causing the backdoored image to be misclassified or incorrectly retrieved by a retrieval model. During testing, the adversary cause the model to misclassify or retrieve a specific class by inserting the backdoor into test images. 
In contrast, in a poisoning attack, the goal is to cause the model $\mathcal{M}_p$ to associate a targeted set of text with images of a specified class by inserting many training instances which incorrectly associate visual content with concepts controlled by the adversary. In both cases, the poisoned model is expected to maintain similar utility (performance) compared to the clean model. + +Adversary capabilities. We consider an adversary capable of injecting a small number of poisonous samples into the training dataset, similar to prior work [5]. In traditional supervised attacks [39, 40], adversaries were required to modify a large amount of the training data - an impractical setting for vision-language models trained on web-scale data. Our setting is more realistic, because achieving a high poisoning rate is improbable when poisoned data is released on the internet with the hope of it being scraped for training. Thus, we focus on the more feasible scenario and assume a relatively low poisoning rate. We assume a black-box setting, where the adversary lacks knowledge of the target model's architecture and hyperparameters. Additionally, the adversary lacks control over the training process. + +# 3.2. Attack methodology + +Model training. We denote our training data as $(i,t)\in$ $\mathcal{D} = \mathcal{I}\times \mathcal{T}$ where $\mathcal{D},\mathcal{I}$ and $\mathcal{T}$ represent the training set, image set, and text set, respectively. Within a collection of $\mathcal{N}$ image-text pairs, we identify $(i_j,t_k)$ as a positive pair if $j = k$ ; otherwise, it is considered a negative pair. The contrastive learning model concurrently optimizes the image encoder $\mathcal{E}_i$ and the text encoder $\mathcal{E}_t$ to maximize the similarity between the embeddings of positive pairs in a batch while minimizing that of negative pairs. 
Specifically, for a given batch of $\mathcal{N}$ image-text pairs, we obtain the image embedding $I_{j}^{e} = \mathcal{E}_{i}(i_{j})$ and the corresponding text embedding $T_{k}^{e} = \mathcal{E}_{t}(t_{k})$ for each pair, normalizing both embeddings using the $L_{2}$ norm. The cross-modal contrastive loss $\mathcal{L}_{CL}$ is then computed as follows: + +$$ +\begin{array}{l} \mathcal {L} _ {C L} = - \frac {1}{2 \mathcal {N}} \left(\sum_ {j = 1} ^ {\mathcal {N}} \log \frac {\exp \left(\sigma \left(I _ {j} ^ {e} , T _ {j} ^ {e}\right) / \tau\right)}{\sum_ {k = 1} ^ {\mathcal {N}} \exp \left(\sigma \left(I _ {j} ^ {e} , T _ {k} ^ {e}\right) / \tau\right)} \right. \tag {1} \\ \left. + \sum_ {k = 1} ^ {\mathcal {N}} \log \frac {\exp \left(\sigma \left(I _ {k} ^ {e} , T _ {k} ^ {e}\right) / \tau\right)}{\sum_ {j = 1} ^ {\mathcal {N}} \exp \left(\sigma \left(I _ {j} ^ {e} , T _ {k} ^ {e}\right) / \tau\right)}\right) \\ \end{array} +$$ + +where $\sigma (.,.)$ is the product between the image and text embeddings (their similarity) and $\tau$ denotes the temperature. + +Backdoor attack. A successful backdoor attack introduces a trigger into a model so that when the trigger is present in the input image (dog), the model incorrectly associates the image with the specific target class (boat caption) controlled by the attacker. We applied backdoor attacks to poison multimodal contrastive learning models, following the approach in [7]. We consider two types of backdoor attacks: a) overlaying a backdoor trigger, such as a $(16 \times 16$ patch), on a small subset of training images, and b) injecting imperceptible noise into a limited subset of images. The latter is considered a stealthy backdoor attack. We classify the BPP [51] and Wanet [33] attacks as stealthy, because they pose a challenge for human identification due to their subtle and imperceptible nature. 
To perform our backdoor attack, we construct the poisoning dataset $D_{p} = \{(I_{i} \oplus \mathbf{bd}), T_{i}^{y^{\prime}} : I_{i} \in D_{subset}\}$ , by embedding a backdoor trigger bd (e.g. a $16 \times 16$ patch or imperceptible noise) in a small subset of training images, $D_{subset} \subset D$ , $T_{i}^{y^{\prime}} \in T^{y^{\prime}}$ , where $y^{\prime}$ is target class. + +Single target label attack. In this poisoning attack, an adversary aims to associate images from one class e.g. (dog) with captions from another class e.g. (boat). The attack can be formulated as $(i,t)|i\in I_{train}^{A},t\in T_{train}^{B}$ , where $A$ and $B$ are the original and the target classes, respectively. Given a caption $t\in T_{test}^{B}$ , we expect the model to retrieve images from $I_{test}^{A}$ as the most relevant. We poison the model to build a strong relationship between images in class $A$ and captions in class $B$ , even if the test images and captions are unseen at training time. + +Multiple target label attack. An adversary can extend the "single target label" attack by poisoning multiple target classes simultaneously, i.e. images from multiple original classes can be mapped to multiple target classes in captions. In this setting, the poisoning goal is defined as $\mathcal{D}_p = (A_1,B_1),(A_2,B_2),\dots,(A_n,B_n)$ where $A_{i}\in I^{A}$ and $B_{i}\in T^{B}$ . $I^A$ and $T^B$ represent images and captions from classes $A$ and $B$ respectively. + +# 4. Approach + +In this section, we introduce our framework for mitigating backdooring and poisoning attacks on vision-language models. Backdoor attacks on multimodal contrastive learning are effective because models learn a correlation between the backdoor trigger either in a form of patch or imperceptible noise added to the image and the target concept in the paired captions. 
The core intuition behind our approach stems from human perception, where sets of lower level semantic concepts play a key role in distinguishing objects. See Fig. 1. These semantic concepts consist of semantic attributes (e.g. "thick fur", "rough green texture"), but also parts of objects (e.g. paws, whiskers). We term these identifiable properties knowledge elements (KEs). Our core intu + +ition is that backdoorsing and poisoning attacks are effective because models learn spurious correlations between the visual content and the target label. However, because other non-backed classes also share some of the same KEs, models will not learn an association between the KEs and the spurious visual signal. Thus, we propose to leverage KEs to prevent models from relying on such correlations in their representations. + +# 4.1. Aligning patches to knowledge elements + +The traditional contrastive learning objective encourages image embedding $\mathcal{I}_i^e$ and text embedding $\mathcal{T}_i^e$ to be close. However, in addition to this, we enforce that image patch embeddings $\mathcal{I}_i^{patch}$ and associated KE embeddings $\kappa \mathcal{E}_i^e$ to also be close. Our key observation is that because backdoor signals are injected in random locations of the image which do not necessarily contain a KE, the similarity between these patches and KE embeddings should be lower compared to others. Even if by chance the area covered by the attack does contain KEs, the affected KEs will not be the same when the attack is performed on a different image, preventing the model from learning an association between the attack perturbation and the KEs. Based on this intuition, our model first learns to align patches and KEs using a contrastive constraint, $\mathcal{L}_{KE}$ . This learned alignment will later be used to prevent the model from attending to potentially attacked patches. 
To learn the patch-KE alignment, we first compute the maximum and minimum patch-KE similarity per category per sample as + +$$ +\omega_ {i} ^ {c} = \max _ {q \in m} \left(\sum_ {p = 1} ^ {n} \sum_ {q = 1} ^ {m} \mathcal {I} _ {p} ^ {\text {p a t c h}} \cdot \left(\mathcal {K E} _ {q} ^ {c}\right) ^ {e}\right) \tag {2} +$$ + +$$ +\hat {\omega} _ {i} ^ {c} = \min _ {q \in m} \left(\sum_ {p = 1} ^ {n} \sum_ {q = 1} ^ {m} \mathcal {I} _ {p} ^ {\text {p a t c h}} \cdot \left(\mathcal {K} \mathcal {E} _ {q} ^ {c}\right) ^ {e}\right) \tag {3} +$$ + +where $n$ is the number of patches per image, $m$ is the number of KEs per object category, and $c \in C$ , where $C$ is the number of object categories. $(\mathcal{K}\mathcal{E}_q^c)^e$ is the per KE embedding per category. Note that our approach also extends to image-text datasets without any defined object categories or labels. In this case, we treat each image-caption pair as its own "category" with a set of knowledge elements and $C$ is the same as the batch size. The objective function for patch-KE similarity is therefore given by + +$$ +\begin{array}{l} \mathcal {L} _ {K E} = - \frac {1}{2 \mathcal {N}} \left(\sum_ {i = 1} ^ {\mathcal {N}} \sum_ {c = 1} ^ {C} y _ {i} ^ {c} \log \left(\sigma \left(\omega_ {i} ^ {c}\right)\right) \right. \tag {4} \\ \left. + \sum_ {i = 1} ^ {\mathcal {N}} \sum_ {c = 1} ^ {C} \left(1 - y _ {i} ^ {c}\right) \log \left(1 - \sigma \left(\hat {\omega} _ {i} ^ {c}\right)\right)\right) \\ \end{array} +$$ + +where $\sigma$ is the sigmoid function and $y_{i}^{c}$ is the multi-label ground truth information per sample per category. Note + +![](images/5a7aed52291d17ac75687621b7a53ae5b4a9d3a20e026caeec006cbc6cea3bcd.jpg) +Figure 2. Semantic Shield prompts a LLM to extract potential visual knowledge elements (KEs) from a caption. Image patches are aligned with KEs via the patch-KE loss. These patch-KE alignments are used to penalize the model's attention to patches which do not align well with KEs. 
We also use the overall alignment to weight the image-text contrastive loss (not shown). + +that, summation over batch is omitted for brevity. In Eq. (2) and Eq. (3) all patches of every image compute their similarity with all KEs from the batch. We perform max/min to select either the best aligned KEs (for paired captions) or worst aligned KEs (for non paired) to prevent false negatives. We thus can fine-tune our model via a linear combination of these two objectives: + +$$ +\mathcal {L} _ {C L - K E} = \mu_ {1} \mathcal {L} _ {C L} + \mu_ {2} \mathcal {L} _ {K E} \tag {5} +$$ + +where $\mu_1 > 0$ and $\mu_2 > 0$ are hyper-parameters controlling the relative strengths of the two objective functions. + +# 4.2. Knowledge element-guided attention + +Next, we observe that the attention mechanism within the vision transformer (ViT) attends to both attacked patches and unaffected patches. This is undesirable because attention paid to attacked patches renders the output embeddings more dependent on the attack signal, and thus more vulnerable. Thus, it is imperative for ViT to allocate reduced attention to attacked patches relative to unaffected patches. Our intuition is that the model should pay more attention to image regions that align well with KEs than patches with low alignment. Thus, we leverage our patch-KE similarity scores to modulate ViT's attention by enforcing a constraint between ViT's attention and the patch-KE similarity scores. Given ViT's query, key, and value denoted as $Q, K, V$ + +respectively, the attention weight is computed as $\alpha =$ softmax $(\frac{QK^T}{\sqrt{d_k}})$ , where $d_{k}$ is the dimensionality of the key vectors. Now, the penalized attention weight can be computed based on the maximum and minimum similarity computed in Eq. (2), Eq. 
(3) as $(\alpha_i^c)_{max} = \alpha_i^c \cdot \omega_i^c$ and $(\alpha_i^c)_{min} = \alpha_i^c \cdot \hat{\omega}_i^c$. Since the similarity scores between a targeted visual region and a KE are lower than those between an unaffected patch and a KE, ViT pays less attention to attacked patches.
(2), where $\lambda_{i} = \max_{c\in C}\omega_{i}^{c}, i\in \mathcal{N}, \mu_{1}, \mu_{2} = 1$ : + +$$ +\mathcal {L} _ {C L _ {i}} = \underbrace {\frac {\exp \left(\frac {\sigma \left(I _ {i} ^ {e} , T _ {i} ^ {e}\right)}{\tau}\right)}{\sum_ {k = 1} ^ {\mathcal {N}} \exp \left(\frac {\sigma \left(I _ {i} ^ {e} , T _ {k} ^ {e}\right)}{\tau}\right)}} _ {\text {c o n t r a s t i n g} i ^ {t h} \text {i m a g e w i t h t e x t s}} + \underbrace {\sum_ {k = 1} ^ {\mathcal {N}} \log \frac {\exp \left(\frac {\sigma \left(I _ {k} ^ {e} , T _ {k} ^ {e}\right)}{\tau}\right)}{\exp \left(\frac {\sigma \left(I _ {i} ^ {e} , T _ {k} ^ {e}\right)}{\tau}\right)}} _ {\text {c o n t r a s t i n g t e x t s w i t h} i ^ {t h} \text {i m a g e}} \tag {8} +$$ + +$$ +\mathcal {L} _ {\text {W e i g h t e d C L}} = - \frac {1}{2 \mathcal {N}} \sum_ {i = 1} ^ {2 \mathcal {N}} \lambda_ {i} \mathcal {L} _ {C L _ {i}} \tag {8} +$$ + +Our final objective is likewise given by linear combination: + +$$ +\mathcal {L} _ {\text {W e i g h t e d C L - A t t e n t i o n}} = \mu_ {1} \mathcal {L} _ {\text {W e i g h t e d C L}} + \mu_ {2} \mathcal {L} _ {\text {A t t e n t i o n}} \tag {10} +$$ + +# 4.4. Knowledge element (KE) generation + +Our approach requires external knowledge about each image in addition to a paired caption. For example, a caption of dog image might be "A dog is running in the park". In this case, suitable knowledge elements might be paws, sharp nails, furry animal, trees. We follow in context learning approach by prompting a large language model (Vicuna [10]) for generating KEs for each image. Note that the KEs are generated purely from the caption or object label and thus are only potentially relevant to the image. Our approach accounts for this by generating 25 KEs per caption/category. Then, we take the top 5 KEs per caption based on the similarity scores between image and generated KEs. 
For COCO [28], we prompt Vicuna with What are useful visual features for distinguishing a category name in a photo?. Since COCO has 80 categories we choose this prompt following [32]. For Flickr30k [58], we design prompts that generate KEs for each caption, since we do not have any predefined object classes. Additional details are included in our supplementary. + +# 5. Experiments + +# 5.1. Experimental Setup + +Models and datasets. We follow [7]'s setting by attacking CLIP-like models [36]. We adopt ViT-B/16 as image encoder, pretrained on ImageNet-21k [42] and fine-tuned on ImageNet-1k. As a text encoder, we adopt a BERT-style [11] encoder following [36]. We cap the max sequence + +length of text to 100. We use AdamW with weight decay using a cosine scheduler from $10^{-4}$ with decay rate 0.2. We train for 30 epochs with a batch size of 128 on the COCO [28] and Fickr30k [58] datasets. While COCO has 80 defined object categories, Flickr30k has no label information. Additional details are included in supplementary. + +Backdoor settings. We tested out defense against three recent backdoor attacks. To do so, we couple backdoored samples with a caption mentioning the target class. Adversaries only require a very small amount of poisoned samples for poisoning contrastive models (e.g., CLIP) [7]. Following this, we inject a very small amount of poisoned samples $(0.01\%$ of the train dataset for both COCO and Flickr30k). + +Poisoning settings. We performed two types of poisoning attacks following [56]. For single target label attack, the poisoning goal is dog2boat for both Flickr30k and COCO. We evaluate them on test samples that are unseen in the training process. For example, we take an clean image of dog and associate it with a proxy caption of boat. The poisoning rate for this attack is $0.065\%$ for Flickr30k and $0.24\%$ for COCO. For the multi-target label attack, we take two classes. The poisoning goals are dog2boat and train2zebra for COCO. 
For Flickr30k, the poisoning goals are dog2boat and bird2sofa. The poisoning rate for COCO and Flickr30k are $0.52\%$ and $0.34\%$ respectively. + +# 5.2. Experimental Results + +Backdoor Attack. In Tab. 1, we compared ablations of our method $(\mathrm{CL} + \mathrm{KE}, \mathrm{CL} + \mathrm{Attention})$ with other baselines e.g. Cleanlip [3], Anti-Backdoor Learning (ABL) [27]. Finally, our model Semantic Shield (Weighted CL + Attention), outperforms all baselines with significant margins. Note that, at test time, we used 100 backdoor images (patch, BPP, Wanet) for the text retrieval task. At test time, our model retrieves no caption associated with poisoned categories for any backdoored image on Flickr30k. + +Poisoning Attack. Similarly, to the above, at test time, we use 100 poisoned images for both single and multi-target settings for both datasets. Our model outperforms all existing work significantly with large margins, particularly on the multi-target label setting. We observe that the unweighted version of our approach slightly outperforms Semantic Shield for dog2boat at Hit@1, but Semantic Shield significantly outperforms for Hit@5 and Hit@10, suggesting significantly reduced poisoning overall. + +Utility evaluation. We evaluate model utility for image-capture retrieval. Tab. 4 shows the performance (Recall@10) of the poisoned model on each attack type as well as the clean model on the test data. We observe that the utility of the poisoned model is at the same level or slightly less than the clean model e.g. BPP in COCO dataset. This implies that despite being trained on poisoned data, models maintain their performance. We show the model utility + +
DatasetModelsBackdoor PatchBackdoor BPPBackdoor Wanet
Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓
COCOCL (No Defense)90.6694.6095.43100.0100.0100.0100.0100.0100.0
CL+ ABL [27]6.238.1212.2115.3516.6816.21100.0100.0100.0
CL+ CleanClip [3]5.3512.6817.8936.1250.0955.198.2316.3223.73
CL + KE9.015.3121.9025.3947.9850.1212.2156.7988.38
CL + Attention4.205.126.010.05.2636.210.02.107.20
Weighted CL + Attention0.91.221.570.00.00.00.00.00.0
Flickr30kCL (No Defense)91.9797.6398.21100.0100.0100.0100.0100.0100.0
CL+ ABL [27]4.672.214.0610.3417.9821.1398.2199.23100.0
CL+ CleanClip [3]2.203.325.0512.4324.3231.2513.2923.1329.21
CL + KE16.1033.1541.0913.1436.5456.2723.3641.2147.43
CL + Attention1.203.123.010.07.2423.170.012.0114.07
Weighted CL + Attention0.00.00.00.00.00.00.00.00.0
+ +Table 1. Backdoor attack and defense performance with baselines. The first row of the table shows an undefended model while other rows are baselines or variants of our method. CL+ KE, CL+ Attention are our baselines. The best results are shown in bold. + +
DatasetModelsSingle Target LabelMultiple Target Label
dog2boatdog2boattrain2zebra
Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓
COCOCL (No Defense)18.057.2082.077.1299.2399.5655.3295.7697.98
CL+ CleanClip [3]3.393.955.6557.6963.089.1769.4971.7589.17
CL + KE4.565.325.9554.4564.2185.5265.1270.9286.12
CL + Attention0.563.384.510.6365.6069.422.256.7712.99
Weighted CL + Attention0.041.122.542.235.216.450.00.00.0
dog2boatdog2boatbird2sofa
Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓
Flickr30kCL (No Defense)29.057.2082.2328.1282.3993.7655.3290.62100.0
CL+ CleanClip [3]8.2731.5136.6121.6961.2788.7522.4264.1189.51
CL + KE7.3428.0932.2121.1245.3247.6712.7742.3454.21
CL + Attention4.5621.8134.111.6316.7029.213.2518.4332.22
Weighted CL + Attention0.321.212.541.784.565.670.00.00.0
+ +Table 2. Poisoning attack and defense performance with baselines. First row of the table shows how good the attack, and other rows are baselines along with our proposed models. CL + KE, CL + Attention are our baselines. The best results are highlighted. + +after being defended with Semantic Shield and its variants (CL + KE, CL + Attention, weighted CL + Attention) in Tab. 3. We largely observe a similar utility compared to the models from Tab. 4. On the Flickr30k dataset, single target or multiple target attack scenario, for TR task, the utility is slightly less than the clean model (Tab. 4, Tab. 3). + +# 5.3. Ablations + +Poisoning rate. We compare the performance of poisoning attacks at different poisoning rates on three backdoor attacks. We conduct these attacks against the victim model with four different poisoning rates (0.001 to $0.01\%$ ) on the COCO dataset (Fig. 3). We observe that attack performance significantly improves with increased poisoning rate, even though the rate is quite low, which demonstrates the vulnerability of contrastively trained VL models to attacks. + +Fine-tuning epoch. In Fig. 4 we use the max poisoning rate $(0.01\%)$ from Fig. 3 to illustrate Semantic Shield's per + +![](images/3d32224a2f17e685dc093f08b1eccd84b7a7a95b222b2e63c5356f0865f6076e.jpg) +(a) Backdoor patch + +![](images/92dbd3fcea48cced79ad97e4309c56216f07c39391cf5f5c29c07440aec07974.jpg) +(b) BPP + +![](images/46680ffaf8b6ea96e9e7ee3f076109486d997687f743bc237d1a66aaff49bbb5.jpg) +(c) Wanet + +![](images/7536f4d417b37713e100acc814d15ec05466655ad0481b0113d79d0e9e8a30e1.jpg) +(a) Backdoor patch + +![](images/23feab32acf5c1410d5b36b2ebb5667e83cdc6dcf3a02599e625d21974dbe093.jpg) +(b) BPP + +![](images/ba244084e2868daea1a07ca0ff6eae29e4322ef88820bb4378cbaa0940b4014e.jpg) +(c) Wanet +Figure 4. Hit@k vs training epoch for Semantic Shield. + +formance at different epochs on the same backdoored samples. 
We notice that Hit@k gradually reduces for all three attacks, demonstrating the increasing effectiveness of Semantic Shield's defense with increased training. + +
DatasetTaskModelsBackdoor PatchBPPWanetSingle Target LabelMultiple Target Label
COCOIRCL74.9973.9474.5474.6874.72
CL + KE74.1570.774.074.2473.28
CL + Attention74.3873.1374.4375.7075.13
Weighted CL + Attention74.2274.5674.2373.4673.51
COCOTRCL81.5877.4478.7480.1681.12
CL + KE78.4075.5477.8679.0881.20
CL + Attention79.2077.3678.0480.0581.06
Weighted CL + Attention79.4677.7878.4579.6780.0
Flickr30kIRCL59.1359.8661.0860.9257.41
CL + KE60.3461.8561.1358.1258.18
CL + Attention61.3255.9659.1458.9758.16
Weighted CL + Attention61.0756.3260.1659.7658.78
Flickr30kTRCL68.0768.7969.8671.0668.14
CL + KE69.6770.6569.6266.9862.20
CL + Attention70.064.4668.068.1362.97
Weighted CL + Attention70.2365.6668.8768.4562.12
+ +Table 3. Model utility of defended models (Recall@10). The model utilities are comparable to the performance in Tab. 4 + +
DatasetTaskCleanBackPatBPPWanetSingTLMultTL
COCOIR75.1374.9973.9474.5474.6874.72
TR80.6281.5877.4478.7480.1681.12
Flickr30kIR59.6859.1359.8661.0860.9257.41
TR68.3768.0768.7969.8671.0668.14
+ +Table 4. Model utility between clean model and other backdoored/poisoned models (CL) (Recall@10). Similar to Tab. 3. + +# 6. Qualitative analysis + +In Fig. 5, we present the contrast between a model defended by Semantic Shield and an undefended model's attention map. Fig. 5b shows that poisoned model pays attention to the patch (bottom right corner). In contrast, the defended model Fig. 5c does not pay any attention to the patch. Next, in Fig. 5d and Fig. 5g two imperceptible noises are injected e.g. BPP, Wanet. We wanted to see what happens if we inject the noise randomly throughout the entire images. Poisoned models in Fig. 5e and Fig. 5h show spurious visual signals all over the image. However, our proposed models filters out the noisy signals and defends against poisoning. + +# 7. Conclusion + +In this paper, we introduced Semantic Shield, an approach for defending against attacks on contrastively trained VL models. Our approach works by leveraging external knowledge to guide the model's attention to non-attacked visual regions and samples. We evaluated Semantic Shield against recent backdoorsing and poisoning attacks and defenses on two benchmarks. Our experiments show that Semantic Shield substantially outperforms existing defenses across all settings. 
In future work, we will explore a tighter integration of the LLM using prompting by dynamically producing KEs online based on the de + +![](images/8be5619ee58cda38e3dafbfd2aacb08ea685b22d615436b9b555bd9b6be599ef.jpg) +(a) Backdoor image (b) Attention map for with patch bottom poisoned model right corner + +![](images/31bac6cfdc3c9cd0626f286cf112ae37e748250f06c40fe7d7b05d221a748bf5.jpg) +(c) Attention map for best model + +(b) Attention map for poisoned model + +![](images/58719e3fe72244da814b7a548835d094ab6b6cdb26a51259eb50cfcc66bd4ca6.jpg) + +![](images/96dea49f53412598ec18853250923f4d03213e425a3bb8aabca0ddaf92e4c6f2.jpg) +(d) Backdoor image (e) Attention map for with imperceptible poisoned model noise:BPP + +![](images/81a949309702b253b548e673c7ebff5a6176a9ff65c7764ab11b0d179978edc4.jpg) +(f) Attention map for best model + +(e) Attention map for poisoned model + +![](images/b014b6cad5722d0206243b5266ddfeec4eb96a0fb97d0e9217995474d36a0690.jpg) +Figure 5. Attention map comparison between our model (weighted $\mathrm{CL} +$ attention) and backdoored models for three backdoor attacks. + +![](images/9a108b410b23238ab50b6051b1e4585be0daaa72bfe29b423abf1f5ed77787fd.jpg) +(g) Backdoor image (h) Attention map for with imperceptible poisoned model noise: Wanet + +![](images/7823a552ee20b4c5997ea4235f720f3ed64aebe245de2bcdac8f2d7bf7b7bd27.jpg) +(i) Attention map for best model + +(h) Attention map for poisoned model + +![](images/fd8ca58da0ecbffe144b12414cf62078fc9c91119b82353efe60be8750e26018.jpg) + +fended model's current state. In addition, we will explore how multimodal large language models could be used to extract more relevant KEs. While Semantic Shield is successful at defending against attacks on natural images for which there is a meaningful visual-KE alignment, it may be less successful for images such as charts or more abstract text for which clear KEs cannot be extracted. 
Moreover, it does not preclude the possibility of attacks against the language model via the caption. Future work should explore how the LLM can be jointly defended. + +# References + +[1] Hassan Akbari, Liangzhe Yuan, Rui Qian, Wei-Hong Chuang, Shih-Fu Chang, Yin Cui, and Boqing Gong. VATT: transformers for multimodal self-supervised learning from raw video, audio and text. In Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems 2021, NeurIPS 2021, December 6-14, 2021, virtual, pages 24206-24221, 2021. 1 +[2] Hritik Bansal, Nishad Singhi, Yu Yang, Fan Yin, Aditya Grover, and Kai-Wei Chang. Cleanclip: Mitigating data poisoning attacks in multimodal contrastive learning. In ICLR 2023 Workshop on Trustworthy and Reliable Large-Scale Machine Learning Models, 2023. 3 +[3] Hritik Bansal, Nishad Singhi, Yu Yang, Fan Yin, Aditya Grover, and Kai-Wei Chang. Cleanclip: Mitigating data poisoning attacks in multimodal contrastive learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 112–123, 2023. 1, 3, 6, 7 +[4] Battista Biggio, Blaine Nelson, and Pavel Laskov. Poisoning attacks against support vector machines. In Proceedings of the 29th International Coference on International Conference on Machine Learning, pages 1467-1474, 2012. 2 +[5] Battista Biggio, Ignazio Pillai, Samuel Rota Bulò, Davide Ariu, Marcello Pelillo, and Fabio Roli. Is data clustering in adversarial settings secure? In Proceedings of the 2013 ACM Workshop on Artificial Intelligence and Security, page 87–98, New York, NY, USA, 2013. Association for Computing Machinery. 3 +[6] Battista Biggio, Ignazio Pillai, Samuel Rota Bulò, Davide Ariu, Marcello Pelillo, and Fabio Roli. Is data clustering in adversarial settings secure? In Proceedings of the 2013 ACM workshop on Artificial intelligence and security, pages 87–98, 2013. 2 +[7] Nicholas Carlini and Andreas Terzis. 
Poisoning and backdooring contrastive learning. In The Tenth International Conference on Learning Representations, ICLR 2022, Virtual Event, April 25-29, 2022. OpenReview.net, 2022. 3, 4, 6 +[8] Bryant Chen, Wilka Carvalho, Nathalie Baracaldo, Heiko Ludwig, Benjamin Edwards, Taesung Lee, Ian Molloy, and Biplav Srivastava. Detecting backdoor attacks on deep neural networks by activation clustering. In Workshop on Artificial Intelligence Safety. CEUR-WS, 2019. 3 +[9] Jian Chen, Xuxin Zhang, Rui Zhang, Chen Wang, and Ling Liu. De-pois: An attack-agnostic defense against data poisoning attacks. IEEE Transactions on Information Forensics and Security, 16:3412-3425, 2021. 3 +[10] Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E Gonzalez, et al. Vicuna: An open-source chatbot impressing gpt-4 with $90\%$ * chatgpt quality. 2023. URL https://lmsys.org/blog/2023-03-30-vicuna, 1(2):3. 2, 6 +[11] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the + +Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2019, Minneapolis, MN, USA, June 2-7, 2019, Volume 1 (Long and Short Papers), pages 4171-4186. Association for Computational Linguistics, 2019. 6 +[12] Khoa Doan, Yingjie Lao, and Ping Li. Backdoor attack with imperceptible input and latent modification. Advances in Neural Information Processing Systems, 34:18944-18957, 2021. 3 +[13] Khoa Doan, Yingjie Lao, Weijie Zhao, and Ping Li. Lira: Learnable, imperceptible and robust backdoor attacks. In Proceedings of the IEEE/CVF international conference on computer vision, pages 11966-11976, 2021. 3 +[14] Vishnu Sashank Dorbala, Gunnar A Sigurdsson, Jesse Thomason, Robinson Piramuthu, and Gaurav S Sukhatme. 
Clip-nav: Using clip for zero-shot vision-and-language navigation. In Workshop on Language and Robotics at CoRL 2022, 2022. 2 +[15] Fangxiang Feng, Xiaojie Wang, and Ruifan Li. Cross-modal retrieval with correspondence autoencoder. In Proceedings of the 22nd ACM international conference on Multimedia, pages 7-16, 2014. 2 +[16] Felipe González-Pizarro and Savvas Zannettou. Understanding and detecting hateful content using contrastive learning. In Proceedings of the International AAAI Conference on Web and Social Media, pages 257–268, 2023. 2 +[17] Tianyu Gu, Kang Liu, Brendan Dolan-Gavitt, and Siddharth Garg. Badnets: Evaluating backdooring attacks on deep neural networks. IEEE Access, 7:47230-47244, 2019. 3 +[18] Jonathan Hayase, Weihao Kong, Raghav Somani, and Sewoong Oh. Spectre: Defending against backdoor attacks using robust statistics. In International Conference on Machine Learning, pages 4129-4139. PMLR, 2021. 3 +[19] Chenguang Huang, Oier Mees, Andy Zeng, and Wolfram Burgard. Visual language maps for robot navigation. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pages 10608-10615. IEEE, 2023. 2 +[20] Kunzhe Huang, Yiming Li, Baoyuan Wu, Zhan Qin, and Kui Ren. Backdoor defense via decoupling the training process. In International Conference on Learning Representations, 2021. 3 +[21] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In International conference on machine learning, pages 4904-4916. PMLR, 2021. 2 +[22] Marius Kloft and Pavel Laskov. Online anomaly detection under adversarial impact. In Proceedings of the thirteenth international conference on artificial intelligence and statistics, pages 405-412. JMLR Workshop and Conference Proceedings, 2010. 2 +[23] Pang Wei Koh and Percy Liang. Understanding black-box predictions via influence functions. 
In International conference on machine learning, pages 1885-1894. PMLR, 2017. 2 +[24] Changjiang Li, Ren Pang, Zhaohan Xi, Tianyu Du, Shouling Ji, Yuan Yao, and Ting Wang. An embarrassingly simple + +backdoor attack on self-supervised learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4367-4378, 2023. 1 +[25] Junnan Li, Ramprasaath Selvaraju, Akhilesh Gotmare, Shafiq Joty, Caiming Xiong, and Steven Chu Hong Hoi. Align before fuse: Vision and language representation learning with momentum distillation. Advances in neural information processing systems, 34:9694-9705, 2021. 1, 2 +[26] Junnan Li, Dongxu Li, Caiming Xiong, and Steven C. H. Hoi. BLIP: bootstrapping language-image pre-training for unified vision-language understanding and generation. In International Conference on Machine Learning, ICML 2022, 17-23 July 2022, Baltimore, Maryland, USA, pages 12888-12900. PMLR, 2022. 1 +[27] Yige Li, Xixiang Lyu, Nodens Koren, Lingjuan Lyu, Bo Li, and Xingjun Ma. Anti-backdoor learning: Training clean models on poisoned data. In Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems 2021, NeurIPS 2021, December 6-14, 2021, virtual, pages 14900-14912, 2021. 1, 3, 6, 7 +[28] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014. 6 +[29] Min Liu, Alberto Sangiovanni-Vincentelli, and Xiangyu Yue. Beating backdoor attack at its own game. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4620-4629, 2023. 3 +[30] Yang Liu, Mingyuan Fan, Cen Chen, Ximeng Liu, Zhuo Ma, Li Wang, and Jianfeng Ma. Backdoor defense with machine unlearning. 
In IEEE INFOCOM 2022-IEEE Conference on Computer Communications, pages 280-289. IEEE, 2022. 3 +[31] Arjun Majumdar, Gunjan Aggarwal, Bhavika Devnani, Judy Hoffman, and Dhruv Batra. Zson: Zero-shot object-goal navigation using multimodal goal embeddings. Advances in Neural Information Processing Systems, 35:32340-32352, 2022. 2 +[32] Sachit Menon and Carl Vondrick. Visual classification via description from large language models. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net, 2023. 6 +[33] Tuan Anh Nguyen and Anh Tuan Tran. Wanet - imperceptible warping-based backdoor attack. In International Conference on Learning Representations, 2021. 3, 4 +[34] Huy Phan, Cong Shi, Yi Xie, Tianfang Zhang, Zhuohang Li, Tianming Zhao, Jian Liu, Yan Wang, Yingying Chen, and Bo Yuan. Ribac: Towards robust and imperceptible backdoor attack against compact dnn. In European Conference on Computer Vision, pages 708-724. Springer, 2022. 3 +[35] Han Qiu, Yi Zeng, Shangwei Guo, Tianwei Zhang, Meikang Qiu, and Bhavani Thuraisingham. Deepsweep: An evaluation framework for mitigating dnn backdoor attacks using data augmentation. In Proceedings of the 2021 ACM + +Asia Conference on Computer and Communications Security, pages 363-377, 2021. 3 +[36] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, 2021. 1, 2, 6 +[37] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with CLIP latents. CoRR, abs/2204.06125, 2022. 1 +[38] Aniruddha Saha, Akshayvarun Subramanya, and Hamed Pirsiavash. Hidden trigger backdoor attacks. 
In Proceedings of the AAAI conference on artificial intelligence, pages 11957-11965, 2020. 3 +[39] Aniruddha Saha, Ajinkya Tejankar, Soroush Abbasi Koohpayegani, and Hamed Pirsiavash. Backdoor attacks on self-supervised learning. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2022, New Orleans, LA, USA, June 18-24, 2022, pages 13327-13336. IEEE, 2022. 3 +[40] Ali Shafahi, W. Ronny Huang, Mahyar Najibi, Octavian Suciu, Christoph Studer, Tudor Dumitras, and Tom Goldstein. Poison frogs! targeted clean-label poisoning attacks on neural networks. In Proceedings of the 32nd International Conference on Neural Information Processing Systems, page 6106-6116, Red Hook, NY, USA, 2018. Curran Associates Inc. 3 +[41] Wonyoung Shin, Jonghun Park, Taekang Woo, Yongwoo Cho, Kwangjin Oh, and Hwanjun Song. e-clip: Large-scale vision-language representation learning in e-commerce. In Proceedings of the 31st ACM International Conference on Information & Knowledge Management, pages 3484–3494, 2022. 2 +[42] Andreas Steiner, Alexander Kolesnikov, Xiaohua Zhai, Ross Wightman, Jakob Uszkoreit, and Lucas Beyer. How to train your vit? data, augmentation, and regularization in vision transformers. Trans. Mach. Learn. Res., 2022, 2022. 6 +[43] Di Tang, XiaoFeng Wang, Haixu Tang, and Kehuan Zhang. Demon in the variant: Statistical analysis of {DNNs} for robust backdoor contamination detection. In 30th USENIX Security Symposium (USENIX Security 21), pages 1541-1558, 2021. 3 +[44] Christopher Thomas and Adriana Kovashka. Preserving semantic neighborhoods for robust cross-modal retrieval. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XVIII 16, pages 317-335. Springer, 2020. 2 +[45] Vale Tolpegin, Stacey Truex, Mehmet Emre Gursoy, and Ling Liu. Data poisoning attacks against federated learning systems. 
In Computer Security-ESORICS 2020: 25th European Symposium on Research in Computer Security, ES-ORICS 2020, Guildford, UK, September 14–18, 2020, Proceedings, Part I 25, pages 480–501. Springer, 2020. 2 +[46] Brandon Tran, Jerry Li, and Aleksander Madry. Spectral signatures in backdoor attacks. Advances in neural information processing systems, 31, 2018. 3 + +[47] Wei Lun Tsai, Jacob J Lin, and Shang-Hsien Hsieh. Generating construction safety observations via clip-based image-language embedding. In European Conference on Computer Vision, pages 366-381. Springer, 2022. 2 +[48] Haotao Wang, Junyuan Hong, Aston Zhang, Jiayu Zhou, and Zhangyang Wang. Trap and replace: Defending backdoor attacks by trapping them into an easy-to-replace subnetwork. Advances in neural information processing systems, 35:36026-36039, 2022. 3 +[49] Lin Wang and Jie Chen. Improving radiology report generation with adaptive attention. In Multimodal AI in healthcare: A paradigm shift in health intelligence, pages 293-305. Springer, 2022. 2 +[50] Longzheng Wang, Chuang Zhang, Hongbo Xu, Yongxiu Xu, Xiaohan Xu, and Siqi Wang. Cross-modal contrastive learning for multimodal fake news detection. In Proceedings of the 31st ACM International Conference on Multimedia, pages 5696-5704, 2023. 2 +[51] Zhenting Wang, Juan Zhai, and Shiqing Ma. Bppattack: Stealthy and efficient trojan attacks against deep neural networks via image quantization and contrastive adversarial learning. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2022, New Orleans, LA, USA, June 18-24, 2022, pages 15054-15063. IEEE, 2022. 4 +[52] Sandamal Weerasinghe, Tansu Alpcan, Sarah M Erfani, and Christopher Leckie. Defending support vector machines against data poisoning attacks. IEEE Transactions on Information Forensics and Security, 16:2566-2578, 2021. 3 +[53] Dongxian Wu and Yisen Wang. Adversarial neuron pruning purifies backdoored deep models. 
Advances in Neural Information Processing Systems, 34:16913-16925, 2021. 3 +[54] Huang Xiao, Battista Biggio, Gavin Brown, Giorgio Fumera, Claudia Eckert, and Fabio Roli. Is feature selection secure against training data poisoning? In International conference on machine learning, pages 1689-1698. PMLR, 2015. 2 +[55] Wenhan Yang, Jingdong Gao, and Baharan Mirzasoleiman. Robust contrastive language-image pretraining against data poisoning and backdoor attacks. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 1 +[56] Ziqing Yang, Xinlei He, Zheng Li, Michael Backes, Mathias Humbert, Pascal Berrang, and Yang Zhang. Data poisoning attacks against multimodal encoders. In International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, pages 39299-39313. PMLR, 2023. 6 +[57] Ziqing Yang, Xinlei He, Zheng Li, Michael Backes, Mathias Humbert, Pascal Berrang, and Yang Zhang. Data poisoning attacks against multimodal encoders. In International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, pages 39299-39313. PMLR, 2023. 1 +[58] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions. Trans. Assoc. Comput. Linguistics, 2:67-78, 2014. 6 +[59] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive + +captioners are image-text foundation models. Transactions on Machine Learning Research, 2022. 1, 2 +[60] Yi Zeng, Si Chen, Won Park, Zhuoqing Mao, Ming Jin, and Ruoxi Jia. Adversarial unlearning of backdoors via implicit hypergradient. In International Conference on Learning Representations, 2021. 3 +[61] Xiaohua Zhai, Xiao Wang, Basil Mustafa, Andreas Steiner, Daniel Keysers, Alexander Kolesnikov, and Lucas Beyer. Lit: Zero-shot transfer with locked-image text tuning. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18123-18133, 2022. 2 +[62] Hanwang Zhang, Yang Yang, Huanbo Luan, Shuicheng Yang, and Tat-Seng Chua. Start from scratch: Towards automatically identifying, modeling, and naming visual attributes. In Proceedings of the 22nd ACM international conference on Multimedia, pages 187-196, 2014. 2 +[63] Jinghuai Zhang, Hongbin Liu, Jinyuan Jia, and Neil Zhenqiang Gong. Corruptencoder: Data poisoning based backdoor attacks to contrastive learning. arXiv preprint arXiv:2211.08229, 2022. 3 +[64] Ying Zhang and Huchuan Lu. Deep cross-modal projection learning for image-text matching. In Proceedings of the European conference on computer vision (ECCV), pages 686-701, 2018. 2 +[65] Yuhao Zhang, Hang Jiang, Yasuhide Miura, Christopher D Manning, and Curtis P Langlotz. Contrastive learning of medical visual representations from paired images and text. In Machine Learning for Healthcare Conference, pages 2-25. PMLR, 2022. 2 +[66] Bingyin Zhao and Yingjie Lao. Towards class-oriented poisoning attacks against neural networks. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 3741-3750, 2022. 2 +[67] Yangming Zhou, Yuzhou Yang, Qichao Ying, Zhenxing Qian, and Xinpeng Zhang. Multimodal fake news detection via clip-guided learning. In 2023 IEEE International Conference on Multimedia and Expo (ICME), pages 2825-2830. IEEE, 2023. 
2 \ No newline at end of file diff --git a/2024/Semantic Shield_ Defending Vision-Language Models Against Backdooring and Poisoning via Fine-grained Knowledge Alignment/images.zip b/2024/Semantic Shield_ Defending Vision-Language Models Against Backdooring and Poisoning via Fine-grained Knowledge Alignment/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..7c71ea9c0495826f8f0615bafa00ce1ce657107d --- /dev/null +++ b/2024/Semantic Shield_ Defending Vision-Language Models Against Backdooring and Poisoning via Fine-grained Knowledge Alignment/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:957cebc3f2c3def5ec88a309e0a79bdeb3a3345c23abb504fa7e8acd6bca0e0a +size 658496 diff --git a/2024/Semantic Shield_ Defending Vision-Language Models Against Backdooring and Poisoning via Fine-grained Knowledge Alignment/layout.json b/2024/Semantic Shield_ Defending Vision-Language Models Against Backdooring and Poisoning via Fine-grained Knowledge Alignment/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..ce5514b4985c79d829a3754e8f47adb2c4a855e2 --- /dev/null +++ b/2024/Semantic Shield_ Defending Vision-Language Models Against Backdooring and Poisoning via Fine-grained Knowledge Alignment/layout.json @@ -0,0 +1,9099 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 52, + 103, + 541, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 103, + 541, + 140 + ], + "spans": [ + { + "bbox": [ + 52, + 103, + 541, + 140 + ], + "type": "text", + "content": "Semantic Shield: Defending Vision-Language Models Against Backdoors and Poisoning via Fine-grained Knowledge Alignment" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 181, + 161, + 266, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 181, + 161, + 266, + 202 + ], + "spans": [ + { + "bbox": [ + 181, + 161, + 266, + 202 + ], + "type": "text", + "content": "Alvi Md Ishmam 
Virginia Tech alvi@vt.edu" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 310, + 161, + 410, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 161, + 410, + 202 + ], + "spans": [ + { + "bbox": [ + 310, + 161, + 410, + 202 + ], + "type": "text", + "content": "Christopher Thomas Virginia Tech christhomas@vt.edu" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "spans": [ + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 257, + 290, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 257, + 290, + 498 + ], + "spans": [ + { + "bbox": [ + 45, + 257, + 290, + 498 + ], + "type": "text", + "content": "In recent years there has been enormous interest in vision-language models trained using self-supervised objectives. However, the use of large-scale datasets scraped from the web for training also makes these models vulnerable to potential security threats, such as backdooring and poisoning attacks. In this paper, we propose a method for mitigating such attacks on contrastively trained vision-language models. Our approach leverages external knowledge extracted from a language model to prevent models from learning correlations between image regions which lack strong alignment with external knowledge. We do this by imposing constraints to enforce that attention paid by the model to visual regions is proportional to the alignment of those regions with external knowledge. We conduct extensive experiments using a variety of recent backdooring and poisoning attacks on multiple datasets and architectures. 
Our results clearly demonstrate that our proposed approach is highly effective at defending against such attacks across multiple settings, while maintaining model utility and without requiring any changes at inference time." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 523, + 128, + 536 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 523, + 128, + 536 + ], + "spans": [ + { + "bbox": [ + 47, + 523, + 128, + 536 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 544, + 287, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 544, + 287, + 640 + ], + "spans": [ + { + "bbox": [ + 46, + 544, + 287, + 640 + ], + "type": "text", + "content": "Recent years have seen enormous interest in vision-language models trained on web-scale image-captioning data using contrastive objectives [25, 36] and text generation objectives [59]. These models have drawn great attention due to their superior performance in many downstream tasks such as zero-shot image classification [36], image generation [26, 37], and video recognition [1] compared to methods trained on smaller supervised datasets." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 642, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 288, + 714 + ], + "type": "text", + "content": "Although such image-text foundation models have demonstrated remarkable performance, several recent studies have demonstrated that they are particularly vulnerable to adversarial attacks [24, 55, 57] by introducing a small amount of malicious data (e.g. 75 instances out of 3 million [57]) into the training data. 
Practically, this can be achieved" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 313, + 234, + 549, + 376 + ], + "blocks": [ + { + "bbox": [ + 313, + 234, + 549, + 376 + ], + "lines": [ + { + "bbox": [ + 313, + 234, + 549, + 376 + ], + "spans": [ + { + "bbox": [ + 313, + 234, + 549, + 376 + ], + "type": "image", + "image_path": "9fc2ddff11e022fa7cd8f2b0fbae92c212ed2ea4baae6e487ec10a0e545d8670.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 390, + 547, + 468 + ], + "lines": [ + { + "bbox": [ + 305, + 390, + 547, + 468 + ], + "spans": [ + { + "bbox": [ + 305, + 390, + 547, + 468 + ], + "type": "text", + "content": "Figure 1. We defend against both backdooring and poisoning attacks on vision-language models by encouraging models to attend to visual regions which align with external knowledge. Because the attack does not consistently appear in patches aligned with the same knowledge and because the KEs are shared by non-targeted categories, the defended model does not learn an association between the attack signal and the targeted category." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 483, + 547, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 483, + 547, + 604 + ], + "spans": [ + { + "bbox": [ + 304, + 483, + 547, + 604 + ], + "type": "text", + "content": "by inserting imperceptible noise or a backdoor patch into some images, as shown in Fig. 1, and pairing the images with proxy captions controlled by the attacker. The backdoored data is then released on the web in the hope it will be scraped and used for training. Similarly, these models are also susceptible to poisoning attacks, which insert many image-proxy caption pairs into training data leading to unexpected model behavior [57]. 
Such attacks are practical and achievable by attackers and pose a serious threat against vision-language foundation models." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 605, + 548, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 548, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 548, + 715 + ], + "type": "text", + "content": "To defend against such attacks, a number of methods have been proposed. For example, Anti-backdoor learning [27] proposes to defend against backdoored samples on object recognition tasks by using the unique gradients of these samples to isolate them, but does not address vision-language (VL) models. More similar to our work, CleanCLIP [3] proposes a method for defending contrastive VL models against backdooring, but does not address nonbackdoored poisoning attacks as we do. While [57] propose" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "24820" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": "to clean labeled data to mitigate the impact of poisoning, no prior work has proposed a unified defense mechanism for contrastively trained VL models that is effective against both backdooring and poisoning attacks." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 122, + 288, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 122, + 288, + 253 + ], + "spans": [ + { + "bbox": [ + 46, + 122, + 288, + 253 + ], + "type": "text", + "content": "To address this urgent need, we propose a defense method for VL models that defends against both backdooring and poisoning attacks. Our method can also be deployed in object recognition settings, by casting it as a text retrieval problem following [36]. Our method is motivated by the following insight. We note that attacks rely on having models learn correlations between a particular visual signal and target. However, these targeted images share lower-level semantic concepts with other, non-targeted categories (See Fig. 1). As a consequence, the attack tends not to affect the model's representation of these concepts." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 256, + 287, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 256, + 287, + 483 + ], + "spans": [ + { + "bbox": [ + 46, + 256, + 287, + 483 + ], + "type": "text", + "content": "Moreover, in the case of backdooring, the attack signal is applied to various images whose semantics change in the region on which the attack is applied. For example, in one image the attack may cover a patch associated with paw, while in another image the signal is associated with sharp teeth. Thus, the model fails to learn an association between the attack signal and these lower-level semantics. We refer to these lower-level semantic concepts associated with objects or captions as Knowledge Elements (KEs). KEs consist of semantic attributes (e.g. round), but also subobjects (e.g. paw), and relations. Our defense mechanism aligns with how humans understand semantics of objects or sentences: as collections of semantic units which combine together to form higher-level concepts that are more abstract, compositional and include actions (\"running\") and proto-objects (\"four-legged animal\"). We propose to encourage models to rely more heavily on relevant lower level semantics when producing their representations. As a consequence, our models are much more resistant to attacks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 486, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 486, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 486, + 287, + 713 + ], + "type": "text", + "content": "Our method works by learning an alignment between image patches from images and a set of KEs associated with each image caption. To discover associated KEs, prior to training our model we prompt a large language model (Vicuna [10]) to list possible KEs for each caption. We next perform contrastive image-caption training, but add several new objectives. 
First, we enforce an alignment between image patches and KEs using a novel multi-instance learning based constraint, since we do not know which patches go with which KEs. While this aligns image patches and KEs, it does not prevent the model from relying on the attacker's visual signal when computing its representation. Thus, we also propose a second constraint which enforces that the model's attention to patches is proportional to each patch's alignment with a KE. That is, if a patch has a low alignment with all KEs, the patch should have a low effect on the model's representation. Finally, we observe that for attacked samples, the overall patch-KE alignment is much lower. We thus introduce a dynamic per-sample weight term" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 72, + 545, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 191 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 191 + ], + "type": "text", + "content": "on the contrastive loss based on the overall alignment of the KEs with the image's patches. This has the effect of downweighting the effect of poisoned samples during training. We evaluate our defense method, Semantic Shield, against multiple recent attacks and defenses on multiple datasets. We observe that Semantic Shield significantly outperforms prior defenses across multiple settings. Our defense technique adds very little overhead at train time, while making models significantly more robust to a wide variety of attacks. 
The major contributions of this paper are as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 193, + 545, + 323 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 306, + 193, + 545, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 193, + 545, + 239 + ], + "spans": [ + { + "bbox": [ + 306, + 193, + 545, + 239 + ], + "type": "text", + "content": "- We propose an approach, Semantic Shield for defending against backdoorsing and poisoning attacks on contrastively trained vision-language models by enforcing knowledge-guided train-time constraints." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 240, + 545, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 240, + 545, + 275 + ], + "spans": [ + { + "bbox": [ + 306, + 240, + 545, + 275 + ], + "type": "text", + "content": "- We propose a simple yet effective prompting technique using an open-source language model for extracting constituent knowledge elements for free from any caption." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 276, + 545, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 276, + 545, + 323 + ], + "spans": [ + { + "bbox": [ + 306, + 276, + 545, + 323 + ], + "type": "text", + "content": "- We perform a comprehensive experimental evaluation using a number of recent backdoors and poisoning attacks on two datasets. Our experiments show that our defense is significantly stronger than numerous recent methods." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 335, + 392, + 348 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 335, + 392, + 348 + ], + "spans": [ + { + "bbox": [ + 306, + 335, + 392, + 348 + ], + "type": "text", + "content": "2. 
Related Work" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 356, + 500, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 356, + 500, + 369 + ], + "spans": [ + { + "bbox": [ + 306, + 356, + 500, + 369 + ], + "type": "text", + "content": "2.1. Vision-language contrastive learning" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 374, + 545, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 374, + 545, + 626 + ], + "spans": [ + { + "bbox": [ + 304, + 374, + 545, + 626 + ], + "type": "text", + "content": "In recent years, large-scale contrastively trained vision-language foundation models have demonstrated remarkable performance on a number of downstream tasks, even surpassing the performance of supervised models in some cases [25, 36, 59, 61]. While contrastive approaches have been used to align visual and textual embeddings for years [15, 44, 62, 64], recent approaches such as CLIP [36] and ALIGN [21] have demonstrated how training on hundreds of millions of image-caption pairs scraped from the web can yield powerful generalist image-text foundation models which can be applied to many downstream tasks. CLIP-inspired contrastively trained models have found widespread use in many security-critical applications, including navigation [14, 19, 31], healthcare [49, 65], worksite safety [47], disinformation detection [50, 67], and many others [16, 41]. Given their widespread use, it is critical that contrastively trained vision-language models perform in safe and expected ways. Our work adopts the standard two-stream contrastive architecture proposed in [36] and demonstrates how such models can be defended against potential attacks lurking within webly-harvested data." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 635, + 479, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 635, + 479, + 647 + ], + "spans": [ + { + "bbox": [ + 306, + 635, + 479, + 647 + ], + "type": "text", + "content": "2.2. Poisoning and backdoor attacks" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 545, + 713 + ], + "type": "text", + "content": "Data poisoning attacks [4, 45, 54, 66], which have been proposed in both supervised [23] and unsupervised [6, 22] settings, involve introducing mistrabeled (or misaligned) data into the model's training set. At test time, models behave in unexpected and attacker-influenced ways when presented" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "24821" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 301 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 301 + ], + "type": "text", + "content": "with the poisoned examples seen during training. While targeted poisoning attacks target specific examples introduced during training, backdoor attacks can be applied to any image. Backdoorsing attacks are a type of data poisoning attack where an attacker introduces a spurious signal, such as patches [17, 38] or imperceptible perturbations [12, 13, 33, 34] into an image. Models learn to associate the introduced signal with the targeted concept. 
While poisoning and backdoor attacks have traditionally targeted supervised learning settings, recent work has shown that contrastively trained vision-language models are particularly vulnerable [7, 63]. [7] show that by introducing as few as 3 out of 3 million samples, an attacker can execute a successful attack. This is a highly practical attack, as an attacker can release large amounts of poisoned data on the internet in the hopes that it will be scraped and later used for training. In our work, we demonstrate that our method is highly effective against a number of recent backdoorsing methods and poisoning attacks on contrastive models." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 308, + 191, + 320 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 308, + 191, + 320 + ], + "spans": [ + { + "bbox": [ + 47, + 308, + 191, + 320 + ], + "type": "text", + "content": "2.3. Defending against attacks" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 327, + 287, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 327, + 287, + 495 + ], + "spans": [ + { + "bbox": [ + 46, + 327, + 287, + 495 + ], + "type": "text", + "content": "Given the large potential risks posed by attacks to models, extensive research has been conducted on approaches for defending models against both poisoning [9, 52] and backdooring [18, 20, 48] attacks. Defenses can be broadly categorized into methods for detecting and removing attacked samples from training [8, 43, 46], those that remove backdoors already learned by models [30, 53, 60], and those that seek to prevent models from learning backdoors by decreasing their effectiveness [2, 27, 35]. Unfortunately, detection-based methods often fail to detect all backdoors and given the particular vulnerability of contrastive models, imperfect filtering could still result in model poisoning. 
Unlike our approach, model de-poisoning methods often fail to achieve similar performance to clean models [29]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 495, + 288, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 495, + 288, + 615 + ], + "spans": [ + { + "bbox": [ + 46, + 495, + 288, + 615 + ], + "type": "text", + "content": "Of particular relevance to our work are methods aimed at defending against poisoning and backdooring for vision-language contrastive learning [3]. [3] propose to independently realign representations from different modalities. Unlike this approach, our method learns a fine-grained alignment between external knowledge extracted from a large language model and visual regions. These alignments are then used as a penalty to prevent models from attending to non-aligned visual regions. Our method substantially outperforms [3] across all settings." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 627, + 144, + 640 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 627, + 144, + 640 + ], + "spans": [ + { + "bbox": [ + 47, + 627, + 144, + 640 + ], + "type": "text", + "content": "3. Problem setting" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 646, + 134, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 646, + 134, + 658 + ], + "spans": [ + { + "bbox": [ + 47, + 646, + 134, + 658 + ], + "type": "text", + "content": "3.1. Threat model" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": "Adversary objective. 
Given a vision-language contrastive learning model " + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": ", an adversary aims to compromise the model by injecting a small amount of poisoned data " + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_p" + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": " into a clean dataset " + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_c" + }, + { + "bbox": [ + 46, + 665, + 288, + 715 + ], + "type": "text", + "content": ", both of which constitute the training" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 72, + 545, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 251 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 251 + ], + "type": "text", + "content": "data " + }, + { + "bbox": [ + 304, + 72, + 545, + 251 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 304, + 72, + 545, + 251 + ], + "type": "text", + "content": ". The model trained on the poisoned training data is denoted as " + }, + { + "bbox": [ + 304, + 72, + 545, + 251 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_p" + }, + { + "bbox": [ + 304, + 72, + 545, + 251 + ], + "type": "text", + "content": ". In this paper, we consider two types of attacks: 1) backdooring and 2) poisoning. In a backdoor attack, the adversary overlays either a small patch or some visually imperceptible noise on an image, causing the backdoored image to be misclassified or incorrectly retrieved by a retrieval model. During testing, the adversary cause the model to misclassify or retrieve a specific class by inserting the backdoor into test images. 
In contrast, in a poisoning attack, the goal is to cause the model " + }, + { + "bbox": [ + 304, + 72, + 545, + 251 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_p" + }, + { + "bbox": [ + 304, + 72, + 545, + 251 + ], + "type": "text", + "content": " to associate a targeted set of text with images of a specified class by inserting many training instances which incorrectly associate visual content with concepts controlled by the adversary. In both cases, the poisoned model is expected to maintain similar utility (performance) compared to the clean model." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 252, + 546, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 252, + 546, + 419 + ], + "spans": [ + { + "bbox": [ + 304, + 252, + 546, + 419 + ], + "type": "text", + "content": "Adversary capabilities. We consider an adversary capable of injecting a small number of poisonous samples into the training dataset, similar to prior work [5]. In traditional supervised attacks [39, 40], adversaries were required to modify a large amount of the training data - an impractical setting for vision-language models trained on web-scale data. Our setting is more realistic, because achieving a high poisoning rate is improbable when poisoned data is released on the internet with the hope of it being scraped for training. Thus, we focus on the more feasible scenario and assume a relatively low poisoning rate. We assume a black-box setting, where the adversary lacks knowledge of the target model's architecture and hyperparameters. Additionally, the adversary lacks control over the training process." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 425, + 423, + 437 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 425, + 423, + 437 + ], + "spans": [ + { + "bbox": [ + 306, + 425, + 423, + 437 + ], + "type": "text", + "content": "3.2. 
Attack methodology" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "spans": [ + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "text", + "content": "Model training. We denote our training data as " + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "inline_equation", + "content": "(i,t)\\in" + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "inline_equation", + "content": "\\mathcal{D} = \\mathcal{I}\\times \\mathcal{T}" + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "inline_equation", + "content": "\\mathcal{D},\\mathcal{I}" + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "text", + "content": " represent the training set, image set, and text set, respectively. Within a collection of " + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "inline_equation", + "content": "\\mathcal{N}" + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "text", + "content": " image-text pairs, we identify " + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "inline_equation", + "content": "(i_j,t_k)" + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "text", + "content": " as a positive pair if " + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "inline_equation", + "content": "j = k" + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "text", + "content": " ; otherwise, it is considered a negative pair. 
The contrastive learning model concurrently optimizes the image encoder " + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_i" + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "text", + "content": " and the text encoder " + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_t" + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "text", + "content": " to maximize the similarity between the embeddings of positive pairs in a batch while minimizing that of negative pairs. Specifically, for a given batch of " + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "inline_equation", + "content": "\\mathcal{N}" + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "text", + "content": " image-text pairs, we obtain the image embedding " + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "inline_equation", + "content": "I_{j}^{e} = \\mathcal{E}_{i}(i_{j})" + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "text", + "content": " and the corresponding text embedding " + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "inline_equation", + "content": "T_{k}^{e} = \\mathcal{E}_{t}(t_{k})" + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "text", + "content": " for each pair, normalizing both embeddings using the " + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "text", + "content": " norm. 
The cross-modal contrastive loss " + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{CL}" + }, + { + "bbox": [ + 304, + 443, + 545, + 611 + ], + "type": "text", + "content": " is then computed as follows:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 319, + 615, + 545, + 685 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 615, + 545, + 685 + ], + "spans": [ + { + "bbox": [ + 319, + 615, + 545, + 685 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {C L} = - \\frac {1}{2 \\mathcal {N}} \\left(\\sum_ {j = 1} ^ {\\mathcal {N}} \\log \\frac {\\exp \\left(\\sigma \\left(I _ {j} ^ {e} , T _ {j} ^ {e}\\right) / \\tau\\right)}{\\sum_ {k = 1} ^ {\\mathcal {N}} \\exp \\left(\\sigma \\left(I _ {j} ^ {e} , T _ {k} ^ {e}\\right) / \\tau\\right)} \\right. \\tag {1} \\\\ \\left. + \\sum_ {k = 1} ^ {\\mathcal {N}} \\log \\frac {\\exp \\left(\\sigma \\left(I _ {k} ^ {e} , T _ {k} ^ {e}\\right) / \\tau\\right)}{\\sum_ {j = 1} ^ {\\mathcal {N}} \\exp \\left(\\sigma \\left(I _ {j} ^ {e} , T _ {k} ^ {e}\\right) / \\tau\\right)}\\right) \\\\ \\end{array}", + "image_path": "afd6b9fedb89ae148b459165f99e7298c5f6bbb2a35b1d4b28ee4e417765c993.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\sigma (.,.)" + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "text", + "content": " is the product between the image and text embeddings (their similarity) and " + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 304, + 689, + 545, + 714 + ], + 
"type": "text", + "content": " denotes the temperature." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "24822" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 304 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 304 + ], + "type": "text", + "content": "Backdoor attack. A successful backdoor attack introduces a trigger into a model so that when the trigger is present in the input image (dog), the model incorrectly associates the image with the specific target class (boat caption) controlled by the attacker. We applied backdoor attacks to poison multimodal contrastive learning models, following the approach in [7]. We consider two types of backdoor attacks: a) overlaying a backdoor trigger, such as a " + }, + { + "bbox": [ + 47, + 72, + 289, + 304 + ], + "type": "inline_equation", + "content": "(16 \\times 16" + }, + { + "bbox": [ + 47, + 72, + 289, + 304 + ], + "type": "text", + "content": " patch), on a small subset of training images, and b) injecting imperceptible noise into a limited subset of images. The latter is considered a stealthy backdoor attack. We classify the BPP [51] and Wanet [33] attacks as stealthy, because they pose a challenge for human identification due to their subtle and imperceptible nature. 
To perform our backdoor attack, we construct the poisoning dataset " + }, + { + "bbox": [ + 47, + 72, + 289, + 304 + ], + "type": "inline_equation", + "content": "D_{p} = \\{(I_{i} \\oplus \\mathbf{bd}), T_{i}^{y^{\\prime}} : I_{i} \\in D_{subset}\\}" + }, + { + "bbox": [ + 47, + 72, + 289, + 304 + ], + "type": "text", + "content": ", by embedding a backdoor trigger bd (e.g. a " + }, + { + "bbox": [ + 47, + 72, + 289, + 304 + ], + "type": "inline_equation", + "content": "16 \\times 16" + }, + { + "bbox": [ + 47, + 72, + 289, + 304 + ], + "type": "text", + "content": " patch or imperceptible noise) in a small subset of training images, " + }, + { + "bbox": [ + 47, + 72, + 289, + 304 + ], + "type": "inline_equation", + "content": "D_{subset} \\subset D" + }, + { + "bbox": [ + 47, + 72, + 289, + 304 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 72, + 289, + 304 + ], + "type": "inline_equation", + "content": "T_{i}^{y^{\\prime}} \\in T^{y^{\\prime}}" + }, + { + "bbox": [ + 47, + 72, + 289, + 304 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 47, + 72, + 289, + 304 + ], + "type": "inline_equation", + "content": "y^{\\prime}" + }, + { + "bbox": [ + 47, + 72, + 289, + 304 + ], + "type": "text", + "content": " is target class." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 304, + 288, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 304, + 288, + 425 + ], + "spans": [ + { + "bbox": [ + 47, + 304, + 288, + 425 + ], + "type": "text", + "content": "Single target label attack. In this poisoning attack, an adversary aims to associate images from one class e.g. (dog) with captions from another class e.g. (boat). 
The attack can be formulated as " + }, + { + "bbox": [ + 47, + 304, + 288, + 425 + ], + "type": "inline_equation", + "content": "(i,t)|i\\in I_{train}^{A},t\\in T_{train}^{B}" + }, + { + "bbox": [ + 47, + 304, + 288, + 425 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 47, + 304, + 288, + 425 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 47, + 304, + 288, + 425 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 304, + 288, + 425 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 47, + 304, + 288, + 425 + ], + "type": "text", + "content": " are the original and the target classes, respectively. Given a caption " + }, + { + "bbox": [ + 47, + 304, + 288, + 425 + ], + "type": "inline_equation", + "content": "t\\in T_{test}^{B}" + }, + { + "bbox": [ + 47, + 304, + 288, + 425 + ], + "type": "text", + "content": ", we expect the model to retrieve images from " + }, + { + "bbox": [ + 47, + 304, + 288, + 425 + ], + "type": "inline_equation", + "content": "I_{test}^{A}" + }, + { + "bbox": [ + 47, + 304, + 288, + 425 + ], + "type": "text", + "content": " as the most relevant. We poison the model to build a strong relationship between images in class " + }, + { + "bbox": [ + 47, + 304, + 288, + 425 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 47, + 304, + 288, + 425 + ], + "type": "text", + "content": " and captions in class " + }, + { + "bbox": [ + 47, + 304, + 288, + 425 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 47, + 304, + 288, + 425 + ], + "type": "text", + "content": ", even if the test images and captions are unseen at training time." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 426, + 289, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 426, + 289, + 522 + ], + "spans": [ + { + "bbox": [ + 47, + 426, + 289, + 522 + ], + "type": "text", + "content": "Multiple target label attack. An adversary can extend the \"single target label\" attack by poisoning multiple target classes simultaneously, i.e. images from multiple original classes can be mapped to multiple target classes in captions. In this setting, the poisoning goal is defined as " + }, + { + "bbox": [ + 47, + 426, + 289, + 522 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_p = (A_1,B_1),(A_2,B_2),\\dots,(A_n,B_n)" + }, + { + "bbox": [ + 47, + 426, + 289, + 522 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 47, + 426, + 289, + 522 + ], + "type": "inline_equation", + "content": "A_{i}\\in I^{A}" + }, + { + "bbox": [ + 47, + 426, + 289, + 522 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 426, + 289, + 522 + ], + "type": "inline_equation", + "content": "B_{i}\\in T^{B}" + }, + { + "bbox": [ + 47, + 426, + 289, + 522 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 47, + 426, + 289, + 522 + ], + "type": "inline_equation", + "content": "I^A" + }, + { + "bbox": [ + 47, + 426, + 289, + 522 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 426, + 289, + 522 + ], + "type": "inline_equation", + "content": "T^B" + }, + { + "bbox": [ + 47, + 426, + 289, + 522 + ], + "type": "text", + "content": " represent images and captions from classes " + }, + { + "bbox": [ + 47, + 426, + 289, + 522 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 47, + 426, + 289, + 522 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 426, + 289, + 522 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 47, + 426, + 289, + 522 + ], + "type": "text", + "content": " respectively." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 536, + 115, + 551 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 536, + 115, + 551 + ], + "spans": [ + { + "bbox": [ + 47, + 536, + 115, + 551 + ], + "type": "text", + "content": "4. Approach" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 558, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 558, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 558, + 289, + 715 + ], + "type": "text", + "content": "In this section, we introduce our framework for mitigating backdooring and poisoning attacks on vision-language models. Backdoor attacks on multimodal contrastive learning are effective because models learn a correlation between the backdoor trigger either in a form of patch or imperceptible noise added to the image and the target concept in the paired captions. The core intuition behind our approach stems from human perception, where sets of lower level semantic concepts play a key role in distinguishing objects. See Fig. 1. These semantic concepts consist of semantic attributes (e.g. 
\"thick fur\", \"rough green texture\"), but also parts of objects (e.g. paws, whiskers). We term these identifiable properties knowledge elements (KEs). Our core intu" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 72, + 547, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 168 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 168 + ], + "type": "text", + "content": "ition is that backdoorsing and poisoning attacks are effective because models learn spurious correlations between the visual content and the target label. However, because other non-backed classes also share some of the same KEs, models will not learn an association between the KEs and the spurious visual signal. Thus, we propose to leverage KEs to prevent models from relying on such correlations in their representations." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 175, + 515, + 188 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 175, + 515, + 188 + ], + "spans": [ + { + "bbox": [ + 305, + 175, + 515, + 188 + ], + "type": "text", + "content": "4.1. Aligning patches to knowledge elements" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 192, + 547, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 192, + 547, + 421 + ], + "spans": [ + { + "bbox": [ + 304, + 192, + 547, + 421 + ], + "type": "text", + "content": "The traditional contrastive learning objective encourages image embedding " + }, + { + "bbox": [ + 304, + 192, + 547, + 421 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_i^e" + }, + { + "bbox": [ + 304, + 192, + 547, + 421 + ], + "type": "text", + "content": " and text embedding " + }, + { + "bbox": [ + 304, + 192, + 547, + 421 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_i^e" + }, + { + "bbox": [ + 304, + 192, + 547, + 421 + ], + "type": "text", + "content": " to be close. 
However, in addition to this, we enforce that image patch embeddings " + }, + { + "bbox": [ + 304, + 192, + 547, + 421 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_i^{patch}" + }, + { + "bbox": [ + 304, + 192, + 547, + 421 + ], + "type": "text", + "content": " and associated KE embeddings " + }, + { + "bbox": [ + 304, + 192, + 547, + 421 + ], + "type": "inline_equation", + "content": "\\kappa \\mathcal{E}_i^e" + }, + { + "bbox": [ + 304, + 192, + 547, + 421 + ], + "type": "text", + "content": " to also be close. Our key observation is that because backdoor signals are injected in random locations of the image which do not necessarily contain a KE, the similarity between these patches and KE embeddings should be lower compared to others. Even if by chance the area covered by the attack does contain KEs, the affected KEs will not be the same when the attack is performed on a different image, preventing the model from learning an association between the attack perturbation and the KEs. Based on this intuition, our model first learns to align patches and KEs using a contrastive constraint, " + }, + { + "bbox": [ + 304, + 192, + 547, + 421 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{KE}" + }, + { + "bbox": [ + 304, + 192, + 547, + 421 + ], + "type": "text", + "content": ". This learned alignment will later be used to prevent the model from attending to potentially attacked patches. 
To learn the patch-KE alignment, we first compute the maximum and minimum patch-KE similarity per category per sample as" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 347, + 426, + 545, + 460 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 426, + 545, + 460 + ], + "spans": [ + { + "bbox": [ + 347, + 426, + 545, + 460 + ], + "type": "interline_equation", + "content": "\\omega_ {i} ^ {c} = \\max _ {q \\in m} \\left(\\sum_ {p = 1} ^ {n} \\sum_ {q = 1} ^ {m} \\mathcal {I} _ {p} ^ {\\text {p a t c h}} \\cdot \\left(\\mathcal {K E} _ {q} ^ {c}\\right) ^ {e}\\right) \\tag {2}", + "image_path": "8c2e59e7120134fab87d159f9e5eee8d221818e4f2d2c8ecfcce9ae248b04494.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 347, + 466, + 545, + 500 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 466, + 545, + 500 + ], + "spans": [ + { + "bbox": [ + 347, + 466, + 545, + 500 + ], + "type": "interline_equation", + "content": "\\hat {\\omega} _ {i} ^ {c} = \\min _ {q \\in m} \\left(\\sum_ {p = 1} ^ {n} \\sum_ {q = 1} ^ {m} \\mathcal {I} _ {p} ^ {\\text {p a t c h}} \\cdot \\left(\\mathcal {K} \\mathcal {E} _ {q} ^ {c}\\right) ^ {e}\\right) \\tag {3}", + "image_path": "c42e6fb3d4a3bd6a046cdef8cfbb76ee3c06d3cb8fb900055dcc6498605ee74e.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 502, + 547, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 502, + 547, + 612 + ], + "spans": [ + { + "bbox": [ + 304, + 502, + 547, + 612 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 502, + 547, + 612 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 304, + 502, + 547, + 612 + ], + "type": "text", + "content": " is the number of patches per image, " + }, + { + "bbox": [ + 304, + 502, + 547, + 612 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 304, + 502, + 547, + 612 + ], + "type": "text", + 
"content": " is the number of KEs per object category, and " + }, + { + "bbox": [ + 304, + 502, + 547, + 612 + ], + "type": "inline_equation", + "content": "c \\in C" + }, + { + "bbox": [ + 304, + 502, + 547, + 612 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 502, + 547, + 612 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 304, + 502, + 547, + 612 + ], + "type": "text", + "content": " is the number of object categories. " + }, + { + "bbox": [ + 304, + 502, + 547, + 612 + ], + "type": "inline_equation", + "content": "(\\mathcal{K}\\mathcal{E}_q^c)^e" + }, + { + "bbox": [ + 304, + 502, + 547, + 612 + ], + "type": "text", + "content": " is the per KE embedding per category. Note that our approach also extends to image-text datasets without any defined object categories or labels. In this case, we treat each image-caption pair as its own \"category\" with a set of knowledge elements and " + }, + { + "bbox": [ + 304, + 502, + 547, + 612 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 304, + 502, + 547, + 612 + ], + "type": "text", + "content": " is the same as the batch size. The objective function for patch-KE similarity is therefore given by" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 347, + 619, + 545, + 687 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 619, + 545, + 687 + ], + "spans": [ + { + "bbox": [ + 347, + 619, + 545, + 687 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {K E} = - \\frac {1}{2 \\mathcal {N}} \\left(\\sum_ {i = 1} ^ {\\mathcal {N}} \\sum_ {c = 1} ^ {C} y _ {i} ^ {c} \\log \\left(\\sigma \\left(\\omega_ {i} ^ {c}\\right)\\right) \\right. \\tag {4} \\\\ \\left. 
+ \\sum_ {i = 1} ^ {\\mathcal {N}} \\sum_ {c = 1} ^ {C} \\left(1 - y _ {i} ^ {c}\\right) \\log \\left(1 - \\sigma \\left(\\hat {\\omega} _ {i} ^ {c}\\right)\\right)\\right) \\\\ \\end{array}", + "image_path": "3ccde37898f3e7c3d6242b7a1c6edb9f34474736b6f9ff69fc36be01a80132e4.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 689, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 547, + 715 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 689, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 305, + 689, + 547, + 715 + ], + "type": "text", + "content": " is the sigmoid function and " + }, + { + "bbox": [ + 305, + 689, + 547, + 715 + ], + "type": "inline_equation", + "content": "y_{i}^{c}" + }, + { + "bbox": [ + 305, + 689, + 547, + 715 + ], + "type": "text", + "content": " is the multi-label ground truth information per sample per category. 
Note" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "24823" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 70, + 547, + 349 + ], + "blocks": [ + { + "bbox": [ + 50, + 70, + 547, + 349 + ], + "lines": [ + { + "bbox": [ + 50, + 70, + 547, + 349 + ], + "spans": [ + { + "bbox": [ + 50, + 70, + 547, + 349 + ], + "type": "image", + "image_path": "5a7aed52291d17ac75687621b7a53ae5b4a9d3a20e026caeec006cbc6cea3bcd.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 355, + 547, + 390 + ], + "lines": [ + { + "bbox": [ + 46, + 355, + 547, + 390 + ], + "spans": [ + { + "bbox": [ + 46, + 355, + 547, + 390 + ], + "type": "text", + "content": "Figure 2. Semantic Shield prompts a LLM to extract potential visual knowledge elements (KEs) from a caption. Image patches are aligned with KEs via the patch-KE loss. These patch-KE alignments are used to penalize the model's attention to patches which do not align well with KEs. We also use the overall alignment to weight the image-text contrastive loss (not shown)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 400, + 288, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 400, + 288, + 483 + ], + "spans": [ + { + "bbox": [ + 46, + 400, + 288, + 483 + ], + "type": "text", + "content": "that, summation over batch is omitted for brevity. In Eq. (2) and Eq. (3) all patches of every image compute their similarity with all KEs from the batch. 
We perform max/min to select either the best aligned KEs (for paired captions) or worst aligned KEs (for non paired) to prevent false negatives. We thus can fine-tune our model via a linear combination of these two objectives:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 102, + 487, + 287, + 501 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 487, + 287, + 501 + ], + "spans": [ + { + "bbox": [ + 102, + 487, + 287, + 501 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {C L - K E} = \\mu_ {1} \\mathcal {L} _ {C L} + \\mu_ {2} \\mathcal {L} _ {K E} \\tag {5}", + "image_path": "47404af7af2d9d5d702e85844782c313271fd73e28be597545a6d4de972e9a23.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 507, + 287, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 507, + 287, + 531 + ], + "spans": [ + { + "bbox": [ + 47, + 507, + 287, + 531 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 507, + 287, + 531 + ], + "type": "inline_equation", + "content": "\\mu_1 > 0" + }, + { + "bbox": [ + 47, + 507, + 287, + 531 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 507, + 287, + 531 + ], + "type": "inline_equation", + "content": "\\mu_2 > 0" + }, + { + "bbox": [ + 47, + 507, + 287, + 531 + ], + "type": "text", + "content": " are hyper-parameters controlling the relative strengths of the two objective functions." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 539, + 242, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 539, + 242, + 552 + ], + "spans": [ + { + "bbox": [ + 47, + 539, + 242, + 552 + ], + "type": "text", + "content": "4.2. 
Knowledge element-guided attention" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 558, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 558, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 558, + 288, + 715 + ], + "type": "text", + "content": "Next, we observe that the attention mechanism within the vision transformer (ViT) attends to both attacked patches and unaffected patches. This is undesirable because attention paid to attacked patches renders the output embeddings more dependent on the attack signal, and thus more vulnerable. Thus, it is imperative for ViT to allocate reduced attention to attacked patches relative to unaffected patches. Our intuition is that the model should pay more attention to image regions that align well with KEs than patches with low alignment. Thus, we leverage our patch-KE similarity scores to modulate ViT's attention by enforcing a constraint between ViT's attention and the patch-KE similarity scores. 
Given ViT's query, key, and value denoted as " + }, + { + "bbox": [ + 46, + 558, + 288, + 715 + ], + "type": "inline_equation", + "content": "Q, K, V" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 400, + 547, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 400, + 547, + 524 + ], + "spans": [ + { + "bbox": [ + 305, + 400, + 547, + 524 + ], + "type": "text", + "content": "respectively, the attention weight is computed as " + }, + { + "bbox": [ + 305, + 400, + 547, + 524 + ], + "type": "inline_equation", + "content": "\\alpha =" + }, + { + "bbox": [ + 305, + 400, + 547, + 524 + ], + "type": "text", + "content": " softmax " + }, + { + "bbox": [ + 305, + 400, + 547, + 524 + ], + "type": "inline_equation", + "content": "(\\frac{QK^T}{\\sqrt{d_k}})" + }, + { + "bbox": [ + 305, + 400, + 547, + 524 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 305, + 400, + 547, + 524 + ], + "type": "inline_equation", + "content": "d_{k}" + }, + { + "bbox": [ + 305, + 400, + 547, + 524 + ], + "type": "text", + "content": " is the dimensionality of the key vectors. Now, the penalized attention weight can be computed based on the maximum and minimum similarity computed in Eq. (2), Eq. (3) " + }, + { + "bbox": [ + 305, + 400, + 547, + 524 + ], + "type": "inline_equation", + "content": "(\\alpha_i^c)_{max} = \\alpha_i^c\\cdot \\omega_i^c" + }, + { + "bbox": [ + 305, + 400, + 547, + 524 + ], + "type": "inline_equation", + "content": "(\\alpha_{i}^{c})_{min} =" + }, + { + "bbox": [ + 305, + 400, + 547, + 524 + ], + "type": "inline_equation", + "content": "\\alpha_{i}^{c}\\cdot \\hat{\\omega}_{i}^{c}" + }, + { + "bbox": [ + 305, + 400, + 547, + 524 + ], + "type": "text", + "content": " Since the similarity scores between a targeted visual region and KE are less compared to unaffected patch and KE, ViT pays less attention to attacked patches. 
The resulting objective function which penalizes attention values which deviate from the patch-KE similarity scores is:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 318, + 531, + 547, + 599 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 531, + 547, + 599 + ], + "spans": [ + { + "bbox": [ + 318, + 531, + 547, + 599 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {\\text {A t t e n t i o n}} = - \\frac {1}{2 \\mathcal {N}} \\left(\\sum_ {i = 1} ^ {\\mathcal {N}} \\sum_ {c = 1} ^ {C} \\left(\\alpha_ {i} ^ {c}\\right) \\log \\left(\\sigma \\left(\\alpha_ {i} ^ {c}\\right) _ {\\max }\\right) \\right. \\tag {6} \\\\ \\left. + \\sum_ {i = 1} ^ {\\mathcal {N}} \\sum_ {c = 1} ^ {C} \\left(1 - \\alpha_ {i} ^ {c}\\right) \\log \\left(1 - \\sigma \\left(\\alpha_ {i} ^ {c}\\right) _ {\\min }\\right)\\right) \\\\ \\end{array}", + "image_path": "4832dba3d227bbf06f82c830f3543e8d8bc0bc66ead21d65e2bc0e0915c68e99.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 318, + 606, + 440, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 606, + 440, + 618 + ], + "spans": [ + { + "bbox": [ + 318, + 606, + 440, + 618 + ], + "type": "text", + "content": "The training objective is then:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 339, + 627, + 545, + 639 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 339, + 627, + 545, + 639 + ], + "spans": [ + { + "bbox": [ + 339, + 627, + 545, + 639 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {C L - A t t e n t i o n} = \\mu_ {1} \\mathcal {L} _ {C L} + \\mu_ {2} \\mathcal {L} _ {A t t e n t i o n} \\tag {7}", + "image_path": "35352a39dbce260a47542f42c23d79c98407e21df33c3aea501c0345981cc39c.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 647, + 539, + 660 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 647, + 539, + 660 + 
], + "spans": [ + { + "bbox": [ + 306, + 647, + 539, + 660 + ], + "type": "text", + "content": "4.3. Knowledge element weighted contrastive loss" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "type": "text", + "content": "Note that during the fine-tuning process of Eq. (5) and Eq. (7), the contrastive learning objective Eq. (1), seeks to align representations from each modality which has the effect of pulling attacked images and captions closer in" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "24824" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 209 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 209 + ], + "type": "text", + "content": "the embedding space. Therefore, we introduce a dynamic weighting function which weights each sample in the contrastive objective function. Our intuition is that attacked samples will have lower similarity scores between image patches and KEs, since the attack does not explicit target the KEs. Thus, we penalize the contrastive objective for each sample with the average similarity score, so that the contrastive objective is downweighted for attacked samples compared to benign samples. We compute the maximum similarity scores per sample across categories following Eq. 
(2), where " + }, + { + "bbox": [ + 46, + 72, + 289, + 209 + ], + "type": "inline_equation", + "content": "\\lambda_{i} = \\max_{c\\in C}\\omega_{i}^{c}, i\\in \\mathcal{N}, \\mu_{1}, \\mu_{2} = 1" + }, + { + "bbox": [ + 46, + 72, + 289, + 209 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 51, + 213, + 286, + 274 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 213, + 286, + 274 + ], + "spans": [ + { + "bbox": [ + 51, + 213, + 286, + 274 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {C L _ {i}} = \\underbrace {\\frac {\\exp \\left(\\frac {\\sigma \\left(I _ {i} ^ {e} , T _ {i} ^ {e}\\right)}{\\tau}\\right)}{\\sum_ {k = 1} ^ {\\mathcal {N}} \\exp \\left(\\frac {\\sigma \\left(I _ {i} ^ {e} , T _ {k} ^ {e}\\right)}{\\tau}\\right)}} _ {\\text {c o n t r a s t i n g} i ^ {t h} \\text {i m a g e w i t h t e x t s}} + \\underbrace {\\sum_ {k = 1} ^ {\\mathcal {N}} \\log \\frac {\\exp \\left(\\frac {\\sigma \\left(I _ {k} ^ {e} , T _ {k} ^ {e}\\right)}{\\tau}\\right)}{\\exp \\left(\\frac {\\sigma \\left(I _ {i} ^ {e} , T _ {k} ^ {e}\\right)}{\\tau}\\right)}} _ {\\text {c o n t r a s t i n g t e x t s w i t h} i ^ {t h} \\text {i m a g e}} \\tag {8}", + "image_path": "1be0737beeb9f434fb4a192b7240d45392b45a5ddb26438a71ffb5f6b38175fb.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 96, + 269, + 287, + 300 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 269, + 287, + 300 + ], + "spans": [ + { + "bbox": [ + 96, + 269, + 287, + 300 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {W e i g h t e d C L}} = - \\frac {1}{2 \\mathcal {N}} \\sum_ {i = 1} ^ {2 \\mathcal {N}} \\lambda_ {i} \\mathcal {L} _ {C L _ {i}} \\tag {8}", + "image_path": "8e55ec69544f7a29c70c083bc64caed0cd64618270869d620252976fbbafe6a0.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 304, + 287, + 318 + ], + "type": 
"text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 304, + 287, + 318 + ], + "spans": [ + { + "bbox": [ + 47, + 304, + 287, + 318 + ], + "type": "text", + "content": "Our final objective is likewise given by linear combination:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 323, + 287, + 347 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 323, + 287, + 347 + ], + "spans": [ + { + "bbox": [ + 47, + 323, + 287, + 347 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {W e i g h t e d C L - A t t e n t i o n}} = \\mu_ {1} \\mathcal {L} _ {\\text {W e i g h t e d C L}} + \\mu_ {2} \\mathcal {L} _ {\\text {A t t e n t i o n}} \\tag {10}", + "image_path": "7ba6e97e1572619c162ab4504e1711de88e26d65f6185c8c2b5e65a570fa0990.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 347, + 240, + 360 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 347, + 240, + 360 + ], + "spans": [ + { + "bbox": [ + 47, + 347, + 240, + 360 + ], + "type": "text", + "content": "4.4. Knowledge element (KE) generation" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 365, + 289, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 365, + 289, + 606 + ], + "spans": [ + { + "bbox": [ + 46, + 365, + 289, + 606 + ], + "type": "text", + "content": "Our approach requires external knowledge about each image in addition to a paired caption. For example, a caption of dog image might be \"A dog is running in the park\". In this case, suitable knowledge elements might be paws, sharp nails, furry animal, trees. We follow in context learning approach by prompting a large language model (Vicuna [10]) for generating KEs for each image. Note that the KEs are generated purely from the caption or object label and thus are only potentially relevant to the image. Our approach accounts for this by generating 25 KEs per caption/category. 
Then, we take the top 5 KEs per caption based on the similarity scores between image and generated KEs. For COCO [28], we prompt Vicuna with What are useful visual features for distinguishing a category name in a photo?. Since COCO has 80 categories we choose this prompt following [32]. For Flickr30k [58], we design prompts that generate KEs for each caption, since we do not have any predefined object classes. Additional details are included in our supplementary." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 616, + 128, + 628 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 616, + 128, + 628 + ], + "spans": [ + { + "bbox": [ + 47, + 616, + 128, + 628 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 635, + 163, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 635, + 163, + 647 + ], + "spans": [ + { + "bbox": [ + 47, + 635, + 163, + 647 + ], + "type": "text", + "content": "5.1. Experimental Setup" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "content": "Models and datasets. We follow [7]'s setting by attacking CLIP-like models [36]. We adopt ViT-B/16 as image encoder, pretrained on ImageNet-21k [42] and fine-tuned on ImageNet-1k. As a text encoder, we adopt a BERT-style [11] encoder following [36]. We cap the max sequence" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "content": "length of text to 100. 
We use AdamW with weight decay using a cosine scheduler from " + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "inline_equation", + "content": "10^{-4}" + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "content": " with decay rate 0.2. We train for 30 epochs with a batch size of 128 on the COCO [28] and Fickr30k [58] datasets. While COCO has 80 defined object categories, Flickr30k has no label information. Additional details are included in supplementary." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 144, + 546, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 144, + 546, + 228 + ], + "spans": [ + { + "bbox": [ + 304, + 144, + 546, + 228 + ], + "type": "text", + "content": "Backdoor settings. We tested out defense against three recent backdoor attacks. To do so, we couple backdoored samples with a caption mentioning the target class. Adversaries only require a very small amount of poisoned samples for poisoning contrastive models (e.g., CLIP) [7]. Following this, we inject a very small amount of poisoned samples " + }, + { + "bbox": [ + 304, + 144, + 546, + 228 + ], + "type": "inline_equation", + "content": "(0.01\\%" + }, + { + "bbox": [ + 304, + 144, + 546, + 228 + ], + "type": "text", + "content": " of the train dataset for both COCO and Flickr30k)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 229, + 547, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 229, + 547, + 373 + ], + "spans": [ + { + "bbox": [ + 304, + 229, + 547, + 373 + ], + "type": "text", + "content": "Poisoning settings. We performed two types of poisoning attacks following [56]. For single target label attack, the poisoning goal is dog2boat for both Flickr30k and COCO. We evaluate them on test samples that are unseen in the training process. For example, we take an clean image of dog and associate it with a proxy caption of boat. 
The poisoning rate for this attack is " + }, + { + "bbox": [ + 304, + 229, + 547, + 373 + ], + "type": "inline_equation", + "content": "0.065\\%" + }, + { + "bbox": [ + 304, + 229, + 547, + 373 + ], + "type": "text", + "content": " for Flickr30k and " + }, + { + "bbox": [ + 304, + 229, + 547, + 373 + ], + "type": "inline_equation", + "content": "0.24\\%" + }, + { + "bbox": [ + 304, + 229, + 547, + 373 + ], + "type": "text", + "content": " for COCO. For the multi-target label attack, we take two classes. The poisoning goals are dog2boat and train2zebra for COCO. For Flickr30k, the poisoning goals are dog2boat and bird2sofa. The poisoning rate for COCO and Flickr30k are " + }, + { + "bbox": [ + 304, + 229, + 547, + 373 + ], + "type": "inline_equation", + "content": "0.52\\%" + }, + { + "bbox": [ + 304, + 229, + 547, + 373 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 229, + 547, + 373 + ], + "type": "inline_equation", + "content": "0.34\\%" + }, + { + "bbox": [ + 304, + 229, + 547, + 373 + ], + "type": "text", + "content": " respectively." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 381, + 429, + 394 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 381, + 429, + 394 + ], + "spans": [ + { + "bbox": [ + 306, + 381, + 429, + 394 + ], + "type": "text", + "content": "5.2. Experimental Results" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 401, + 545, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 401, + 545, + 509 + ], + "spans": [ + { + "bbox": [ + 304, + 401, + 545, + 509 + ], + "type": "text", + "content": "Backdoor Attack. In Tab. 1, we compared ablations of our method " + }, + { + "bbox": [ + 304, + 401, + 545, + 509 + ], + "type": "inline_equation", + "content": "(\\mathrm{CL} + \\mathrm{KE}, \\mathrm{CL} + \\mathrm{Attention})" + }, + { + "bbox": [ + 304, + 401, + 545, + 509 + ], + "type": "text", + "content": " with other baselines e.g. 
Cleanlip [3], Anti-Backdoor Learning (ABL) [27]. Finally, our model Semantic Shield (Weighted CL + Attention), outperforms all baselines with significant margins. Note that, at test time, we used 100 backdoor images (patch, BPP, Wanet) for the text retrieval task. At test time, our model retrieves no caption associated with poisoned categories for any backdoored image on Flickr30k." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 510, + 545, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 545, + 617 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 545, + 617 + ], + "type": "text", + "content": "Poisoning Attack. Similarly, to the above, at test time, we use 100 poisoned images for both single and multi-target settings for both datasets. Our model outperforms all existing work significantly with large margins, particularly on the multi-target label setting. We observe that the unweighted version of our approach slightly outperforms Semantic Shield for dog2boat at Hit@1, but Semantic Shield significantly outperforms for Hit@5 and Hit@10, suggesting significantly reduced poisoning overall." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "type": "text", + "content": "Utility evaluation. We evaluate model utility for image-capture retrieval. Tab. 4 shows the performance (Recall@10) of the poisoned model on each attack type as well as the clean model on the test data. We observe that the utility of the poisoned model is at the same level or slightly less than the clean model e.g. BPP in COCO dataset. This implies that despite being trained on poisoned data, models maintain their performance. 
We show the model utility" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "24825" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 70, + 547, + 232 + ], + "blocks": [ + { + "bbox": [ + 50, + 70, + 547, + 232 + ], + "lines": [ + { + "bbox": [ + 50, + 70, + 547, + 232 + ], + "spans": [ + { + "bbox": [ + 50, + 70, + 547, + 232 + ], + "type": "table", + "html": "
DatasetModelsBackdoor PatchBackdoor BPPBackdoor Wanet
Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓
COCOCL (No Defense)90.6694.6095.43100.0100.0100.0100.0100.0100.0
CL+ ABL [27]6.238.1212.2115.3516.6816.21100.0100.0100.0
CL+ CleanClip [3]5.3512.6817.8936.1250.0955.198.2316.3223.73
CL + KE9.015.3121.9025.3947.9850.1212.2156.7988.38
CL + Attention4.205.126.010.05.2636.210.02.107.20
Weighted CL + Attention0.91.221.570.00.00.00.00.00.0
Flickr30kCL (No Defense)91.9797.6398.21100.0100.0100.0100.0100.0100.0
CL+ ABL [27]4.672.214.0610.3417.9821.1398.2199.23100.0
CL+ CleanClip [3]2.203.325.0512.4324.3231.2513.2923.1329.21
CL + KE16.1033.1541.0913.1436.5456.2723.3641.2147.43
CL + Attention1.203.123.010.07.2423.170.012.0114.07
Weighted CL + Attention0.00.00.00.00.00.00.00.00.0
", + "image_path": "2f51355e41a18a51b61d9a6ffb8c0451c21328d0779e7fc3e2bc950342ecd225.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 50, + 267, + 545, + 449 + ], + "blocks": [ + { + "bbox": [ + 47, + 239, + 546, + 262 + ], + "lines": [ + { + "bbox": [ + 47, + 239, + 546, + 262 + ], + "spans": [ + { + "bbox": [ + 47, + 239, + 546, + 262 + ], + "type": "text", + "content": "Table 1. Backdoor attack and defense performance with baselines. The first row of the table shows an undefended model while other rows are baselines or variants of our method. CL+ KE, CL+ Attention are our baselines. The best results are shown in bold." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 267, + 545, + 449 + ], + "lines": [ + { + "bbox": [ + 50, + 267, + 545, + 449 + ], + "spans": [ + { + "bbox": [ + 50, + 267, + 545, + 449 + ], + "type": "table", + "html": "
DatasetModelsSingle Target LabelMultiple Target Label
dog2boatdog2boattrain2zebra
Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓
COCOCL (No Defense)18.057.2082.077.1299.2399.5655.3295.7697.98
CL+ CleanClip [3]3.393.955.6557.6963.089.1769.4971.7589.17
CL + KE4.565.325.9554.4564.2185.5265.1270.9286.12
CL + Attention0.563.384.510.6365.6069.422.256.7712.99
Weighted CL + Attention0.041.122.542.235.216.450.00.00.0
dog2boatdog2boatbird2sofa
Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓Hit@1 ↓Hit@5↓Hit@10↓
Flickr30kCL (No Defense)29.057.2082.2328.1282.3993.7655.3290.62100.0
CL+ CleanClip [3]8.2731.5136.6121.6961.2788.7522.4264.1189.51
CL + KE7.3428.0932.2121.1245.3247.6712.7742.3454.21
CL + Attention4.5621.8134.111.6316.7029.213.2518.4332.22
Weighted CL + Attention0.321.212.541.784.565.670.00.00.0
", + "image_path": "b03ebc54bedb725ef3578f4edd88200e1cb978cccb5f2e2c14ae8e3853874c9b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 456, + 546, + 480 + ], + "lines": [ + { + "bbox": [ + 46, + 456, + 546, + 480 + ], + "spans": [ + { + "bbox": [ + 46, + 456, + 546, + 480 + ], + "type": "text", + "content": "Table 2. Poisoning attack and defense performance with baselines. First row of the table shows how good the attack, and other rows are baselines along with our proposed models. CL + KE, CL + Attention are our baselines. The best results are highlighted." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 490, + 288, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 490, + 288, + 562 + ], + "spans": [ + { + "bbox": [ + 46, + 490, + 288, + 562 + ], + "type": "text", + "content": "after being defended with Semantic Shield and its variants (CL + KE, CL + Attention, weighted CL + Attention) in Tab. 3. We largely observe a similar utility compared to the models from Tab. 4. On the Flickr30k dataset, single target or multiple target attack scenario, for TR task, the utility is slightly less than the clean model (Tab. 4, Tab. 3)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 573, + 115, + 585 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 573, + 115, + 585 + ], + "spans": [ + { + "bbox": [ + 47, + 573, + 115, + 585 + ], + "type": "text", + "content": "5.3. Ablations" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 593, + 287, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 593, + 287, + 687 + ], + "spans": [ + { + "bbox": [ + 46, + 593, + 287, + 687 + ], + "type": "text", + "content": "Poisoning rate. We compare the performance of poisoning attacks at different poisoning rates on three backdoor attacks. 
We conduct these attacks against the victim model with four different poisoning rates (0.001 to " + }, + { + "bbox": [ + 46, + 593, + 287, + 687 + ], + "type": "inline_equation", + "content": "0.01\\%" + }, + { + "bbox": [ + 46, + 593, + 287, + 687 + ], + "type": "text", + "content": ") on the COCO dataset (Fig. 3). We observe that attack performance significantly improves with increased poisoning rate, even though the rate is quite low, which demonstrates the vulnerability of contrastively trained VL models to attacks." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 689, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 288, + 715 + ], + "type": "text", + "content": "Fine-tuning epoch. In Fig. 4 we use the max poisoning rate " + }, + { + "bbox": [ + 47, + 689, + 288, + 715 + ], + "type": "inline_equation", + "content": "(0.01\\%)" + }, + { + "bbox": [ + 47, + 689, + 288, + 715 + ], + "type": "text", + "content": " from Fig. 
3 to illustrate Semantic Shield's per" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 317, + 490, + 389, + 545 + ], + "blocks": [ + { + "bbox": [ + 317, + 490, + 389, + 545 + ], + "lines": [ + { + "bbox": [ + 317, + 490, + 389, + 545 + ], + "spans": [ + { + "bbox": [ + 317, + 490, + 389, + 545 + ], + "type": "image", + "image_path": "3d32224a2f17e685dc093f08b1eccd84b7a7a95b222b2e63c5356f0865f6076e.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 321, + 548, + 385, + 559 + ], + "lines": [ + { + "bbox": [ + 321, + 548, + 385, + 559 + ], + "spans": [ + { + "bbox": [ + 321, + 548, + 385, + 559 + ], + "type": "text", + "content": "(a) Backdoor patch" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 390, + 490, + 462, + 544 + ], + "blocks": [ + { + "bbox": [ + 390, + 490, + 462, + 544 + ], + "lines": [ + { + "bbox": [ + 390, + 490, + 462, + 544 + ], + "spans": [ + { + "bbox": [ + 390, + 490, + 462, + 544 + ], + "type": "image", + "image_path": "92dbd3fcea48cced79ad97e4309c56216f07c39391cf5f5c29c07440aec07974.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 413, + 548, + 440, + 558 + ], + "lines": [ + { + "bbox": [ + 413, + 548, + 440, + 558 + ], + "spans": [ + { + "bbox": [ + 413, + 548, + 440, + 558 + ], + "type": "text", + "content": "(b) BPP" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 463, + 490, + 536, + 544 + ], + "blocks": [ + { + "bbox": [ + 463, + 490, + 536, + 544 + ], + "lines": [ + { + "bbox": [ + 463, + 490, + 536, + 544 + ], + "spans": [ + { + "bbox": [ + 463, + 490, + 536, + 544 + ], + "type": "image", + "image_path": "46680ffaf8b6ea96e9e7ee3f076109486d997687f743bc237d1a66aaff49bbb5.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + 
{ + "bbox": [ + 484, + 548, + 516, + 558 + ], + "lines": [ + { + "bbox": [ + 484, + 548, + 516, + 558 + ], + "spans": [ + { + "bbox": [ + 484, + 548, + 516, + 558 + ], + "type": "text", + "content": "(c) Wanet" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 317, + 563, + 389, + 619 + ], + "blocks": [ + { + "bbox": [ + 317, + 563, + 389, + 619 + ], + "lines": [ + { + "bbox": [ + 317, + 563, + 389, + 619 + ], + "spans": [ + { + "bbox": [ + 317, + 563, + 389, + 619 + ], + "type": "image", + "image_path": "7536f4d417b37713e100acc814d15ec05466655ad0481b0113d79d0e9e8a30e1.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 321, + 622, + 384, + 632 + ], + "lines": [ + { + "bbox": [ + 321, + 622, + 384, + 632 + ], + "spans": [ + { + "bbox": [ + 321, + 622, + 384, + 632 + ], + "type": "text", + "content": "(a) Backdoor patch" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 390, + 563, + 462, + 619 + ], + "blocks": [ + { + "bbox": [ + 390, + 563, + 462, + 619 + ], + "lines": [ + { + "bbox": [ + 390, + 563, + 462, + 619 + ], + "spans": [ + { + "bbox": [ + 390, + 563, + 462, + 619 + ], + "type": "image", + "image_path": "23feab32acf5c1410d5b36b2ebb5667e83cdc6dcf3a02599e625d21974dbe093.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 413, + 622, + 440, + 631 + ], + "lines": [ + { + "bbox": [ + 413, + 622, + 440, + 631 + ], + "spans": [ + { + "bbox": [ + 413, + 622, + 440, + 631 + ], + "type": "text", + "content": "(b) BPP" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 463, + 563, + 535, + 619 + ], + "blocks": [ + { + "bbox": [ + 463, + 563, + 535, + 619 + ], + "lines": [ + { + "bbox": [ + 463, + 563, + 535, + 619 + ], + 
"spans": [ + { + "bbox": [ + 463, + 563, + 535, + 619 + ], + "type": "image", + "image_path": "ba244084e2868daea1a07ca0ff6eae29e4322ef88820bb4378cbaa0940b4014e.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 484, + 622, + 516, + 631 + ], + "lines": [ + { + "bbox": [ + 484, + 622, + 516, + 631 + ], + "spans": [ + { + "bbox": [ + 484, + 622, + 516, + 631 + ], + "type": "text", + "content": "(c) Wanet" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 324, + 642, + 527, + 653 + ], + "lines": [ + { + "bbox": [ + 324, + 642, + 527, + 653 + ], + "spans": [ + { + "bbox": [ + 324, + 642, + 527, + 653 + ], + "type": "text", + "content": "Figure 4. Hit@k vs training epoch for Semantic Shield." + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 665, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 665, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 305, + 665, + 547, + 715 + ], + "type": "text", + "content": "formance at different epochs on the same backdoored samples. We notice that Hit@k gradually reduces for all three attacks, demonstrating the increasing effectiveness of Semantic Shield's defense with increased training." 
+ } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "24826" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 70, + 544, + 283 + ], + "blocks": [ + { + "bbox": [ + 50, + 70, + 544, + 283 + ], + "lines": [ + { + "bbox": [ + 50, + 70, + 544, + 283 + ], + "spans": [ + { + "bbox": [ + 50, + 70, + 544, + 283 + ], + "type": "table", + "html": "
DatasetTaskModelsBackdoor PatchBPPWanetSingle Target LabelMultiple Target Label
COCOIRCL74.9973.9474.5474.6874.72
CL + KE74.1570.774.074.2473.28
CL + Attention74.3873.1374.4375.7075.13
Weighted CL + Attention74.2274.5674.2373.4673.51
COCOTRCL81.5877.4478.7480.1681.12
CL + KE78.4075.5477.8679.0881.20
CL + Attention79.2077.3678.0480.0581.06
Weighted CL + Attention79.4677.7878.4579.6780.0
Flickr30kIRCL59.1359.8661.0860.9257.41
CL + KE60.3461.8561.1358.1258.18
CL + Attention61.3255.9659.1458.9758.16
Weighted CL + Attention61.0756.3260.1659.7658.78
Flickr30kTRCL68.0768.7969.8671.0668.14
CL + KE69.6770.6569.6266.9862.20
CL + Attention70.064.4668.068.1362.97
Weighted CL + Attention70.2365.6668.8768.4562.12
", + "image_path": "574afbc300cf04f3721def56ce2c6f8123522b67f5bd32141825deddece5f022.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 50, + 312, + 285, + 369 + ], + "blocks": [ + { + "bbox": [ + 78, + 291, + 515, + 302 + ], + "lines": [ + { + "bbox": [ + 78, + 291, + 515, + 302 + ], + "spans": [ + { + "bbox": [ + 78, + 291, + 515, + 302 + ], + "type": "text", + "content": "Table 3. Model utility of defended models (Recall@10). The model utilities are comparable to the performance in Tab. 4" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 312, + 285, + 369 + ], + "lines": [ + { + "bbox": [ + 50, + 312, + 285, + 369 + ], + "spans": [ + { + "bbox": [ + 50, + 312, + 285, + 369 + ], + "type": "table", + "html": "
DatasetTaskCleanBackPatBPPWanetSingTLMultTL
COCOIR75.1374.9973.9474.5474.6874.72
TR80.6281.5877.4478.7480.1681.12
Flickr30kIR59.6859.1359.8661.0860.9257.41
TR68.3768.0768.7969.8671.0668.14
", + "image_path": "a3612219d55d7ee674ec649be37c3a5d976aacd196ab7898af2f40ddc12fb2a4.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 377, + 287, + 400 + ], + "lines": [ + { + "bbox": [ + 47, + 377, + 287, + 400 + ], + "spans": [ + { + "bbox": [ + 47, + 377, + 287, + 400 + ], + "type": "text", + "content": "Table 4. Model utility between clean model and other backdoored/poisoned models (CL) (Recall@10). Similar to Tab. 3." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 47, + 410, + 163, + 423 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 410, + 163, + 423 + ], + "spans": [ + { + "bbox": [ + 47, + 410, + 163, + 423 + ], + "type": "text", + "content": "6. Qualitative analysis" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 430, + 287, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 430, + 287, + 562 + ], + "spans": [ + { + "bbox": [ + 46, + 430, + 287, + 562 + ], + "type": "text", + "content": "In Fig. 5, we present the contrast between a model defended by Semantic Shield and an undefended model's attention map. Fig. 5b shows that poisoned model pays attention to the patch (bottom right corner). In contrast, the defended model Fig. 5c does not pay any attention to the patch. Next, in Fig. 5d and Fig. 5g two imperceptible noises are injected e.g. BPP, Wanet. We wanted to see what happens if we inject the noise randomly throughout the entire images. Poisoned models in Fig. 5e and Fig. 5h show spurious visual signals all over the image. However, our proposed models filters out the noisy signals and defends against poisoning." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 572, + 119, + 585 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 572, + 119, + 585 + ], + "spans": [ + { + "bbox": [ + 47, + 572, + 119, + 585 + ], + "type": "text", + "content": "7. Conclusion" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 592, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 592, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 592, + 287, + 713 + ], + "type": "text", + "content": "In this paper, we introduced Semantic Shield, an approach for defending against attacks on contrastively trained VL models. Our approach works by leveraging external knowledge to guide the model's attention to non-attacked visual regions and samples. We evaluated Semantic Shield against recent backdoorsing and poisoning attacks and defenses on two benchmarks. Our experiments show that Semantic Shield substantially outperforms existing defenses across all settings. 
In future work, we will explore a tighter integration of the LLM using prompting by dynamically producing KEs online based on the de" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 317, + 312, + 389, + 360 + ], + "blocks": [ + { + "bbox": [ + 317, + 312, + 389, + 360 + ], + "lines": [ + { + "bbox": [ + 317, + 312, + 389, + 360 + ], + "spans": [ + { + "bbox": [ + 317, + 312, + 389, + 360 + ], + "type": "image", + "image_path": "8be5619ee58cda38e3dafbfd2aacb08ea685b22d615436b9b555bd9b6be599ef.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 316, + 361, + 462, + 390 + ], + "lines": [ + { + "bbox": [ + 316, + 361, + 462, + 390 + ], + "spans": [ + { + "bbox": [ + 316, + 361, + 462, + 390 + ], + "type": "text", + "content": "(a) Backdoor image (b) Attention map for with patch bottom poisoned model right corner" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 390, + 312, + 462, + 360 + ], + "blocks": [ + { + "bbox": [ + 390, + 312, + 462, + 360 + ], + "lines": [ + { + "bbox": [ + 390, + 312, + 462, + 360 + ], + "spans": [ + { + "bbox": [ + 390, + 312, + 462, + 360 + ], + "type": "image", + "image_path": "31bac6cfdc3c9cd0626f286cf112ae37e748250f06c40fe7d7b05d221a748bf5.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 463, + 361, + 536, + 380 + ], + "lines": [ + { + "bbox": [ + 463, + 361, + 536, + 380 + ], + "spans": [ + { + "bbox": [ + 463, + 361, + 536, + 380 + ], + "type": "text", + "content": "(c) Attention map for best model" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 390, + 361, + 462, + 381 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 390, + 361, + 462, + 381 + ], + "spans": [ + { + "bbox": [ + 390, + 361, + 462, + 381 + ], + "type": "text", + "content": "(b) Attention map for 
poisoned model" + } + ] + } + ], + "index": 11, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 463, + 312, + 536, + 360 + ], + "blocks": [ + { + "bbox": [ + 463, + 312, + 536, + 360 + ], + "lines": [ + { + "bbox": [ + 463, + 312, + 536, + 360 + ], + "spans": [ + { + "bbox": [ + 463, + 312, + 536, + 360 + ], + "type": "image", + "image_path": "58719e3fe72244da814b7a548835d094ab6b6cdb26a51259eb50cfcc66bd4ca6.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 317, + 392, + 389, + 438 + ], + "blocks": [ + { + "bbox": [ + 317, + 392, + 389, + 438 + ], + "lines": [ + { + "bbox": [ + 317, + 392, + 389, + 438 + ], + "spans": [ + { + "bbox": [ + 317, + 392, + 389, + 438 + ], + "type": "image", + "image_path": "96dea49f53412598ec18853250923f4d03213e425a3bb8aabca0ddaf92e4c6f2.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 316, + 441, + 462, + 468 + ], + "lines": [ + { + "bbox": [ + 316, + 441, + 462, + 468 + ], + "spans": [ + { + "bbox": [ + 316, + 441, + 462, + 468 + ], + "type": "text", + "content": "(d) Backdoor image (e) Attention map for with imperceptible poisoned model noise:BPP" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 390, + 392, + 462, + 438 + ], + "blocks": [ + { + "bbox": [ + 390, + 392, + 462, + 438 + ], + "lines": [ + { + "bbox": [ + 390, + 392, + 462, + 438 + ], + "spans": [ + { + "bbox": [ + 390, + 392, + 462, + 438 + ], + "type": "image", + "image_path": "81a949309702b253b548e673c7ebff5a6176a9ff65c7764ab11b0d179978edc4.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 463, + 441, + 536, + 460 + ], + "lines": [ + { + "bbox": [ + 463, + 441, + 536, + 460 + ], + "spans": [ + { + "bbox": [ + 463, + 441, + 536, + 460 + ], + "type": "text", + "content": "(f) Attention map for 
best model" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 390, + 441, + 462, + 460 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 390, + 441, + 462, + 460 + ], + "spans": [ + { + "bbox": [ + 390, + 441, + 462, + 460 + ], + "type": "text", + "content": "(e) Attention map for poisoned model" + } + ] + } + ], + "index": 17, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 463, + 392, + 536, + 438 + ], + "blocks": [ + { + "bbox": [ + 463, + 392, + 536, + 438 + ], + "lines": [ + { + "bbox": [ + 463, + 392, + 536, + 438 + ], + "spans": [ + { + "bbox": [ + 463, + 392, + 536, + 438 + ], + "type": "image", + "image_path": "b014b6cad5722d0206243b5266ddfeec4eb96a0fb97d0e9217995474d36a0690.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 558, + 545, + 578 + ], + "lines": [ + { + "bbox": [ + 306, + 558, + 545, + 578 + ], + "spans": [ + { + "bbox": [ + 306, + 558, + 545, + 578 + ], + "type": "text", + "content": "Figure 5. Attention map comparison between our model (weighted " + }, + { + "bbox": [ + 306, + 558, + 545, + 578 + ], + "type": "inline_equation", + "content": "\\mathrm{CL} +" + }, + { + "bbox": [ + 306, + 558, + 545, + 578 + ], + "type": "text", + "content": " attention) and backdoored models for three backdoor attacks." 
+ } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 317, + 471, + 389, + 517 + ], + "blocks": [ + { + "bbox": [ + 317, + 471, + 389, + 517 + ], + "lines": [ + { + "bbox": [ + 317, + 471, + 389, + 517 + ], + "spans": [ + { + "bbox": [ + 317, + 471, + 389, + 517 + ], + "type": "image", + "image_path": "9a108b410b23238ab50b6051b1e4585be0daaa72bfe29b423abf1f5ed77787fd.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 316, + 520, + 462, + 548 + ], + "lines": [ + { + "bbox": [ + 316, + 520, + 462, + 548 + ], + "spans": [ + { + "bbox": [ + 316, + 520, + 462, + 548 + ], + "type": "text", + "content": "(g) Backdoor image (h) Attention map for with imperceptible poisoned model noise: Wanet" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 390, + 471, + 462, + 517 + ], + "blocks": [ + { + "bbox": [ + 390, + 471, + 462, + 517 + ], + "lines": [ + { + "bbox": [ + 390, + 471, + 462, + 517 + ], + "spans": [ + { + "bbox": [ + 390, + 471, + 462, + 517 + ], + "type": "image", + "image_path": "7823a552ee20b4c5997ea4235f720f3ed64aebe245de2bcdac8f2d7bf7b7bd27.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 463, + 520, + 536, + 538 + ], + "lines": [ + { + "bbox": [ + 463, + 520, + 536, + 538 + ], + "spans": [ + { + "bbox": [ + 463, + 520, + 536, + 538 + ], + "type": "text", + "content": "(i) Attention map for best model" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "bbox": [ + 390, + 520, + 462, + 538 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 390, + 520, + 462, + 538 + ], + "spans": [ + { + "bbox": [ + 390, + 520, + 462, + 538 + ], + "type": "text", + "content": "(h) Attention map for poisoned model" + } + ] + } + ], + "index": 23, + "type": "text" + }, + 
{ + "type": "image", + "bbox": [ + 463, + 470, + 536, + 517 + ], + "blocks": [ + { + "bbox": [ + 463, + 470, + 536, + 517 + ], + "lines": [ + { + "bbox": [ + 463, + 470, + 536, + 517 + ], + "spans": [ + { + "bbox": [ + 463, + 470, + 536, + 517 + ], + "type": "image", + "image_path": "fd8ca58da0ecbffe144b12414cf62078fc9c91119b82353efe60be8750e26018.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "bbox": [ + 304, + 602, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 602, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 602, + 545, + 713 + ], + "type": "text", + "content": "fended model's current state. In addition, we will explore how multimodal large language models could be used to extract more relevant KEs. While Semantic Shield is successful at defending against attacks on natural images for which there is a meaningful visual-KE alignment, it may be less successful for images such as charts or more abstract text for which clear KEs cannot be extracted. Moreover, it does not preclude the possibility of attacks against the language model via the caption. Future work should explore how the LLM can be jointly defended." 
+ } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "24827" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 76, + 106, + 88 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 76, + 106, + 88 + ], + "spans": [ + { + "bbox": [ + 48, + 76, + 106, + 88 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 53, + 96, + 287, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 96, + 287, + 173 + ], + "spans": [ + { + "bbox": [ + 53, + 96, + 287, + 173 + ], + "type": "text", + "content": "[1] Hassan Akbari, Liangzhe Yuan, Rui Qian, Wei-Hong Chuang, Shih-Fu Chang, Yin Cui, and Boqing Gong. VATT: transformers for multimodal self-supervised learning from raw video, audio and text. In Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems 2021, NeurIPS 2021, December 6-14, 2021, virtual, pages 24206-24221, 2021. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 175, + 288, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 175, + 288, + 229 + ], + "spans": [ + { + "bbox": [ + 53, + 175, + 288, + 229 + ], + "type": "text", + "content": "[2] Hritik Bansal, Nishad Singhi, Yu Yang, Fan Yin, Aditya Grover, and Kai-Wei Chang. Cleanclip: Mitigating data poisoning attacks in multimodal contrastive learning. In ICLR 2023 Workshop on Trustworthy and Reliable Large-Scale Machine Learning Models, 2023. 
3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 231, + 288, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 231, + 288, + 285 + ], + "spans": [ + { + "bbox": [ + 53, + 231, + 288, + 285 + ], + "type": "text", + "content": "[3] Hritik Bansal, Nishad Singhi, Yu Yang, Fan Yin, Aditya Grover, and Kai-Wei Chang. Cleanclip: Mitigating data poisoning attacks in multimodal contrastive learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 112–123, 2023. 1, 3, 6, 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 286, + 288, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 286, + 288, + 331 + ], + "spans": [ + { + "bbox": [ + 53, + 286, + 288, + 331 + ], + "type": "text", + "content": "[4] Battista Biggio, Blaine Nelson, and Pavel Laskov. Poisoning attacks against support vector machines. In Proceedings of the 29th International Coference on International Conference on Machine Learning, pages 1467-1474, 2012. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 332, + 288, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 332, + 288, + 398 + ], + "spans": [ + { + "bbox": [ + 53, + 332, + 288, + 398 + ], + "type": "text", + "content": "[5] Battista Biggio, Ignazio Pillai, Samuel Rota Bulò, Davide Ariu, Marcello Pelillo, and Fabio Roli. Is data clustering in adversarial settings secure? In Proceedings of the 2013 ACM Workshop on Artificial Intelligence and Security, page 87–98, New York, NY, USA, 2013. Association for Computing Machinery. 
3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 399, + 288, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 399, + 288, + 453 + ], + "spans": [ + { + "bbox": [ + 53, + 399, + 288, + 453 + ], + "type": "text", + "content": "[6] Battista Biggio, Ignazio Pillai, Samuel Rota Bulò, Davide Ariu, Marcello Pelillo, and Fabio Roli. Is data clustering in adversarial settings secure? In Proceedings of the 2013 ACM workshop on Artificial intelligence and security, pages 87–98, 2013. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 456, + 288, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 456, + 288, + 509 + ], + "spans": [ + { + "bbox": [ + 53, + 456, + 288, + 509 + ], + "type": "text", + "content": "[7] Nicholas Carlini and Andreas Terzis. Poisoning and backdooring contrastive learning. In The Tenth International Conference on Learning Representations, ICLR 2022, Virtual Event, April 25-29, 2022. OpenReview.net, 2022. 3, 4, 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 511, + 288, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 511, + 288, + 566 + ], + "spans": [ + { + "bbox": [ + 53, + 511, + 288, + 566 + ], + "type": "text", + "content": "[8] Bryant Chen, Wilka Carvalho, Nathalie Baracaldo, Heiko Ludwig, Benjamin Edwards, Taesung Lee, Ian Molloy, and Biplav Srivastava. Detecting backdoor attacks on deep neural networks by activation clustering. In Workshop on Artificial Intelligence Safety. CEUR-WS, 2019. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 568, + 288, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 568, + 288, + 611 + ], + "spans": [ + { + "bbox": [ + 53, + 568, + 288, + 611 + ], + "type": "text", + "content": "[9] Jian Chen, Xuxin Zhang, Rui Zhang, Chen Wang, and Ling Liu. De-pois: An attack-agnostic defense against data poisoning attacks. 
IEEE Transactions on Information Forensics and Security, 16:3412-3425, 2021. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 613, + 287, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 287, + 668 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 287, + 668 + ], + "type": "text", + "content": "[10] Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E Gonzalez, et al. Vicuna: An open-source chatbot impressing gpt-4 with " + }, + { + "bbox": [ + 48, + 613, + 287, + 668 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 48, + 613, + 287, + 668 + ], + "type": "text", + "content": " * chatgpt quality. 2023. URL https://lmsys.org/blog/2023-03-30-vicuna, 1(2):3. 2, 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "type": "text", + "content": "[11] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 127 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 127 + ], + "type": "text", + "content": "Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2019, Minneapolis, MN, USA, June 2-7, 2019, Volume 1 (Long and Short Papers), pages 4171-4186. Association for Computational Linguistics, 2019. 
6" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 129, + 545, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 129, + 545, + 171 + ], + "spans": [ + { + "bbox": [ + 308, + 129, + 545, + 171 + ], + "type": "text", + "content": "[12] Khoa Doan, Yingjie Lao, and Ping Li. Backdoor attack with imperceptible input and latent modification. Advances in Neural Information Processing Systems, 34:18944-18957, 2021. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 173, + 545, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 173, + 545, + 216 + ], + "spans": [ + { + "bbox": [ + 308, + 173, + 545, + 216 + ], + "type": "text", + "content": "[13] Khoa Doan, Yingjie Lao, Weijie Zhao, and Ping Li. Lira: Learnable, imperceptible and robust backdoor attacks. In Proceedings of the IEEE/CVF international conference on computer vision, pages 11966-11976, 2021. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 217, + 545, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 217, + 545, + 270 + ], + "spans": [ + { + "bbox": [ + 308, + 217, + 545, + 270 + ], + "type": "text", + "content": "[14] Vishnu Sashank Dorbala, Gunnar A Sigurdsson, Jesse Thomason, Robinson Piramuthu, and Gaurav S Sukhatme. Clip-nav: Using clip for zero-shot vision-and-language navigation. In Workshop on Language and Robotics at CoRL 2022, 2022. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 271, + 545, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 271, + 545, + 316 + ], + "spans": [ + { + "bbox": [ + 308, + 271, + 545, + 316 + ], + "type": "text", + "content": "[15] Fangxiang Feng, Xiaojie Wang, and Ruifan Li. Cross-modal retrieval with correspondence autoencoder. In Proceedings of the 22nd ACM international conference on Multimedia, pages 7-16, 2014. 
2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 316, + 545, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 316, + 545, + 360 + ], + "spans": [ + { + "bbox": [ + 308, + 316, + 545, + 360 + ], + "type": "text", + "content": "[16] Felipe González-Pizarro and Savvas Zannettou. Understanding and detecting hateful content using contrastive learning. In Proceedings of the International AAAI Conference on Web and Social Media, pages 257–268, 2023. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 361, + 545, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 361, + 545, + 392 + ], + "spans": [ + { + "bbox": [ + 308, + 361, + 545, + 392 + ], + "type": "text", + "content": "[17] Tianyu Gu, Kang Liu, Brendan Dolan-Gavitt, and Siddharth Garg. Badnets: Evaluating backdooring attacks on deep neural networks. IEEE Access, 7:47230-47244, 2019. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 393, + 545, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 393, + 545, + 437 + ], + "spans": [ + { + "bbox": [ + 308, + 393, + 545, + 437 + ], + "type": "text", + "content": "[18] Jonathan Hayase, Weihao Kong, Raghav Somani, and Sewoong Oh. Spectre: Defending against backdoor attacks using robust statistics. In International Conference on Machine Learning, pages 4129-4139. PMLR, 2021. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 438, + 545, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 438, + 545, + 480 + ], + "spans": [ + { + "bbox": [ + 308, + 438, + 545, + 480 + ], + "type": "text", + "content": "[19] Chenguang Huang, Oier Mees, Andy Zeng, and Wolfram Burgard. Visual language maps for robot navigation. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pages 10608-10615. IEEE, 2023. 
2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 482, + 545, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 482, + 545, + 525 + ], + "spans": [ + { + "bbox": [ + 308, + 482, + 545, + 525 + ], + "type": "text", + "content": "[20] Kunzhe Huang, Yiming Li, Baoyuan Wu, Zhan Qin, and Kui Ren. Backdoor defense via decoupling the training process. In International Conference on Learning Representations, 2021. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 526, + 545, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 526, + 545, + 590 + ], + "spans": [ + { + "bbox": [ + 308, + 526, + 545, + 590 + ], + "type": "text", + "content": "[21] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In International conference on machine learning, pages 4904-4916. PMLR, 2021. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 592, + 545, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 592, + 545, + 646 + ], + "spans": [ + { + "bbox": [ + 308, + 592, + 545, + 646 + ], + "type": "text", + "content": "[22] Marius Kloft and Pavel Laskov. Online anomaly detection under adversarial impact. In Proceedings of the thirteenth international conference on artificial intelligence and statistics, pages 405-412. JMLR Workshop and Conference Proceedings, 2010. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 647, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 647, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 308, + 647, + 545, + 689 + ], + "type": "text", + "content": "[23] Pang Wei Koh and Percy Liang. Understanding black-box predictions via influence functions. In International conference on machine learning, pages 1885-1894. 
PMLR, 2017. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "type": "text", + "content": "[24] Changjiang Li, Ren Pang, Zhaohan Xi, Tianyu Du, Shouling Ji, Yuan Yao, and Ting Wang. An embarrassingly simple" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "24828" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 73, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 67, + 73, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 73, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 67, + 73, + 287, + 106 + ], + "type": "text", + "content": "backdoor attack on self-supervised learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4367-4378, 2023. 1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 107, + 287, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 107, + 287, + 163 + ], + "spans": [ + { + "bbox": [ + 48, + 107, + 287, + 163 + ], + "type": "text", + "content": "[25] Junnan Li, Ramprasaath Selvaraju, Akhilesh Gotmare, Shafiq Joty, Caiming Xiong, and Steven Chu Hong Hoi. Align before fuse: Vision and language representation learning with momentum distillation. Advances in neural information processing systems, 34:9694-9705, 2021. 
1, 2"
                  }
                ]
              }
            ],
            "index": 1
          },
          {
            "bbox": [
              48,
              164,
              287,
              228
            ],
            "type": "ref_text",
            "angle": 0,
            "lines": [
              {
                "bbox": [
                  48,
                  164,
                  287,
                  228
                ],
                "spans": [
                  {
                    "bbox": [
                      48,
                      164,
                      287,
                      228
                    ],
                    "type": "text",
                    "content": "[26] Junnan Li, Dongxu Li, Caiming Xiong, and Steven C. H. Hoi. BLIP: bootstrapping language-image pre-training for unified vision-language understanding and generation. In International Conference on Machine Learning, ICML 2022, 17-23 July 2022, Baltimore, Maryland, USA, pages 12888-12900. PMLR, 2022. 1"
                  }
                ]
              }
            ],
            "index": 2
          },
          {
            "bbox": [
              48,
              230,
              287,
              306
            ],
            "type": "ref_text",
            "angle": 0,
            "lines": [
              {
                "bbox": [
                  48,
                  230,
                  287,
                  306
                ],
                "spans": [
                  {
                    "bbox": [
                      48,
                      230,
                      287,
                      306
                    ],
                    "type": "text",
                    "content": "[27] Yige Li, Xixiang Lyu, Nodens Koren, Lingjuan Lyu, Bo Li, and Xingjun Ma. Anti-backdoor learning: Training clean models on poisoned data. In Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems 2021, NeurIPS 2021, December 6-14, 2021, virtual, pages 14900-14912, 2021. 1, 3, 6, 7"
                  }
                ]
              }
            ],
            "index": 3
          },
          {
            "bbox": [
              48,
              308,
              287,
              374
            ],
            "type": "ref_text",
            "angle": 0,
            "lines": [
              {
                "bbox": [
                  48,
                  308,
                  287,
                  374
                ],
                "spans": [
                  {
                    "bbox": [
                      48,
                      308,
                      287,
                      374
                    ],
                    "type": "text",
                    "content": "[28] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014. 
6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 376, + 287, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 376, + 287, + 421 + ], + "spans": [ + { + "bbox": [ + 48, + 376, + 287, + 421 + ], + "type": "text", + "content": "[29] Min Liu, Alberto Sangiovanni-Vincentelli, and Xiangyu Yue. Beating backdoor attack at its own game. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4620-4629, 2023. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 422, + 287, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 422, + 287, + 465 + ], + "spans": [ + { + "bbox": [ + 48, + 422, + 287, + 465 + ], + "type": "text", + "content": "[30] Yang Liu, Mingyuan Fan, Cen Chen, Ximeng Liu, Zhuo Ma, Li Wang, and Jianfeng Ma. Backdoor defense with machine unlearning. In IEEE INFOCOM 2022-IEEE Conference on Computer Communications, pages 280-289. IEEE, 2022. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 467, + 287, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 467, + 287, + 520 + ], + "spans": [ + { + "bbox": [ + 48, + 467, + 287, + 520 + ], + "type": "text", + "content": "[31] Arjun Majumdar, Gunjan Aggarwal, Bhavika Devnani, Judy Hoffman, and Dhruv Batra. Zson: Zero-shot object-goal navigation using multimodal goal embeddings. Advances in Neural Information Processing Systems, 35:32340-32352, 2022. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 522, + 287, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 522, + 287, + 576 + ], + "spans": [ + { + "bbox": [ + 48, + 522, + 287, + 576 + ], + "type": "text", + "content": "[32] Sachit Menon and Carl Vondrick. Visual classification via description from large language models. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net, 2023. 
6"
                  }
                ]
              }
            ],
            "index": 8
          },
          {
            "bbox": [
              48,
              578,
              287,
              612
            ],
            "type": "ref_text",
            "angle": 0,
            "lines": [
              {
                "bbox": [
                  48,
                  578,
                  287,
                  612
                ],
                "spans": [
                  {
                    "bbox": [
                      48,
                      578,
                      287,
                      612
                    ],
                    "type": "text",
                    "content": "[33] Tuan Anh Nguyen and Anh Tuan Tran. Wanet - imperceptible warping-based backdoor attack. In International Conference on Learning Representations, 2021. 3, 4"
                  }
                ]
              }
            ],
            "index": 9
          },
          {
            "bbox": [
              48,
              613,
              287,
              668
            ],
            "type": "ref_text",
            "angle": 0,
            "lines": [
              {
                "bbox": [
                  48,
                  613,
                  287,
                  668
                ],
                "spans": [
                  {
                    "bbox": [
                      48,
                      613,
                      287,
                      668
                    ],
                    "type": "text",
                    "content": "[34] Huy Phan, Cong Shi, Yi Xie, Tianfang Zhang, Zhuohang Li, Tianming Zhao, Jian Liu, Yan Wang, Yingying Chen, and Bo Yuan. Ribac: Towards robust and imperceptible backdoor attack against compact dnn. In European Conference on Computer Vision, pages 708-724. Springer, 2022. 3"
                  }
                ]
              }
            ],
            "index": 10
          },
          {
            "bbox": [
              48,
              670,
              287,
              713
            ],
            "type": "ref_text",
            "angle": 0,
            "lines": [
              {
                "bbox": [
                  48,
                  670,
                  287,
                  713
                ],
                "spans": [
                  {
                    "bbox": [
                      48,
                      670,
                      287,
                      713
                    ],
                    "type": "text",
                    "content": "[35] Han Qiu, Yi Zeng, Shangwei Guo, Tianwei Zhang, Meikang Qiu, and Bhavani Thuraisingham. Deepsweep: An evaluation framework for mitigating dnn backdoor attacks using data augmentation. In Proceedings of the 2021 ACM"
                  }
                ]
              }
            ],
            "index": 11
          }
        ],
        "sub_type": "ref_text"
      },
      {
        "bbox": [
          307,
          73,
          547,
          713
        ],
        "type": "list",
        "angle": 0,
        "index": 25,
        "blocks": [
          {
            "bbox": [
              326,
              73,
              545,
              95
            ],
            "type": "ref_text",
            "angle": 0,
            "lines": [
              {
                "bbox": [
                  326,
                  73,
                  545,
                  95
                ],
                "spans": [
                  {
                    "bbox": [
                      326,
                      73,
                      545,
                      95
                    ],
                    "type": "text",
                    "content": "Asia Conference on Computer and Communications Security, pages 363-377, 2021. 
3"
                  }
                ]
              }
            ],
            "index": 13
          },
          {
            "bbox": [
              307,
              96,
              545,
              163
            ],
            "type": "ref_text",
            "angle": 0,
            "lines": [
              {
                "bbox": [
                  307,
                  96,
                  545,
                  163
                ],
                "spans": [
                  {
                    "bbox": [
                      307,
                      96,
                      545,
                      163
                    ],
                    "type": "text",
                    "content": "[36] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, 2021. 1, 2, 6"
                  }
                ]
              }
            ],
            "index": 14
          },
          {
            "bbox": [
              307,
              163,
              545,
              196
            ],
            "type": "ref_text",
            "angle": 0,
            "lines": [
              {
                "bbox": [
                  307,
                  163,
                  545,
                  196
                ],
                "spans": [
                  {
                    "bbox": [
                      307,
                      163,
                      545,
                      196
                    ],
                    "type": "text",
                    "content": "[37] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with CLIP latents. CoRR, abs/2204.06125, 2022. 1"
                  }
                ]
              }
            ],
            "index": 15
          },
          {
            "bbox": [
              307,
              198,
              547,
              241
            ],
            "type": "ref_text",
            "angle": 0,
            "lines": [
              {
                "bbox": [
                  307,
                  198,
                  547,
                  241
                ],
                "spans": [
                  {
                    "bbox": [
                      307,
                      198,
                      547,
                      241
                    ],
                    "type": "text",
                    "content": "[38] Aniruddha Saha, Akshayvarun Subramanya, and Hamed Pirsiavash. Hidden trigger backdoor attacks. In Proceedings of the AAAI conference on artificial intelligence, pages 11957-11965, 2020. 3"
                  }
                ]
              }
            ],
            "index": 16
          },
          {
            "bbox": [
              307,
              243,
              545,
              308
            ],
            "type": "ref_text",
            "angle": 0,
            "lines": [
              {
                "bbox": [
                  307,
                  243,
                  545,
                  308
                ],
                "spans": [
                  {
                    "bbox": [
                      307,
                      243,
                      545,
                      308
                    ],
                    "type": "text",
                    "content": "[39] Aniruddha Saha, Ajinkya Tejankar, Soroush Abbasi Koohpayegani, and Hamed Pirsiavash. Backdoor attacks on self-supervised learning. 
In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2022, New Orleans, LA, USA, June 18-24, 2022, pages 13327-13336. IEEE, 2022. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 310, + 545, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 310, + 545, + 386 + ], + "spans": [ + { + "bbox": [ + 307, + 310, + 545, + 386 + ], + "type": "text", + "content": "[40] Ali Shafahi, W. Ronny Huang, Mahyar Najibi, Octavian Suciu, Christoph Studer, Tudor Dumitras, and Tom Goldstein. Poison frogs! targeted clean-label poisoning attacks on neural networks. In Proceedings of the 32nd International Conference on Neural Information Processing Systems, page 6106-6116, Red Hook, NY, USA, 2018. Curran Associates Inc. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 388, + 545, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 388, + 545, + 453 + ], + "spans": [ + { + "bbox": [ + 307, + 388, + 545, + 453 + ], + "type": "text", + "content": "[41] Wonyoung Shin, Jonghun Park, Taekang Woo, Yongwoo Cho, Kwangjin Oh, and Hwanjun Song. e-clip: Large-scale vision-language representation learning in e-commerce. In Proceedings of the 31st ACM International Conference on Information & Knowledge Management, pages 3484–3494, 2022. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 456, + 545, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 456, + 545, + 499 + ], + "spans": [ + { + "bbox": [ + 307, + 456, + 545, + 499 + ], + "type": "text", + "content": "[42] Andreas Steiner, Alexander Kolesnikov, Xiaohua Zhai, Ross Wightman, Jakob Uszkoreit, and Lucas Beyer. How to train your vit? data, augmentation, and regularization in vision transformers. Trans. Mach. Learn. Res., 2022, 2022. 
6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 501, + 545, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 501, + 545, + 555 + ], + "spans": [ + { + "bbox": [ + 307, + 501, + 545, + 555 + ], + "type": "text", + "content": "[43] Di Tang, XiaoFeng Wang, Haixu Tang, and Kehuan Zhang. Demon in the variant: Statistical analysis of {DNNs} for robust backdoor contamination detection. In 30th USENIX Security Symposium (USENIX Security 21), pages 1541-1558, 2021. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 557, + 545, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 557, + 545, + 612 + ], + "spans": [ + { + "bbox": [ + 307, + 557, + 545, + 612 + ], + "type": "text", + "content": "[44] Christopher Thomas and Adriana Kovashka. Preserving semantic neighborhoods for robust cross-modal retrieval. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XVIII 16, pages 317-335. Springer, 2020. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 613, + 545, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 613, + 545, + 679 + ], + "spans": [ + { + "bbox": [ + 307, + 613, + 545, + 679 + ], + "type": "text", + "content": "[45] Vale Tolpegin, Stacey Truex, Mehmet Emre Gursoy, and Ling Liu. Data poisoning attacks against federated learning systems. In Computer Security-ESORICS 2020: 25th European Symposium on Research in Computer Security, ES-ORICS 2020, Guildford, UK, September 14–18, 2020, Proceedings, Part I 25, pages 480–501. Springer, 2020. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "type": "text", + "content": "[46] Brandon Tran, Jerry Li, and Aleksander Madry. 
Spectral signatures in backdoor attacks. Advances in neural information processing systems, 31, 2018. 3" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "24829" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[47] Wei Lun Tsai, Jacob J Lin, and Shang-Hsien Hsieh. Generating construction safety observations via clip-based image-language embedding. In European Conference on Computer Vision, pages 366-381. Springer, 2022. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 118, + 287, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 118, + 287, + 172 + ], + "spans": [ + { + "bbox": [ + 48, + 118, + 287, + 172 + ], + "type": "text", + "content": "[48] Haotao Wang, Junyuan Hong, Aston Zhang, Jiayu Zhou, and Zhangyang Wang. Trap and replace: Defending backdoor attacks by trapping them into an easy-to-replace subnetwork. Advances in neural information processing systems, 35:36026-36039, 2022. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 175, + 287, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 175, + 287, + 218 + ], + "spans": [ + { + "bbox": [ + 48, + 175, + 287, + 218 + ], + "type": "text", + "content": "[49] Lin Wang and Jie Chen. 
Improving radiology report generation with adaptive attention. In Multimodal AI in healthcare: A paradigm shift in health intelligence, pages 293-305. Springer, 2022. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 220, + 287, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 220, + 287, + 274 + ], + "spans": [ + { + "bbox": [ + 48, + 220, + 287, + 274 + ], + "type": "text", + "content": "[50] Longzheng Wang, Chuang Zhang, Hongbo Xu, Yongxiu Xu, Xiaohan Xu, and Siqi Wang. Cross-modal contrastive learning for multimodal fake news detection. In Proceedings of the 31st ACM International Conference on Multimedia, pages 5696-5704, 2023. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 276, + 287, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 276, + 287, + 341 + ], + "spans": [ + { + "bbox": [ + 48, + 276, + 287, + 341 + ], + "type": "text", + "content": "[51] Zhenting Wang, Juan Zhai, and Shiqing Ma. Bppattack: Stealthy and efficient trojan attacks against deep neural networks via image quantization and contrastive adversarial learning. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2022, New Orleans, LA, USA, June 18-24, 2022, pages 15054-15063. IEEE, 2022. 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 342, + 287, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 342, + 287, + 386 + ], + "spans": [ + { + "bbox": [ + 48, + 342, + 287, + 386 + ], + "type": "text", + "content": "[52] Sandamal Weerasinghe, Tansu Alpcan, Sarah M Erfani, and Christopher Leckie. Defending support vector machines against data poisoning attacks. IEEE Transactions on Information Forensics and Security, 16:2566-2578, 2021. 
3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 388, + 287, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 388, + 287, + 420 + ], + "spans": [ + { + "bbox": [ + 48, + 388, + 287, + 420 + ], + "type": "text", + "content": "[53] Dongxian Wu and Yisen Wang. Adversarial neuron pruning purifies backdoored deep models. Advances in Neural Information Processing Systems, 34:16913-16925, 2021. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 422, + 287, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 422, + 287, + 465 + ], + "spans": [ + { + "bbox": [ + 48, + 422, + 287, + 465 + ], + "type": "text", + "content": "[54] Huang Xiao, Battista Biggio, Gavin Brown, Giorgio Fumera, Claudia Eckert, and Fabio Roli. Is feature selection secure against training data poisoning? In International conference on machine learning, pages 1689-1698. PMLR, 2015. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 467, + 287, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 467, + 287, + 510 + ], + "spans": [ + { + "bbox": [ + 48, + 467, + 287, + 510 + ], + "type": "text", + "content": "[55] Wenhan Yang, Jingdong Gao, and Baharan Mirzasoleiman. Robust contrastive language-image pretraining against data poisoning and backdoor attacks. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 512, + 287, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 512, + 287, + 576 + ], + "spans": [ + { + "bbox": [ + 48, + 512, + 287, + 576 + ], + "type": "text", + "content": "[56] Ziqing Yang, Xinlei He, Zheng Li, Michael Backes, Mathias Humbert, Pascal Berrang, and Yang Zhang. Data poisoning attacks against multimodal encoders. In International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, pages 39299-39313. 
PMLR, 2023. 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 579, + 287, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 579, + 287, + 643 + ], + "spans": [ + { + "bbox": [ + 48, + 579, + 287, + 643 + ], + "type": "text", + "content": "[57] Ziqing Yang, Xinlei He, Zheng Li, Michael Backes, Mathias Humbert, Pascal Berrang, and Yang Zhang. Data poisoning attacks against multimodal encoders. In International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, pages 39299-39313. PMLR, 2023. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 646, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 646, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 646, + 287, + 689 + ], + "type": "text", + "content": "[58] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions. Trans. Assoc. Comput. Linguistics, 2:67-78, 2014. 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "text", + "content": "[59] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 496 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "text", + "content": "captioners are image-text foundation models. Transactions on Machine Learning Research, 2022. 
1, 2"
                  }
                ]
              }
            ],
            "index": 14
          },
          {
            "bbox": [
              308,
              96,
              545,
              140
            ],
            "type": "ref_text",
            "angle": 0,
            "lines": [
              {
                "bbox": [
                  308,
                  96,
                  545,
                  140
                ],
                "spans": [
                  {
                    "bbox": [
                      308,
                      96,
                      545,
                      140
                    ],
                    "type": "text",
                    "content": "[60] Yi Zeng, Si Chen, Won Park, Zhuoqing Mao, Ming Jin, and Ruoxi Jia. Adversarial unlearning of backdoors via implicit hypergradient. In International Conference on Learning Representations, 2021. 3"
                  }
                ]
              }
            ],
            "index": 15
          },
          {
            "bbox": [
              308,
              141,
              545,
              195
            ],
            "type": "ref_text",
            "angle": 0,
            "lines": [
              {
                "bbox": [
                  308,
                  141,
                  545,
                  195
                ],
                "spans": [
                  {
                    "bbox": [
                      308,
                      141,
                      545,
                      195
                    ],
                    "type": "text",
                    "content": "[61] Xiaohua Zhai, Xiao Wang, Basil Mustafa, Andreas Steiner, Daniel Keysers, Alexander Kolesnikov, and Lucas Beyer. Lit: Zero-shot transfer with locked-image text tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18123-18133, 2022. 2"
                  }
                ]
              }
            ],
            "index": 16
          },
          {
            "bbox": [
              308,
              197,
              545,
              251
            ],
            "type": "ref_text",
            "angle": 0,
            "lines": [
              {
                "bbox": [
                  308,
                  197,
                  545,
                  251
                ],
                "spans": [
                  {
                    "bbox": [
                      308,
                      197,
                      545,
                      251
                    ],
                    "type": "text",
                    "content": "[62] Hanwang Zhang, Yang Yang, Huanbo Luan, Shuicheng Yan, and Tat-Seng Chua. Start from scratch: Towards automatically identifying, modeling, and naming visual attributes. In Proceedings of the 22nd ACM international conference on Multimedia, pages 187-196, 2014. 2"
                  }
                ]
              }
            ],
            "index": 17
          },
          {
            "bbox": [
              308,
              253,
              545,
              295
            ],
            "type": "ref_text",
            "angle": 0,
            "lines": [
              {
                "bbox": [
                  308,
                  253,
                  545,
                  295
                ],
                "spans": [
                  {
                    "bbox": [
                      308,
                      253,
                      545,
                      295
                    ],
                    "type": "text",
                    "content": "[63] Jinghuai Zhang, Hongbin Liu, Jinyuan Jia, and Neil Zhenqiang Gong. 
Corruptencoder: Data poisoning based backdoor attacks to contrastive learning. arXiv preprint arXiv:2211.08229, 2022. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 297, + 545, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 297, + 545, + 341 + ], + "spans": [ + { + "bbox": [ + 308, + 297, + 545, + 341 + ], + "type": "text", + "content": "[64] Ying Zhang and Huchuan Lu. Deep cross-modal projection learning for image-text matching. In Proceedings of the European conference on computer vision (ECCV), pages 686-701, 2018. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 342, + 545, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 342, + 545, + 396 + ], + "spans": [ + { + "bbox": [ + 308, + 342, + 545, + 396 + ], + "type": "text", + "content": "[65] Yuhao Zhang, Hang Jiang, Yasuhide Miura, Christopher D Manning, and Curtis P Langlotz. Contrastive learning of medical visual representations from paired images and text. In Machine Learning for Healthcare Conference, pages 2-25. PMLR, 2022. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 398, + 545, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 398, + 545, + 441 + ], + "spans": [ + { + "bbox": [ + 308, + 398, + 545, + 441 + ], + "type": "text", + "content": "[66] Bingyin Zhao and Yingjie Lao. Towards class-oriented poisoning attacks against neural networks. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 3741-3750, 2022. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 443, + 545, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 443, + 545, + 496 + ], + "spans": [ + { + "bbox": [ + 308, + 443, + 545, + 496 + ], + "type": "text", + "content": "[67] Yangming Zhou, Yuzhou Yang, Qichao Ying, Zhenxing Qian, and Xinpeng Zhang. 
Multimodal fake news detection via clip-guided learning. In 2023 IEEE International Conference on Multimedia and Expo (ICME), pages 2825-2830. IEEE, 2023. 2" + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "24830" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/Semantic-Aware Multi-Label Adversarial Attacks/a7635c9b-bbb0-4721-869c-8ded1f1d58af_content_list.json b/2024/Semantic-Aware Multi-Label Adversarial Attacks/a7635c9b-bbb0-4721-869c-8ded1f1d58af_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..1b9d4d2855e633b4c936a27875d464084f984d6b --- /dev/null +++ b/2024/Semantic-Aware Multi-Label Adversarial Attacks/a7635c9b-bbb0-4721-869c-8ded1f1d58af_content_list.json @@ -0,0 +1,1833 @@ +[ + { + "type": "text", + "text": "Semantic-Aware Multi-Label Adversarial Attacks", + "text_level": 1, + "bbox": [ + 233, + 130, + 736, + 150 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Hassan Mahmood Northeastern University", + "bbox": [ + 210, + 181, + 423, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "mahmood.h@northeastern.edu", + "bbox": [ + 212, + 219, + 444, + 232 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ehsan Elhamifar Northeastern University", + "bbox": [ + 531, + 181, + 723, + 217 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "e.elhamifar@northeastern.edu", + "bbox": [ + 501, + 219, + 751, + 232 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 268, + 313, + 284 + ], + "page_idx": 0 + 
}, + { + "type": "text", + "text": "Despite its importance, generating attacks for multi-label learning (MLL) models has received much less attention compared to multi-class recognition. Attacking an MLL model by optimizing a loss on the target set of labels has often the undesired consequence of changing the predictions for other labels. On the other hand, adding a loss on the remaining labels to keep them fixed leads to highly negatively correlated gradient directions, reducing the attack effectiveness. In this paper, we develop a framework for crafting effective and semantic-aware adversarial attacks for MLL. First, to obtain an attack that leads to semantically consistent predictions across all labels, we find a minimal super-set of the target labels, referred to as consistent target set. To do so, we develop an efficient search algorithm over a knowledge graph, which encodes label dependencies. Next, we propose an optimization that searches for an attack that modifies the predictions of labels in the consistent target set while ensuring other labels will not get affected. This leads to an efficient algorithm that projects the gradient of the consistent target set loss onto the orthogonal direction of the gradient of the loss on other labels. Our framework can generate attacks on different target set sizes and for MLL with thousands of labels (as in OpenImages). Finally, by extensive experiments on three datasets and several MLL models, we show that our method generates both successful and semantically consistent attacks.1", + "bbox": [ + 75, + 301, + 473, + 694 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 76, + 718, + 209, + 734 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Despite the tremendous success of Deep Neural Networks (DNNs) for image recognition, DNNs are vulnerable to adversarial attacks, i.e., imperceptible image perturbations that result in incorrect prediction with high confidence [9, 25, 27, 30, 35, 39, 53, 60, 69, 70, 98]. Understanding and improving the robustness of DNNs has motivated a large body of research on generating adversarial perturbations and subsequently using them to design defense mech-", + "bbox": [ + 75, + 739, + 468, + 863 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/c0b229c86348da89cef4ce1a9fb2b18faf52001d2f7849124935241c76f5a36c.jpg", + "image_caption": [ + "Figure 1. Generating effective attacks for an MLL model is challenging. Top: Two groups of semantically related labels. Green nodes show labels predicted as present before the attack. Bottom: While an attack on the target label 'bicycle' succeeds, it fails to turn off 'vehicle' and 'wheeled vehicle' for $\\epsilon < 0.2$ . On the other hand, for $\\epsilon > 0.125$ , the attack changes the prediction for the non-target label 'person', which is undesired." + ], + "image_footnote": [], + "bbox": [ + 522, + 272, + 867, + 508 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "anisms, e.g., by detecting attacks or retraining the model using perturbed images. The majority of existing works, however, have focused on multi-class recognition (MCR), in which only one class must be predicted in an image [14, 21, 26, 31, 37, 82, 85].", + "bbox": [ + 496, + 609, + 890, + 684 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "On the other hand, many real-world applications require finding multiple labels in an image. 
This includes human-object interaction learning (e.g., recognizing hands and interacting objects), autonomous driving (e.g., recognizing cars, bikes, pedestrians, roads, signs, etc), assistive robotics and surveillance. Therefore, multi-label learning (MLL) aims at recognizing all labels in an image [14, 26, 38, 50, 61, 85, 94]. However, despite its importance and fundamental differences with respect to attacks for MCR (see Figure 1), adversarial attacks for MLL has received much less attention in the literature [1, 2, 36, 71, 86, 87].", + "bbox": [ + 496, + 686, + 892, + 852 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The main difference between attacks for MCR and MLL stems from the different ways decision boundaries between labels is learned and structured for the two problems. In", + "bbox": [ + 498, + 854, + 893, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1The code of this work is available at https://github.com/hassan-mahmood/SemanticMLLAttacks.git", + "bbox": [ + 76, + 875, + 468, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "24251", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "MCR, different labels compete with each other as only one label must be present/predicted. Therefore, attacking an on present label leads to turning it off while automatically turning on another label, see Figure 2 (left). 
On the other hand, in MLL, labels do not compete, where none, some or all labels can be predicted as present in an image. Thus, attacking a present or an absent label can lead to changing the predictions for none, several or all other labels, as shown in Figure 2 (right). This often has the undesired effect of inconsistent predictions, which can simply be used to detect the attack (e.g., turning off 'pedestrian' can turn on 'bike' and 'stop sign' while turning off 'road').", + "bbox": [ + 75, + 90, + 472, + 271 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "One can try to prevent changing predictions of other labels by crafting the attack while including a loss that enforces predictions of other labels to stay intact. However, as we show, the gradient of the loss for fixing other labels often is highly negatively correlated with the gradient of the loss on the label we want to attack, hence, counteracting the effect of each other. This problem gets more pronounced when the number of labels increases (e.g., in Open Images dataset [40] with 9,600 labels) and the gradient of this additional loss gets larger too. Also, fixing predictions for all other labels still may lead to semantic inconsistency among predictions (e.g., turning off 'vehicle' requires turning off 'car' and 'truck' too, otherwise 'vehicle' being absent while 'car' being present can be used to detect the attack).", + "bbox": [ + 75, + 272, + 472, + 484 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Paper Contributions. We develop a framework for crafting adversarial attacks for MLL that addresses the above challenges. First, to obtain an attack on a target set of labels that leads to semantically consistent predictions across all labels, we find a minimal superset of the target set (referred to as consistent target set) to be attacked/modified. To do so, we develop an efficient search algorithm over a knowledge graph, which encodes label dependencies. 
Second, we show that finding the attack by optimizing the sum of two losses, one over the consistent target set and the other over other labels, has opposite gradient directions for the two losses, which leads to inefficient perturbations. Third, we propose an optimization that searches for an attack that modifies the predictions of labels in the consistent target set while ensuring that other labels will not get affected. Our optimization leads to a projected gradient algorithm that projects the gradient of the loss for the consistent target set onto the orthogonal direction of the gradient of the loss on other labels. Finally, by extensive experiments on three datasets and several MLL models, we show that our framework generates both successful and semantically consistent attacks.", + "bbox": [ + 75, + 487, + 472, + 806 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 76, + 810, + 218, + 825 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Multi-Label Recognition", + "text_level": 1, + "bbox": [ + 76, + 830, + 303, + 848 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The goal of multi-label learning (MLL) is to find all classes of objects (or even abstract concepts) in an image. As compared to multi-class classification, which finds a sin", + "bbox": [ + 75, + 854, + 470, + 900 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/53b5d79a90093b316d4c54826ab27b08b7e4bd15af32c11749d91528b87ac8f4.jpg", + "image_caption": [ + "Figure 2. Left: In multi-class recognition (MCR), attacking the present label leads to automatically turning on another label, as labels compete with each other. Right: In multi-label learning (MLL), attacking a label can lead to none $(\\pmb{x}_1')$ , some $(\\pmb{x}_2')$ or all $(\\pmb{x}_3')$ other labels changing." 
+ ], + "image_footnote": [], + "bbox": [ + 521, + 88, + 671, + 202 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/3bf7f6d733be83068af386401b041a3736562a9480ef00e21e8647d49b1ca177.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 681, + 88, + 870, + 202 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "gle dominant class in an image, MLL is a harder task, since any combination of labels can be present in an image and many labels often correspond to small image regions. This has motivated a large body of research for designing effective MLL methods, using graphical models[44, 46], different loss functions for handling label imbalance [6, 18, 48, 49, 76, 91], exploiting external knowledge, label correlations, and hierarchical relations among labels [13, 19, 33, 43, 56, 78, 88, 89, 92, 97], or using a combination of label and image feature correlations [41, 45, 47, 77, 79, 83] to improve the multi-label performance.", + "bbox": [ + 496, + 282, + 893, + 449 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Adversarial Attacks", + "text_level": 1, + "bbox": [ + 500, + 457, + 691, + 470 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Deep Neural Networks (DNNs) have been shown to be vulnerable to small adversarial perturbations, which can easily fool the model [3, 12, 66, 73, 81]. Therefore, many works have studied different ways to design efficient attacks and defense mechanisms for DNNs [4, 5, 10, 11, 20, 22, 23, 28, 29, 34, 42, 51, 57–59, 62, 67–69, 74, 75, 84, 93]. The adversarial attacks can be divided into several categories based on different criteria [90] such as white-box and black-box, image agnostic and image-specific, targeted and untargeted, or restricted to perturb small image regions and unrestricted attacks. 
In the paper, we generate white-box attacks for multi-label recognition, i.e., assume access to the MLL model.", + "bbox": [ + 496, + 477, + 893, + 657 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2.1 Multi-Label Adversarial Attacks", + "text_level": 1, + "bbox": [ + 500, + 669, + 782, + 683 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Motivated by the increasing interest in the multi-label recognition problem, few works have recently studied MLL attacks. [71] studies a framework for attacking multi-label recognition and ranking systems. However, it does not exploit any relationships among labels to design attacks, which as we show is important to design effective attacks. We use the attacks from this work as baselines in our experiments. Yang et al. [86, 87] designed untargeted attacks for multi-label classification to change as many labels as possible and proposed a framework to measure how well an MLL model can be attacked. In comparison, our focus is targeted multi-label attacks with semantic relationships. Hu et al. [32] proposed to exploit ranking relations to design attacks for top- $k$ multi-label models and [96] proposed an attack to", + "bbox": [ + 496, + 688, + 893, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "24252", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/38f2981baa0ae6ca00ec3d871a97df4efe9dcdb8d23775e72217fa4424416c23.jpg", + "image_caption": [ + "Figure 3. Multi-label learning predicts several labels for an image (see \"MLL Output\"). Attacking a target set ('vehicle' on the top or 'person' and 'bird' on the bottom) using a naive multi-label attack leads to prediction semantic inconsistencies ('car' and 'motorcycle' being on while 'vehicle' is off or 'person' and 'bird' being off while 'animal' is on). However, GMLA handles a large number of labels while achieving semantic consistency." 
+ ], + "image_footnote": [], + "bbox": [ + 109, + 93, + 431, + 303 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "hide all labels present in an image, whereas we consider the minimal set of semantically related labels to be attacked. Aich et al. [2] leveraged local patch differences of different objects to generate multi-object attacks and [1] proposed a CLIP-based generative model to generate multi-object attacks in the black-box setting. Jia et al. [36] proposed theoretical robustness guarantees to defend against multi-label adversarial attacks and [52] exploited domain knowledge context to detect adversarial attacks. Context-aware attacks [7, 8] fool context-aware attack detection methods by attacking the label and its context simultaneously. The context in these works is defined in terms of cooccurring labels. In comparison, we propose to attack labels based on their semantic relationships. Moreover, none of these works have addressed the problem of negative gradient correlation in generating large-scale dataset attacks. Among the existing literature, Nan et al. [95] is also comparable to our attack method, and we use it as a baseline. They proposed a fast linear programming-based adversarial example generation algorithm for MLL to minimize the perturbation norm required to achieve a target label.", + "bbox": [ + 75, + 404, + 470, + 720 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Multi-Label Learning Attack (MLA)", + "text_level": 1, + "bbox": [ + 76, + 734, + 408, + 752 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Problem Setting", + "text_level": 1, + "bbox": [ + 76, + 756, + 238, + 772 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We study generating adversarial attacks for the Multi-Label Learning (MLL) task. In MLL, multiple labels can appear in an image, see Figure 3, as opposed to the multi-class recognition (MCR), where each image has only one label. 
Let $\\mathcal{C}$ denote the set of all labels. For an image $x\\in \\mathbb{R}^d$ , let $\\pmb {y}\\in \\{0,1\\}^{|\\mathcal{C}|}$ denote the set of its labels, indicating the presence (1) or absence (0) of each label in $\\mathcal{C}$ in the image. Let $\\mathcal{F}:\\mathbb{R}^d\\to \\mathbb{R}^{|\\mathcal{C}|}$ be a multi-label classifier, which we assume", + "bbox": [ + 75, + 779, + 470, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "has already been learned using training images. The multi-label classifier $\\mathcal{F} = \\{f_1, f_2, \\ldots, f_{|\\mathcal{C}|}\\}$ consists of $|\\mathcal{C}|$ binary classifiers for each label, where $f_c(\\pmb{x}) \\in (-\\infty, +\\infty)$ is the score of the classifier $c$ . Therefore, the probability of label $c$ being present in the image $\\pmb{x}$ is given by $\\hat{y}_c = \\sigma(f_c(\\pmb{x}))$ , where $\\sigma(\\cdot)$ is the sigmoid function. Finally, let $\\Omega_{\\pmb{x}} \\subseteq \\mathcal{C}$ denote the target set of labels in the image $\\pmb{x}$ which we want to attack, i.e., after the attack the present labels in $\\Omega_{\\pmb{x}}$ must become absent and vice versa. In the next subsection, we study the existing approaches [71] to generate multi-label attacks and identify their drawbacks.", + "bbox": [ + 496, + 90, + 890, + 257 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Naive Multi-Label Attack (MLA)", + "text_level": 1, + "bbox": [ + 498, + 263, + 792, + 279 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For an attack on $\\pmb{x}$ that modifies the labels in $\\Omega_{\\pmb{x}}$ , one can generate a small perturbation $e \\in \\mathbb{R}^d$ by minimizing the negative multi-label learning loss for labels in $\\Omega_{\\pmb{x}}$ while restricting the magnitude of $e$ . 
More precisely, we can solve", + "bbox": [ + 496, + 284, + 890, + 345 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\text {M L A - U :} \\min _ {\\boldsymbol {e}} - \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {e}, \\Omega_ {\\boldsymbol {x}}) \\text {s . t .} \\| \\boldsymbol {e} \\| _ {p} \\leq \\epsilon , \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 514, + 359, + 890, + 380 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\| \\cdot \\| _p$ is the $\\ell_p$ -norm and $\\mathcal{L}_{ce}(\\boldsymbol{x}',\\Gamma_{\\boldsymbol{x}'})$ is the binary cross-entropy loss for image $\\boldsymbol{x}'$ on labels in $\\Gamma_{\\boldsymbol{x}'}$ , defined as", + "bbox": [ + 496, + 391, + 890, + 422 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {b c e} \\left(\\boldsymbol {x} ^ {\\prime}, \\Omega_ {\\boldsymbol {x} ^ {\\prime}}\\right) \\triangleq\n$$\n", + "text_format": "latex", + "bbox": [ + 514, + 436, + 622, + 454 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {c \\in \\Omega_ {\\boldsymbol {x} ^ {\\prime}}} - y _ {c} \\log \\sigma \\left(f _ {c} \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right) - \\left(1 - y _ {c}\\right) \\log \\left(1 - \\sigma \\left(f _ {c} \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right)\\right). \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 526, + 455, + 890, + 487 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The drawback of (1) is that attack on $\\Omega_{\\mathbf{x}}$ can lead to changing the predictions for other labels too, see Figure 2 (right). 
This often leads to inconsistent predictions, which can simply be used to detect the attack (e.g., turning off 'pedestrian' can turn on 'bike' and 'stop sign' while turning off 'road'), hence significantly reducing the effectiveness of the attack.", + "bbox": [ + 496, + 498, + 890, + 588 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To address this drawback, one can try to prevent changing predictions of other labels $(\\bar{\\Omega}_{\\pmb{x}})$ , which is the complement of $\\Omega_{\\pmb{x}}$ with respect to $\\mathcal{C}$ by crafting the attack while including a loss that enforces predictions of other labels to stay intact. More precisely, one can solve", + "bbox": [ + 496, + 589, + 890, + 662 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\text {M L A - C :} \\min _ {\\boldsymbol {e}} - \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {e}, \\Omega_ {\\boldsymbol {x}}) + \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {e}, \\bar {\\Omega} _ {\\boldsymbol {x}}), \\tag {3} \\\\ \\begin{array}{l} \\text {s . t .} \\| e \\| _ {p} \\leq \\epsilon , \\end{array} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 519, + 672, + 890, + 710 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where the first term in the objective function tries to flip the labels in $\\Omega_{x}$ while the second term preserves the labels in $\\bar{\\Omega}_{x}$ . Notice that with the additional objective, the space of perturbations in (3) is smaller than that in (1), yet it ensures not modifying labels outside the target set. However, as we verify by empirical results, the gradient of the loss for fixing other labels often is highly negatively correlated with the gradient of the loss on the target labels, hence, counteracting the effect of each other. We hypothesize that this effect is due to strong spurious correlations among labels, learnt by the model during training. 
Given two highly-correlated labels in an image, attacking one label while fixing the other", + "bbox": [ + 496, + 719, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "24253", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "using (3) would lead to opposite gradients. This problem gets more pronounced when the number of labels increases (e.g., in Open Images dataset [40] with 9,600 labels) and the gradient of this additional loss gets larger too. Moreover, fixing predictions for labels in $\\bar{\\Omega}_{\\pmb{x}}$ still may lead to semantic inconsistencies in predictions (e.g., turning off 'vehicle' requires turning off 'car' and 'truck', otherwise 'vehicle' being off while 'car' being on can be used to detect the attack), hence, reducing the attack effectiveness.", + "bbox": [ + 75, + 90, + 472, + 227 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. Generalized Multi-Label Attack (GMLA)", + "text_level": 1, + "bbox": [ + 76, + 238, + 449, + 255 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We develop a framework for crafting adversarial attacks for MLL that addresses the challenges of conventional MLA, discussed above. First, to obtain an attack on a target label set $\\Omega_{x}$ that leads to semantically consistent predictions across all labels, we find a minimal superset of the target set $\\Psi_{x}$ (referred to as consistent target set) that needs to be attacked/modified. Given that there are often multiple such superset, we develop an efficient search algorithm over a knowledge graph $\\mathcal{G}$ that encodes label dependencies. We denote by $\\Psi_{x} = h\\bigl (\\Omega_{x},\\mathcal{G}\\bigr)$ the output of the search algorithm, which we will describe in detail later in this section.", + "bbox": [ + 75, + 263, + 472, + 430 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1. 
Proposed Optimization", + "text_level": 1, + "bbox": [ + 76, + 436, + 290, + 454 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We then study a projection-based optimization that searches for an attack that modifies the predictions of labels in $\\Psi_{\\pmb{x}}$ while ensuring that other labels $\\bar{\\Psi}_{\\pmb{x}}$ will not get affected. More specifically, we propose to solve", + "bbox": [ + 75, + 460, + 468, + 521 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\text {G M L A :} \\min _ {\\boldsymbol {e}} - \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {e}, \\Psi_ {\\boldsymbol {x}}), \\\\ \\text {s . t .} \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {e}, \\bar {\\Psi} _ {\\boldsymbol {x}}) = \\mathcal {L} _ {b c e} (\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}), \\tag {4} \\\\ \\| \\boldsymbol {e} \\| _ {p} \\leq \\epsilon , \\Psi_ {\\boldsymbol {x}} = h (\\Omega_ {\\boldsymbol {x}}, \\mathcal {G}), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 98, + 527, + 468, + 590 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where we only minimize the attack loss on the consistent target set $\\Psi_{\\mathbf{x}}$ , while requiring that the binary cross-entropy loss on other labels $\\bar{\\Psi}_{\\mathbf{x}}$ stay the same after the attack. This means that instead of trying to make the predictions on other labels more confident as in (3), we try to keep them stay the same after the attack. 
As we also show in the experiments (see Figure 8), this significantly boosts the attack by resolving the high negative correlation of the gradients of the two losses in (3) and finding better attack directions.", + "bbox": [ + 75, + 594, + 468, + 729 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Since solving the optimization in (4) that ensures the first constraint is satisfied is difficult, we take a first-order approximation on this constraint around $x$ (as $e$ is small),", + "bbox": [ + 75, + 729, + 468, + 776 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {e}, \\bar {\\Psi} _ {\\boldsymbol {x}}) \\approx \\mathcal {L} _ {b c e} (\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}) + \\boldsymbol {g} _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} ^ {\\top} \\boldsymbol {e}, \\\\ \\text {w h e r e ,} \\quad \\boldsymbol {g} _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} \\triangleq \\frac {\\partial \\mathcal {L} _ {b c e} (\\boldsymbol {x} , \\bar {\\Psi} _ {\\boldsymbol {x}})}{\\partial \\boldsymbol {x}}. \\end{array} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 125, + 780, + 468, + 835 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Thus, we can rewrite (4) as", + "bbox": [ + 76, + 839, + 261, + 853 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\min _ {\\boldsymbol {e}} - \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {e}, \\Psi_ {\\boldsymbol {x}}), \\tag {6} \\\\ \\begin{array}{l} \\text {s . t .} \\quad \\boldsymbol {g} _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} ^ {\\top} \\boldsymbol {e} = \\mathbf {0}, \\| \\boldsymbol {e} \\| _ {p} \\leq \\epsilon , \\Psi_ {\\boldsymbol {x}} = h \\big (\\Omega_ {\\boldsymbol {x}}, \\mathcal {G} \\big). 
\\end{array} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 99, + 859, + 468, + 904 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The constraint $g_{x,\\bar{\\Psi}_x}^\\top e = 0$ implies that $e$ must be in the orthogonal space to the gradient direction $g_{x,\\bar{\\Psi}_x}$ , hence not changing other labels. Thus, we can write", + "bbox": [ + 498, + 90, + 890, + 136 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {e} = \\boldsymbol {P} _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} \\alpha , \\quad \\boldsymbol {P} _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} \\triangleq \\boldsymbol {I} - \\frac {\\boldsymbol {g} _ {\\boldsymbol {x} , \\bar {\\Psi} _ {\\boldsymbol {x}}} \\boldsymbol {g} _ {\\boldsymbol {x} , \\bar {\\Psi} _ {\\boldsymbol {x}}} ^ {\\top}}{\\| \\boldsymbol {g} _ {\\boldsymbol {x} , \\bar {\\Psi} _ {\\boldsymbol {x}}} \\| _ {2} ^ {2}}, \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 540, + 140, + 890, + 179 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "for some $\\alpha \\in \\mathbb{R}^d$ , where $P_{x,\\bar{\\Psi}_x}$ is the orthogonal projection matrix on the gradient $g_{x,\\bar{\\Psi}_x}$ . Thus, we can write the optimization in (4) as", + "bbox": [ + 498, + 190, + 890, + 236 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\text {G M L A :} \\min _ {\\boldsymbol {\\alpha}} - \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {P} _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} \\boldsymbol {\\alpha}, \\Psi_ {\\boldsymbol {x}}), \\tag {8} \\\\ \\begin{array}{l} \\text {s . t .} \\| P _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} \\boldsymbol {\\alpha} \\| _ {p} \\leq \\epsilon , \\quad \\Psi_ {\\boldsymbol {x}} = h (\\Omega_ {\\boldsymbol {x}}, \\mathcal {G}). 
\\end{array} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 513, + 247, + 890, + 290 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We follow AutoPGD [17] to iteratively solve (8). At each iteration, we linearly approximate the objective function and solve $(\\pmb{g}_{\\pmb{x},\\Psi_{\\pmb{x}}}$ is the gradient of $\\mathcal{L}_{bce}(\\pmb {x},\\Psi_{\\pmb{x}}))$", + "bbox": [ + 498, + 299, + 890, + 347 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {\\boldsymbol {\\alpha}} - \\mathbf {g} _ {\\boldsymbol {x}, \\Psi_ {\\boldsymbol {x}}} ^ {\\top} \\left(\\boldsymbol {P} _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} \\boldsymbol {\\alpha}\\right), \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 555, + 357, + 890, + 386 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\text {s . t .} \\| P _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} \\boldsymbol {\\alpha} \\| _ {p} \\leq \\epsilon , \\quad \\Psi_ {\\boldsymbol {x}} = h (\\Omega_ {\\boldsymbol {x}}, \\mathcal {G}). \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 560, + 383, + 836, + 402 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As we show in the supplementary materials, we can solve (9) for $p = \\infty$ and get the closed form update for $e$ as", + "bbox": [ + 500, + 412, + 890, + 441 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\ne = \\epsilon \\cdot \\frac {\\boldsymbol {P} _ {\\boldsymbol {x} , \\bar {\\Psi} _ {\\boldsymbol {x}}} \\boldsymbol {\\nu}}{\\| \\boldsymbol {P} _ {\\boldsymbol {x} , \\bar {\\Psi} _ {\\boldsymbol {x}}} \\boldsymbol {\\nu} \\| _ {\\infty}}, \\quad \\boldsymbol {\\nu} \\triangleq \\operatorname {s g n} \\left(\\boldsymbol {g} _ {\\boldsymbol {x}, \\Psi_ {\\boldsymbol {x}}}\\right). 
\\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 526, + 452, + 890, + 488 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We further enhance the effectiveness of the attack, especially for the case when the gradients of both the targeted and non-targeted classes are aligned (have positive correlation). In such instances, our approach involves finding the direction $e$ using", + "bbox": [ + 498, + 497, + 890, + 571 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {e} e ^ {T} \\left(- \\frac {\\mathbf {g} _ {\\mathbf {x} , \\Psi_ {\\mathbf {x}}}}{\\| \\mathbf {g} _ {\\mathbf {x} , \\Psi_ {\\mathbf {x}}} \\| _ {2}} + \\frac {\\mathbf {g} _ {\\mathbf {x} , \\bar {\\Psi} _ {\\mathbf {x}}}}{\\| \\mathbf {g} _ {\\mathbf {x} , \\bar {\\Psi} _ {\\mathbf {x}}} \\| _ {2}}\\right) \\text {s . t .} \\| e \\| _ {p} \\leq \\epsilon . \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 506, + 583, + 890, + 616 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We provide more details and analysis in the supplementary.", + "bbox": [ + 500, + 626, + 890, + 641 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2. Consistent Target Set via Knowledge Graph", + "text_level": 1, + "bbox": [ + 498, + 650, + 872, + 667 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We obtain a consistent target set by developing an efficient search algorithm over a knowledge graph $\\mathcal{G}$ that encodes label dependencies. Assume $\\mathcal{G} = (\\mathcal{C},\\mathcal{E})$ is a directed acyclic knowledge graph built on the labels $\\mathcal{C}$ , where $\\mathcal{E}$ denotes the set of edges (see below for details about building this graph). 
A consistent target set $\\Psi_{x}$ is defined as a superset of the target nodes/labels $\\Omega_{x}$ that if attacked successfully leads to MLL outputs so that $i)$ when MLL predicts 1 for a parent node/label, then at least one of its children is also predicted as 1; $ii)$ when all children of a node/label are predicted as 0, then the parent is predicted as 0.", + "bbox": [ + 496, + 674, + 890, + 839 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Algorithm 1 shows our algorithm and the time complexity for each step to obtain the consistent target set. The algorithm works as follows. Given the target set $\\Omega_{x}$ , MLL predictions $\\mathcal{S}$ , and the adjacency matrix $\\mathcal{E}$ of the knowledge", + "bbox": [ + 496, + 840, + 890, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "24254", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "graph, the algorithm finds the minimal superset of $\\Omega_{\\mathbf{x}}$ to be modified. While attacking a label, we need to maintain its consistency with respect to its children and parents. To maintain children consistency, each child of the target node must be turned OFF unless that child has multiple parents ON. We parse the path from target node to the leaf nodes and perform the same operation on every node. Similarly, to maintain parents consistency, all parents must be turned OFF unless some parent has more than one child ON. We perform this process for each node along the path from target node to the root until there are no more nodes to modify. The upper bound of algorithm's time complexity is $\\mathcal{O}(\\Omega \\mathcal{C})$ . As Figure 4 shows, on the same graph, consistent target sets depend on the MLL predictions.", + "bbox": [ + 75, + 90, + 472, + 303 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Knowledge Graph Construction. 
To construct $\\mathcal{G}$ , we use WordNet [54], which contains rich semantic relationships between labels $^2$ . One can also use other sources, such as ConceptNet [72] or OpenImages semantic hierarchy [40]. We build a tree $\\mathcal{G} = (\\mathcal{C}, \\mathcal{E})$ on all labels $\\mathcal{C}$ using hypernym and hyponym relations of labels. This can also be easily extended to other relationships e.g., antonymy, entailment, etc. For each label in $\\mathcal{C}$ , we use WordNet to extract its parent and child labels (e.g., for 'car', we obtain 'vehicle' as parent using its hybernyms). Since a word can be associated with several synsets, we choose the synset with the closest match to the label description. To build the tree, we use the maximum WUP similarity [80] between a child and multiple parent nodes to select a single parent.", + "bbox": [ + 75, + 310, + 472, + 522 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 76, + 536, + 209, + 553 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.1. Experimental Setup", + "text_level": 1, + "bbox": [ + 76, + 561, + 266, + 578 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Datasets. We use Pascal-VOC [24], NUS-WIDE [16] and OpenImages [40] for studying the effectiveness of multi-label attacks. For Pascal-VOC, we trained each MLL model on 8,000 images from the training sets of PASCAL-VOC 2007 and PASCAL-VOC 2012 and created the adversarial examples for the test set of PASCAL-VOC 2007. To build $\\mathcal{G}$ , we extracted abstract classes from WordNet using which and the original 20 labels, we obtained 35 labels/nodes. For NUS-WIDE, we trained each MLL model on 150K images from the training set and attacked the models using the test set of the dataset. We used Wordnet to extract abstract classes and built a tree on labels. 
The total number of labels are 116, which includes 80 original labels and 36 additional abstract classes from WordNet. For OpenImages, we used pre-trained model from [64] and used test images to generate the attacks. We use the official class hierarchy provided in OpenImages as semantic relationship information.", + "bbox": [ + 75, + 584, + 468, + 843 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Multi-Label Recognition Models. We investigate the ef", + "bbox": [ + 76, + 849, + 467, + 864 + ], + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1: Consistent Target Set Construction" + ], + "code_body": "Input: $\\Omega$ : Target Set, $S$ : MLL Label Predictions, $\\mathcal{E}$ : Knowledge Graph's Adjacency Matrix Output: $\\Gamma$ Expanded Target Set Procedure: $f_{select}(X)$ : return $\\{i:X_i = True\\}$ Procedure $f_{child.}(n,\\mathcal{E},\\mathcal{S})$ .. return $f_{select}(\\mathcal{E}_{[n,:]}\\odot \\mathcal{S} == 1)$ Procedure $f_{par.}(n,\\mathcal{E},\\mathcal{S})$ .. return $f_{select}(\\mathcal{E}_{[:n]}\\odot \\mathcal{S} == 1)$ \nProcedure Consistent_Comp $(n,V,\\Gamma ,f_1,f_2)$ . 
Queue Q I $\\leftarrow f_1(n,\\mathcal{E},\\mathcal{S})$ $\\triangleright \\mathcal{O}(1)$ Q.enqueue(I) $\\triangleright \\mathcal{O}(1)$ while $\\mathcal{Q}$ is not empty do $\\triangleright \\mathcal{O}(\\mathcal{C})$ $v_{n} = \\mathcal{Q}.dequeue()$ if $v_{n}\\notin \\mathcal{V}$ then $\\nu \\gets \\nu \\cup \\{v_n\\}$ $\\triangleright \\mathcal{O}(1)$ $I\\gets f_2(v_n,\\mathcal{E},\\mathcal{S})\\backslash \\Gamma$ if $|I| < 2$ then $\\Gamma \\leftarrow \\Gamma \\cup \\{v_n\\}$ $\\triangleright \\mathcal{O}(1)$ $I\\gets f_1(v_n,\\mathcal{E},\\mathcal{S})$ Q.enqueue(I) \n $\\Gamma = \\{\\}$ \nforeach $n\\in \\Omega$ do $\\triangleright \\mathcal{O}(\\Omega)$ V = {n} \n $\\Gamma \\leftarrow$ Consistent_Comp(n,V,Γ,fchild., $f_{par.})$ $\\triangleright \\mathcal{O}(\\mathcal{C})$ $\\Gamma \\leftarrow$ Consistent_Comp(n,V,Γ,fpar., $f_{child.})$ $\\triangleright \\mathcal{O}(\\mathcal{C})$", + "bbox": [ + 500, + 111, + 890, + 434 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/493e62d6104bbb3d867c5856ea4fe00074275e03170d6275985f5b07d42b3833.jpg", + "image_caption": [ + "Figure 4. Examples of different consistent target sets obtained by Algorithm 1. Green nodes show the present labels predicted by the MLL and $\\Omega = \\{t\\}$ is the target. The labels to be modified, $\\Psi$ are shown within the red region and the labels to be fixed $\\bar{\\Psi}$ are shown within the green region." + ], + "image_footnote": [], + "bbox": [ + 511, + 449, + 879, + 539 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "fectiveness of multi-label attacks on three MLL models.", + "bbox": [ + 498, + 611, + 870, + 626 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- ML-GCN [15]: It explicitly learns relationships among labels using Graph Convolutional Networks (GCN). It builds a graph using the word embeddings and the cooccurrence matrix of labels and uses a GCN to extract information about label relationships. 
We trained the model using the binary cross-entropy loss.", + "- Asymmetric Loss (ASL) [64]: It is an effective multi-label learning method that uses a novel loss for better optimization over highly imbalanced positive and negative class distributions. Following their experimental setting, we trained the TResNet-L [63] backbone.", + "- ML-Decoder [65]: It is an attention-based unified decoder architecture for zero-shot, single-label, and multi-label classification. It uses a group-decoding scheme to alleviate the problem of scaling to large number of classes." + ], + "bbox": [ + 496, + 630, + 890, + 864 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Perturbation Generation. For PASCAL-VOC and NUS-WIDE, we show results on a range of perturbation budgets.", + "bbox": [ + 500, + 869, + 890, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "2WordNet is a lexical database for the English language, containing 155,327 words organized in 175,979 synsets.", + "bbox": [ + 76, + 875, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "24255", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/a766f3d13113a029f1ff315e8ec21514cfe74780e4f57afb984469b3a7ae8182.jpg", + "image_caption": [ + "Figure 5. Naive fooling rate $(\\mathrm{FR}_N)$ and graph-based fooling rate $(\\mathrm{FR}_S)$ of different attacks on ML-GCN model, trained on PASCAL-VOC for one and two label/node attacks. The x-axis shows the upper bound on the $l_{\\infty}$ -norm of perturbations $(\\epsilon)$ ." 
+ ], + "image_footnote": [], + "bbox": [ + 117, + 89, + 297, + 212 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e1d49b422ba75be5eb73d3a2b25c250a34ba77532aea66c9e6c041fe11b9a9ca.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 302, + 90, + 480, + 212 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/1f9e5078065947deb5521b92330279779a76f8db0c34f4b06724f4fe3c6ab520.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 486, + 90, + 663, + 212 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b7eff2bc4297de6a49667ff653d328d72bcfb2ee6f694a4fd1e7e731acacb9a4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 668, + 90, + 846, + 212 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/aa60567be53d1241dba89384f11135bed19b7234b00317ff37d72e4439351929.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetPASCAL-VOCNUS-WIDE
Target Set Size|Ω| = 1|Ω| = 2|Ω| = 1|Ω| = 2
ModelAttack↑FRN↑FRS↓NTR↑SSIM↑FRN↑FRS↓NTR↑SSIM↑FRN↑FRS↓NTR↑SSIM↑FRN↑FRS↓NTR↑SSIM
ML-GCN [15]MLA-U [71]100.075.54.90.97100.068.04.50.9799.743.51.50.9699.331.71.60.96
MLA-C [71]99.968.93.00.9699.860.22.80.9796.427.40.40.9792.418.50.40.97
MLA-LP [65]56.16.700.10.9946.76.000.30.9919.33.500.10.9811.43.300.00.98
GMLA (Ours)100.099.42.70.97100.098.42.50.9899.295.80.50.9799.191.30.40.97
ASL [64]MLA-U [71]100.052.84.60.97100.048.34.80.98100.050.02.00.97100.043.32.10.97
MLA-C [71]100.039.72.30.9799.733.22.10.98100.035.50.70.97100.030.00.70.96
MLA-LP [65]15.82.400.10.9911.92.900.50.9920.84.800.00.9816.13.100.00.98
GMLA (Ours)100.098.82.20.97100.098.82.00.98100.096.10.80.97100.093.20.70.97
ML-Dec [65]MLA-U [71]99.766.25.30.9799.862.05.70.9898.856.44.10.9797.950.44.60.98
MLA-C [71]99.150.62.70.9897.540.72.40.9773.630.41.00.9768.226.70.90.97
MLA-LP [65]19.43.700.10.9817.63.200.20.9813.34.100.00.979.72.900.00.98
GMLA (Ours)99.196.22.70.9899.397.12.50.9795.184.91.10.9793.982.01.00.98
", + "bbox": [ + 84, + 253, + 888, + 460 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1. Experimental evaluation of the four attack methods on three models for $\\epsilon = 0.01$ . The values represent the mean computed using the attack performance across all the combinations of target classes of size $|\\Omega|$ .", + "bbox": [ + 75, + 464, + 892, + 489 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For OpenImages with 9,600 labels, we perform experiments for large-scale attacks with different sizes of the target set for a fixed epsilon value. To generate the target sets for attack, we randomly draw 100 samples of size $k$ labels. For each draw from OpenImages, we randomly sample $k / 2$ leaf nodes (labels) from the graph $\\mathcal{G}$ and sample the remaining labels which are not part of the graph.", + "bbox": [ + 75, + 516, + 468, + 622 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Baselines. We use MLA-U and MLA-C as baselines, following Song et al. [71]. Additionally, we use MLA-LP [65] as a baseline, which generates adversarial perturbation for multi-label recognition by solving a linear programming problem using the interior point method while minimizing the $l_{\\infty}$ norm. In contrast to other methods, it requires computing the Jacobian at each optimization step. In our experiments, MLA-LP did not converge for OpenImages. To provide a comprehensive comparison, we extend our evaluation to ML-DP [71], a greedy algorithm that computes multi-label attack perturbations using constraint linearization as introduced in DeepFool [55]. We show the results for ML-DP in supplementary material.", + "bbox": [ + 75, + 632, + 468, + 830 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation Metrics. 
Let $\\mathcal{I}$ be the set of images that are attacked and $\\mathcal{A} \\subseteq \\mathcal{I}$ denote the set of images that are successfully attacked, i.e., for $x \\in \\mathcal{A}$ , all labels in $\\Omega_x$ change after the attack. Let $\\mathcal{A}_{\\mathcal{G}} \\subseteq \\mathcal{A}$ denote the subset of $\\mathcal{A}$ for", + "bbox": [ + 75, + 839, + 468, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "which the attack produces semantically consistent predictions in the output of MLL according to $\\mathcal{G}$", + "bbox": [ + 496, + 516, + 888, + 546 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We define naive fooling rate, $FR_{N}$ and semantic-based fooling rate, $FR_{S}$ , as", + "bbox": [ + 496, + 547, + 890, + 577 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nF R _ {N} = \\frac {| \\mathcal {A} |}{| \\mathcal {I} |}, F R _ {S} = \\frac {| \\mathcal {A} _ {\\mathcal {G}} |}{| \\mathcal {I} |}. \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 601, + 583, + 890, + 613 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Thus, $FR_{N}$ measures fraction of attacked images whose attacks has been successful, without considering whether the MLL predictions are semantically consistent. On the other hand, $FR_{S}$ captures fraction of attacked images whose attacks have been successful and produced semantically consistent MLL predictions. 
We also define non-target flip rate, $NT_{R}$ , which is the percentage of semantically unrelated labels (labels in $\\bar{\\Psi}_{k}$ ) which were flipped by the attack, i.e.,", + "bbox": [ + 496, + 619, + 890, + 741 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nN T _ {R} = \\frac {1}{| \\mathcal {A} |} \\sum_ {k \\in \\mathcal {A}} \\frac {\\sum_ {i \\in \\bar {\\Psi} _ {k}} \\left(1 - \\delta \\left(f _ {i} ^ {(k)} , y _ {i} ^ {(k)}\\right)\\right)}{| \\bar {\\Psi} _ {k} |}, \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 553, + 750, + 890, + 789 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where, $\\delta$ is Kronecker delta function that equals 1 when the two inputs are equal and 0 otherwise, $y_{i}^{(k)},f_{i}^{(k)}\\in \\{0,1\\}$ are the model predictions on clean and adversarial images respectively, of $i^{th}$ non-target class of $k^{th}$ successfully attacked image. Finally, we measure the imperceptibility of the perturbations using average structural similarity (SSIM) between pairs of original and adversarial images.", + "bbox": [ + 496, + 792, + 890, + 901 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "24256", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/32ef6eeb8e4b544a25824f5f8631165aa5bc7b21672169b575d3abea9dde8c17.jpg", + "image_caption": [ + "Figure 6. Performance of different multi-label attacks with fixed $\\epsilon = 0.05$ on OpenImages as we increase the target set size." 
+ ], + "image_footnote": [], + "bbox": [ + 84, + 90, + 269, + 228 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6dd053c946d34acba1843bbb6779842a7f3b9d734715ca6194ca6bb8c7faadfe.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 274, + 90, + 455, + 228 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Note that $FR_{N}, FR_{S}$ , and $SSIM$ should be high while $NT_{R}$ should be low for a good attack method.", + "bbox": [ + 76, + 284, + 467, + 315 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2. Experimental Results", + "text_level": 1, + "bbox": [ + 76, + 325, + 277, + 340 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Figure 5 shows the performance of different attack methods on PASCAL-VOC for one- and two-node attacks for different epsilon values using ML-GCN classifier. In Table 1, we show the evaluation across the three MLL models for a fixed $\\epsilon = 0.01$ for which the performance of all attacks has plateaued3. We also show the evaluation on OpenImages for different target sizes in Fig. 6 and Tab. 2. From the results, we make the following conclusions:", + "bbox": [ + 75, + 349, + 467, + 470 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "- As Fig. 5 shows, all methods achieve high naive fooling rate $FR_{N}$ given large enough perturbation budget, yet once we filter out the attacks leading to semantically inconsistent predictions, the performance $(FR_{S})$ of all baselines significantly decreases. However, our GMLA achieves very high semantic-based fooling rate than baselines. From Tab. 1 and 2, our method achieves naive fooling rate $FR_{N}$ comparable to the other methods but outperforms them over $FR_{S}$ by a significant margin.", + "bbox": [ + 75, + 474, + 467, + 609 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "- Notice from Fig. 5 and 6 that MLA-U has higher naive and semantic-based fooling rates than MLA-C. 
The reason is the strong positive correlations learned among related cooccurring labels during model training, which MLA-U implicitly exploits. However, MLA-U being oblivious to the relationships among labels can inevitably affect unrelated labels, as shown in Tab. 1 and 2. This explains why MLA-U has the highest $N T_{R}$ across different settings. The difference becomes more apparent as we move to attack larger datasets e.g. OpenImages. This is because, a larger number of labels increases the chances of learning spurious correlations among unrelated labels.", + "bbox": [ + 75, + 614, + 467, + 796 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "- Based on Fig. 5, MLA-LP achieves lowest performance compared to other attack methods for both fooling rates on PASCAL-VOC and NUS-WIDE datasets, and does not converge for OpenImages experiments. This is because MLA-LP uses interior point method at each iteration to solve a", + "bbox": [ + 75, + 800, + 467, + 875 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/d0423635a6e5826d15830a97174e14d50ccda97393cd19844b098cb255bb2056.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Attack|Ω|=1|Ω|=2|Ω|=3|Ω|=4|Ω|=5
MLA-U0.47 ± 0.020.57 ± 0.030.66 ± 0.030.75 ± 0.040.87 ± 0.03
MLA-C0.32 ± 0.090.31 ± 0.090.09 ± 0.070.06 ± 0.040.0 ± 0.0
GMLA (Ours)0.32 ± 0.140.16 ± 0.120.21 ± 0.130.11 ± 0.070.06 ± 0.04
", + "bbox": [ + 504, + 89, + 883, + 143 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2. Percentage of semantically unrelated labels $(NT_R)$ affected at $\\epsilon = 0.05$ for ASL[64] on OpenImages.", + "bbox": [ + 500, + 148, + 890, + 174 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6c9bf462a4ff3740658d5cf5d127a6069bae81a6e3fab69cd1b1a2f29d732a36.jpg", + "image_caption": [ + "Figure 7. Transferability across models on PASCAL-VOC. The y-axis shows the source model which generates the perturbation and x-axis shows the target model evaluated on that perturbation." + ], + "image_footnote": [], + "bbox": [ + 506, + 191, + 880, + 349 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "system of equations, which define the constraints on the target and non-target labels. Because of the complex relationships among different labels, the feasible region for the given linear problem might be empty. This has also been identified by [96]. When the LP problem has a feasible solution, MLA-LP successfully finds the perturbation that satisfy the attack constraints. This explains why, for the small number of successfully attacked images, MLA-LP affects the least percentage of non-targeted labels, achieving low $\\mathrm{NT}_R$ as shown in Tab. 1.", + "bbox": [ + 496, + 412, + 890, + 561 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "- Each attack method produces imperceptible perturbations, as we constrain the maximum infinity norm of the perturbation to 0.01 (on images with pixel values between 0 to 1). Notice also from Table 1 that the average SSIM scores between the adversarial and original images is very close to 1, showing imperceptibility of perturbations.", + "bbox": [ + 496, + 568, + 890, + 659 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "- Notice from Fig. 6 that MLA-C fails to successfully attack large-scale datasets and its performance drops drastically as we increase the target set size. 
As mentioned earlier, this is attributed to the observation that gradients of target and non-targeted classes are often opposite (as shown in Fig. 8) and as MLA-C optimizes the target and non-target loss simultaneously, the resulting perturbations are sub-optimal. From Tab. 2, MLA-C achieves lowest $\\mathrm{NT}_R$ for target sizes greater than 2 but also performs poorly on fooling rates. Note that despite achieving high fooling rates $\\mathrm{FR}_N$ and $\\mathrm{FR}_S$ , our GMLA method affects very small percentage of semantically unrelated labels, which shows the success of our constraint proposed in (6).",
    "bbox": [
      496,
      664,
      890,
      859
    ],
    "page_idx": 6
  },
  {
    "type": "text",
    "text": "Attack Transferability. Figure 7 shows the cross-model transferability of different attacks. For each source model,",
    "bbox": [
      498,
      869,
      890,
      900
    ],
    "page_idx": 6
  },
  {
    "type": "page_footnote",
    "text": "3We show results of ablation experiment on GMLA in supplementary.",
    "bbox": [
      94,
      886,
      465,
      900
    ],
    "page_idx": 6
  },
  {
    "type": "page_number",
    "text": "24257",
    "bbox": [
      478,
      944,
      517,
      955
    ],
    "page_idx": 6
  },
  {
    "type": "image",
    "img_path": "images/cf4f16901dbae9f6a2bf53647b471bfe5733bb9aa4dc2b1fd0440a56da6f002d.jpg",
    "image_caption": [
      "Figure 8. Stacked bar charts showing the correlation between the gradient of the loss on target labels $g_{\\boldsymbol{x}, \\Psi_{\\boldsymbol{x}}}$ and on other labels $g_{\\boldsymbol{x}, \\bar{\\Psi}_{\\boldsymbol{x}}}$ for different sizes of the target set on OpenImages. Left: Using (3) as objective. Right: using our proposed (6) that optimizes the loss on target labels while keeping the loss on non-target labels the same (as a constraint)."
+ ], + "image_footnote": [], + "bbox": [ + 84, + 87, + 267, + 190 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/809578826816b7b0766edb079e1ba6eff1f7f675c241b895830aa9e5f59b8d7a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 276, + 88, + 455, + 190 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a2058a7676f0f1da527cdc506401ccaeaa97740fbe2f35c5679231a0eda71ac2.jpg", + "image_caption": [ + "Figure 9. Results of attacking ML-GCN on PASCAL-VOC (first two columns) and NUS-WIDE (last two columns). Each column shows the model predictions for clean $(\\epsilon = 0)$ and attacked images. Rounded rectangles group semantically related labels. Inconsistent predictions caused around target labels are shown with red rectangles. The red labels at the top are targeted labels and the arrows show the relationships." + ], + "image_footnote": [], + "bbox": [ + 86, + 272, + 460, + 417 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "we compute the perturbations (scaled to $\\epsilon = 0.1$ ) for images and evaluate the target models exclusively on the images that were successfully attacked by the respective source model (hence the diagonal values are all 1). Notice that although all attacks, other than MLA-LP, are transferable, GMLA semantic attack transfers better and achieves the highest $\\mathrm{FR}_N$ and $\\mathrm{FR}_S$ . From Table 1, notice that all attacks were able to achieve non-trivial graph-based fooling rate. However, GMLA is the most effective method to generate semantically consistent and generally transferrable attacks.", + "bbox": [ + 75, + 523, + 468, + 676 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Gradient Correlations. Figure 8 shows the correlation between the gradient of the loss on target labels (to be modified), $g_{x,\\Psi_x}$ , and on other labels (to be fixed), $g_{x,\\bar{\\Psi}_x}$ , for different sizes of the target set on OpenImages. 
Notice that adding the two losses leads to highly negatively correlated gradients for them. However, only optimizing the loss on target labels while keeping the loss on non-target labels the same (as a constraint) leads to significant increase in gradient correlations, which can justify the success of GMLA.", + "bbox": [ + 75, + 681, + 468, + 819 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Qualitative Results. Figure 9 shows qualitative results of attacking ML-GCN using PASCAL-VOC and NUS-WIDE. Notice that in all four cases, respectively, MLA-U and MLA-C lead to inconsistencies. For example, to turn off the boat label in the first image, MLA-U attacks the boat and", + "bbox": [ + 75, + 824, + 470, + 902 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/9dc3923be7314c7ad03040db56f9d9cdbf2019286621f15d0504a561eba448eb.jpg", + "image_caption": [ + "Figure 10. Since the adversarial images have imperceptible changes, we visualize the perturbations computed using different methods for various target classes of PASCAL-VOC. The perturbations are computed by setting the maximum budget $\\epsilon = 0.01$ and are scaled for visualization. For each perturbation, we compute it's dot product (D) with the perturbation computed using our proposed attack - GMLA, and the structural similarity (S) of the original and the adversarial image (after adding the perturbation)." + ], + "image_footnote": [], + "bbox": [ + 506, + 88, + 883, + 305 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "craft labels but does not attack the vehicle label, leading to semantically inconsistent prediction. MLA-C successfully attacks boat, but keeps all other labels fixed, causing inconsistent predictions. For the second image, MLA-U successfully kept consistency around one group of labels but causes inconsistency in the other group. Similar to MLA-C, MLA-LP causes semantic inconsistencies for all images. 
Notice that in all cases, GMLA successfully modifies the necessary labels to ensure semantic consistency.", + "bbox": [ + 496, + 416, + 890, + 551 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In Figure 10, we visualize the perturbations computed by different methods and compare the SSIM (S) of baselines with GMLA. We also show the dot product (D) between the perturbation computed using each baseline method and the one computed using GMLA. We can see that GMLA finds different attack directions than the baseline methods, which results in semantically consistent and transferable attacks.", + "bbox": [ + 496, + 553, + 890, + 659 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusions", + "text_level": 1, + "bbox": [ + 500, + 674, + 627, + 690 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We developed an efficient framework to generate attacks for multi-label recognition that ensures semantic consistency of the output labels based on relationships among labels while effectively attacking a large number of labels. By extensive experiments on three datasets and several MLL models, we showed that our method generates both semantically consistent and successful adversarial attacks.", + "bbox": [ + 496, + 699, + 890, + 806 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 500, + 821, + 668, + 838 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work is sponsored by Khoury College of Northeastern funds, DARPA (HR00112220001), NSF (IIS-2115110), ARO (W911NF2110276). 
Content does not necessarily reflect the position/policy of the Government.", + "bbox": [ + 496, + 839, + 890, + 901 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "24258", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 106 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Abhishek Aich, Calvin-Khang Ta, Akash Gupta, Chengyu Song, Srikanth Krishnamurthy, Salman Asif, and Amit Roy-Chowdhury. Gama: Generative adversarial multi-object scene attacks. Advances in Neural Information Processing Systems, 35:36914-36930, 2022. 1, 3", + "[2] Abhishek Aich, Shasha Li, Chengyu Song, M Salman Asif, Srikanth V Krishnamurthy, and Amit K Roy-Chowdhury. Leveraging local patch differences in multi-object scenes for generative adversarial attacks. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1308-1318, 2023. 1, 3", + "[3] N. Akhtar and A. Mian. Threat of adversarial attacks on deep learning in computer vision: A survey. arXiv, 2018. 2", + "[4] A. Athalye, N. Carlini, and D. A. Wagner. Obfuscated gradients give a false sense of security: Circumventing defenses to adversarial examples. 2018. 2", + "[5] Yuanhao Ban and Yinpeng Dong. Pre-trained adversarial perturbations. In Advances in Neural Information Processing Systems, pages 1196-1209. Curran Associates, Inc., 2022. 2", + "[6] Emanuel Ben-Baruch, Tal Ridnik, Itamar Friedman, Avi Ben-Cohen, Nadav Zamir, Asaf Noy, and Lihi Zelnik-Manor. Multi-label classification with partial annotations using class-aware selective loss. 2022. 2", + "[7] Zikui Cai, Xinxin Xie, Shasha Li, Mingjun Yin, Chengyu Song, Srikanth V. Krishnamurthy, Amit K. Roy-Chowdhury, and M. Salman Asif. Context-aware transfer attacks for object detection. ArXiv, 2021. 3", + "[8]Zikui Cai, Shantanu Rane, Alejandro E. 
Brito, Chengyu Song,Srikanth V.Krishnamurthy,Amit K.Roy-Chowdhury, and M.Salman Asif.Zero-query transfer attacks on context-aware object detectors.IEEE Conference on Computer Vision and Pattern Recognition,2022.3",
    "[9] N. Carlini and D. Wagner. Adversarial examples are not easily detected: Bypassing ten detection methods. Workshop on Artificial Intelligence and Security, 2017. 1",
    "[10] N. Carlini and D. Wagner. Towards evaluating the robustness of neural networks. IEEE Symposium on Security and Privacy, 2017. 2",
    "[11] Y. Carmon, A. Raghunathan, L. Schmidt, P. Liang, and J. C. Duchi. Unlabeled data improves adversarial robustness. Neural Information Processing Systems, 2019. 2",
    "[12] P.-Y. Chen, Y. Sharma, H. Zhang, J. Yi, and C.-J. Hsieh. Ead: Elastic-net attacks to deep neural networks via adversarial examples. AAAI Conference on Artificial Intelligence, 2018. 2",
    "[13] T. Chen, M. Xu, X. Hui, H. Wu, and L. Lin. Learning semantic-specific graph representation for multi-label image recognition. IEEE International Conference on Computer Vision, 2019. 2",
    "[14] Zhao-Min Chen, Xiu-Shen Wei, Xin Jin, and Yanwen Guo. Multi-label image recognition with joint class-aware map disentangling and label correlation embedding. IEEE International Conference on Multimedia and Expo, 2019. 1",
    "[15] Z. M. Chen, X. S. Wei, P. Wang, and Y. Guo. Multi-label image recognition with graph convolutional networks. IEEE"
  ],
  "bbox": [
    78,
    114,
    470,
    900
  ],
  "page_idx": 8
},
{
  "type": "list",
  "sub_type": "ref_text",
  "list_items": [
    "Conference on Computer Vision and Pattern Recognition, abs/1904.03582, 2019. 5, 6",
    "[16] T. S. Chua, J. Tang, R. Hong, H. Li, Z. Luo, and Y. T. Zheng. Nus-wide: A real-world web image database from national university of singapore. ACM International Conference on Image and Video Retrieval, 2009. 5",
    "[17] F. Croce and M. Hein. 
Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks. ArXiv, 2020. 4", + "[18] S. D. Dao, E. Zhao, D. Phung, and J. Cai. Multi-label image classification with contrastive learning. arXiv preprint, arXiv:2107.11626, 2021. 2", + "[19] J. Deng, N. Ding, Y. Jia, A. Frome, K. Murphy, S. Bengio, Y. Li, H. Neven, and H. Adam. Large-scale object classification using label relation graphs. European Conference on Computer Vision, 2014. 2", + "[20] G. W. Ding, Y. Sharma, K. Y. Lui, and R. Huang. Max-margin adversarial (mma) training: Direct input space margin maximization through adversarial training. arXiv, 2020. 2", + "[21] Zixuan Ding, Ao Wang, Hui Chen, Qiang Zhang, Pengzhang Liu, Yongjun Bao, Weipeng Yan, and Jungong Han. Exploring structured semantic prior for multi label recognition with incomplete labels. 2023. 1", + "[22] Junhao Dong, Seyed-Mohsen Moosavi-Dezfooli, Jianhuang Lai, and Xiaohua Xie. The enemy of my enemy is my friend: Exploring inverse adversaries for improving adversarial training. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 24678–24687, 2023. 2", + "[23] Y. Dong, Z. Deng, T. Pang, H. Su, and J. Zhu. Adversarial distributional training for robust deep learning. arXiv, 2020. 2", + "[24] M. Everingham, S. M. A. Eslami, L. Van-Gool, C. K. I. Williams, J. Winn, and A. Zisserman. The Pascal visual object classes (voc) challenge. International Journal of Computer Vision, 2010. 5", + "[25] K. Eykholt, I. Evtimov, E. Fernandes, B. Li, A. Rahmati, F. Tramér, A. Prakash, T. Kohno, and D. X. Song. Physical adversarial examples for object detectors. arXiv, 2018. 1", + "[26] L. Feng, B. An, and S. He. Collaboration based multi-label learning. AAAI Conference on Artificial Intelligence, 2019. 1", + "[27] I. J. Goodfellow, J. Shlens, and C. Szegedy. Explaining and harnessing adversarial examples. International Conference on Learning Representations, 2015. 1", + "[28] W. 
He, J. Wei, X. Chen, N. Carlini, and D. Song. Adversarial example defense: Ensembles of weak defenses are not strong. USENIX Workshop on Offensive Technologies, 2017. 2", + "[29] D. Hendrycks, K. Lee, and M. Mazeika. Using pre-training can improve model robustness and uncertainty. 2019. 2", + "[30] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. Natural adversarial examples. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15262-15271, 2021. 1" + ], + "bbox": [ + 503, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "24259", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[31] Lei Hsiung, Yun-Yun Tsai, Pin-Yu Chen, and Tsung-Yi Ho. Towards compositional adversarial robustness: Generalizing adversarial training to composite semantic perturbations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24658-24667, 2023. 1", + "[32] S. Hu, L. Ke, X. Wang, and S. Lyu. Tkml-ap: Adversarial attacks to top-k multi-label learning. arXiv, 2021. 2", + "[33] D. T. Huynh and E. Elhamifar. Interactive multi-label cnn learning with partial labels. IEEE Conference on Computer Vision and Pattern Recognition, 2020. 2", + "[34] Tooba Imtiaz, Morgan Kohler, Jared Miller, Zifeng Wang, Mario Sznaier, Octavia I Camps, and Jennifer G Dy. Saif: Sparse adversarial and interpretable attack framework. arXiv preprint arXiv:2212.07495, 2022. 2", + "[35] J. Li R. Ji, H. Liu, X. Hong, Y. Gao, and Q. Tian. Universal perturbation attack against image retrieval. International Conference on Computer Vision, 2019. 1", + "[36] Jinyuan Jia, Wenjie Qu, and Neil Zhenqiang Gong. Multiguard: Provably robust multi-label classification against adversarial examples. Advances in Neural Information Processing Systems, 2022. 
1, 3", + "[37] Youngwook Kim, Jae Myung Kim, Zeynep Akata, and Jungwoo Lee. Large loss matters in weakly supervised multi-label classification. 2022. 1", + "[38] Takumi Kobayashi. Two-way multi-label loss. 2023. 1", + "[39] A. Kurakin, I. Goodfellow, and S. Bengio. Adversarial machine learning at scale. International Conference on Learning Representations, 2017. 1", + "[40] A. Kuznetsova, H. Rom, N. Alldrin, J. Uijlings, I. Krasin, J. Pont-Tuset, S. Kamali, S. Popov, M. Malloci, A. Kolesnikov, T. Duerig, and V. Ferrari. The open images dataset v4: Unified image classification, object detection, and visual relationship detection at scale. International Journal of Computer Vision, 2016. 2, 4, 5", + "[41] J. Lanchantin, T. Wang, V. Ordonez, and Y. Qi. General multi-label image classification with transformers. IEEE Conference on Computer Vision and Pattern Recognition, 2021. 2", + "[42] K. Lee, K. Lee, H. Lee, and J. Shin. A simple unified framework for detecting out-of-distribution samples and adversarial attacks. 2018. 2", + "[43] Peng Li, Peng Chen, Yonghong Xie, and Dezheng Zhang. Bi-modal learning with channel-wise attention for multi-label image classification. IEEE Access, 2020. 2", + "[44] Q. Li, M. Qiao, W. Bian, and D. Tao. Conditional graphical lasso for multi-label image classification. IEEE Conference on Computer Vision and Pattern Recognition, 2016. 2", + "[45] Q. Li, X. Peng, Y. Qiao, and Q. Peng. Learning label correlations for multi-label image recognition with graph networks. Pattern Recognition Letters, 2020. 2", + "[46] X. Li, F. Zhao, and Y. Guo. Multi-label image classification with a probabilistic label enhancement model. In UAI, 2014. 2", + "[47] Y. Li and L. Yang. More correlations better performance: Fully associative networks for multi-label image classification. International Conference on Pattern Recognition, 2021. 
2" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[48] Y. Li, Y. Song, and J. Luo. Improving pairwise ranking for multi-label image classification. IEEE Conference on Computer Vision and Pattern Recognition, 2017. 2", + "[49] Z. Li, W. Lu, Z. Sun, and W. Xing. Improving multi-label classification using scene cues. Multimedia Tools and Applications, 2017. 2", + "[50] Dekun Lin. Probability guided loss for long-tailed multi-label image classification. 2023. 1", + "[51] A. Madry, A. Makelov, L. Schmidt, D. Tsipras, and A. Vladu. Towards deep learning models resistant to adversarial attacks. International Conference on Learning Representations, 2018. 2", + "[52] S. Melacci, G. Ciravegna, A. Sotgiu, A. Demontis, B. Biggio, M. Gori, and F. Roli. Domain knowledge alleviates adversarial attacks in multi-label classifiers. 2021. 3", + "[53] J.-H. Metzen, M.-C. Kumar, T. Brox, and V. Fischer. Universal adversarial perturbations against semantic image segmentation. International Conference on Computer Vision, 2019. 1", + "[54] G. A. Miller. Wordnet: a lexical database for english. Communications of the ACM, 38(11), 1995. 5", + "[55] S. Moosavi-Dezfooli, A. Fawzi, and P. Frossard. Deepfool: a simple and accurate method to fool deep neural networks. IEEE Conference on Computer Vision and Pattern Recognition, 2016. 6", + "[56] J. Nam, E. L. Mencia, H. J. Kim, and J. Furnkranz. Maximizing subset accuracy with recurrent neural networks in multi-label classification. Neural Information Processing Systems, 2017. 2", + "[57] T. Pang, K. Xu, C. Du, N. Chen, and J. Zhu. Improving adversarial robustness via promoting ensemble diversity. International Conference on Machine learning, 2019. 2", + "[58] T. Pang, K. Xu, Y. Dong, C. Du, N. Chen, and J. Zhu. Rethinking softmax cross-entropy loss for adversarial robustness. arXiv, 2020.", + "[59] T. Pang, X. Yang, Y. Dong, K. Xu, H. Su, and J. Zhu. 
Boosting adversarial training with hypersphere embedding. Neural Information Processing Systems, 2020. 2", + "[60] Nicolas Papernot, Patrick Mcdaniel, Somesh Jha, Matt Fredrikson, Z. Berkay Celik, and Ananthram Swami. The limitations of deep learning in adversarial settings. IEEE European Symposium on Security and Privacy (EuroS&P), 2016. 1", + "[61] Tao Pu, Tianshui Chen, Hefeng Wu, and Liang Lin. Semantic-aware representation blending for multi-label image recognition with partial labels. 2022. 1", + "[62] Zeyu Qin, Yanbo Fan, Yi Liu, Li Shen, Yong Zhang, Jue Wang, and Baoyuan Wu. Boosting the transferability of adversarial attacks with reverse adversarial perturbation. Advances in Neural Information Processing Systems, 35:29845-29858, 2022. 2", + "[63] T. Ridnik, H. Lawen, A. Noy, and I. Friedman. Tresnet: High performancegpu-dedicated architecture. ArXiv preprint arXiv:2003.13630, 2020.5", + "[64] Tal Ridnik, Emanuel Ben-Baruch, Nadav Zamir, Asaf Noy, Itamar Friedman, Matan Protter, and Lihi Zelnik-Manor. Asymmetric loss for multi-label classification. 2021. 5, 6, 7" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "24260", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[65] Tal Ridnik, Gilad Sharir, Avi Ben-Cohen, Emanuel Ben-Baruch, and Asaf Noy. Ml-decoder: Scalable and versatile classification head. 2023. 5, 6", + "[66] Jérôme Rony, Luiz G Hafemann, Luiz S Oliveira, Ismail Ben Ayed, Robert Sabourin, and Eric Granger. Decoupling direction and norm for efficient gradient-based 12 adversarial attacks and defenses. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4322-4330, 2019. 2", + "[67] J. Cohen and E. Rosenfeld and Z. Kolter. Certified adversarial robustness via randomized smoothing. International Conference on Machine learning, 2019. 2", + "[68] A. Shafahi, M. 
Najibi, A. Ghiasi, Z. Xu, J. Dickerson, C. Studer, L. Davis, G. Taylor, and T. Goldstein. Adversarial training for free! Neural Information Processing Systems, 2019.", + "[69] Nasim Shafiee and Ehsan Elhamifar. Zero-shot attribute attacks on fine-grained recognition models. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part V, pages 262-282. Springer, 2022. 1, 2", + "[70] Nitish Shukla and Sudipta Banerjee. Generating adversarial attacks in the latent space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 730-739, 2023. 1", + "[71] Q. Song, H. Jin, X. Huang, and X. Hu. Multi-label adversarial perturbations. IEEE International Conference on Data Mining, 2018. 1, 2, 3, 6", + "[72] R. Speer, J. Chin, and C. Havasi. Conceptnet 5.5: An open multilingual graph of general knowledge. 2017. 5", + "[73] C. Szegedy, W. Zaremba, I. Sutskever, J. Bruna, D. Erhan, I. Goodfellow, and R. Fergus. Intriguing properties of neural networks. International Conference on Learning Representations, 2014. 2", + "[74] N. Tursynbek, A. Petiushko, and I. Oseledets. Geometry-inspired top-k adversarial perturbations. arXiv, 2020. 2", + "[75] J. Uesato, J. B. Alayrac, P. Huang, R. Stanforth, A. Fawzi, and P. Kohli. Are labels required for improving adversarial robustness? Neural Information Processing Systems, 2019. 2", + "[76] Thomas Verelst, Paul K Rubenstein, Marcin Eichner, Tinne Tuytelaars, and Maxim Berman. Spatial consistency loss for training multi-label classifiers from single-label annotations. 2023. 2", + "[77] J. Wang, Y. Yang, J. Mao, Z. Huang, C. Huang, and W. Xu. Cnn-rnn: A unified framework for multi-label image classification. 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 2", + "[78] Z. Wang, T. Chen, G. Li, G. Li, and L. Lin. Multi-label image recognition by recurrently discovering attentional regions. 
IEEE International Conference on Computer Vision, 2017. 2", + "[79] Y. Wu, H. Liu, S. Feng, Y. Jin, G. Lyu, and Z. Wu. Gm-mlic: Graph matching based multi-label image classification. International Joint Conference on Artificial Intelligence, 2021. 2" + ], + "bbox": [ + 78, + 90, + 470, + 898 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[80] Z. Wu and M. Palmer. Verbs semantics and lexical selection. Annual Meeting on Association for Computational Linguistics, 1994. 5", + "[81] C. Xie, Z. Zhang, Y. Zhou, S. Bai, J. Wang, Z. Ren, and A. Yuille. Improving transferability of adversarial examples with input diversity. IEEE Conference on Computer Vision and Pattern Recognition, 2019. 2", + "[82] Ming-Kun Xie, Jiahao Xiao, and Sheng-Jun Huang. Label-aware global consistency for multi-label learning with single positive labels. 2022. 1", + "[83] J. Xu, H. Tian, Z. Wang, Y. Wang, W. Kang, and F. Chen. Joint input and output space learning for multi-label image classification. IEEE Transactions on Multimedia, 2020. 2", + "[84] W. Xu, D. Evans, and Y. Qi. Feature squeezing: Detecting adversarial examples in deep neural networks. Network and Distributed Systems Security Symposium, 2018. 2", + "[85] H. Yang, J. T. Zhou, Y. Zhang, B. Gao, J. Wu, and J. Cai. Exploit bounding box annotations for multi-label object recognition. IEEE Conference on Computer Vision and Pattern Recognition, 2016. 1", + "[86] Zhuo Yang, Yufei Han, and Xiangliang Zhang. Characterizing the evasion attackability of multi-label classifiers. 2021. 1, 2", + "[87] Z. Yang, Y. Han, and X. Zhang. Attack transferability characterization for adversarially robust multi-label classification. 2021. 1, 2", + "[88] J. Ye, J. He, X. Peng, W. Wu, and Y. Qiao. Attention-driven dynamic graph convolutional network for multi-label image recognition. European Conference on Computer Vision, 2020. 2", + "[89] R. You, Z. Guo, L. Cui, X. Long, S. Y. Bao, and S. Wen. 
Cross-modality attention with semantic graph embedding for multi-label classification. AAAI Conference on Artificial Intelligence, 2020. 2", + "[90] X. Yuan, P. He, Q. Zhu, and X. Li. Adversarial examples: Attacks and defenses for deep learning. IEEE Transactions on Neural Networks and Learning Systems, 2019. 2", + "[91] ML. Zhang and Z. Zhou. Multilabel neural networks with applications to functional genomics and text categorization. IEEE Transactions on Knowledge and Data Engineering, 2006. 2", + "[92] Shu Zhang, Ran Xu, Caiming Xiong, and Chetan Ramaiah. Use all the labels: A hierarchical multi-label contrastive learning framework. 2022. 2", + "[93] Z. Zhao, G. Chen, J. Wang, Y. Yang, F. Song, and J. Sun. Attack as defense: Characterizing adversarial examples using robustness. arXiv, 2021. 2", + "[94] Donghao Zhou, Pengfei Chen, Qiong Wang, Guangyong Chen, and Pheng-Ann Heng. Acknowledging the unknown for multi-label learning with single positive labels. 2022. 1", + "[95] N. Zhou, W. Luo, X. Lin, P. Xu, and Z.. Zhang. Generating multi-label adversarial examples by linear programming. International Joint Conference on Neural Networks, 2020. 3", + "[96] N. Zhou, W. Luo, J. Zhang, L. Kong, and H. Zhang. Hiding all labels for multi-label images: An empirical study of adversarial examples. International Joint Conference on Neural Networks, 2021. 2, 7" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "24261", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[97] Y. Zhu, J. T. Kwok, and Z. Zhou. Multi-label learning with global and local label correlation. IEEE Transactions on Knowledge and Data Engineering, 2018. 2", + "[98] D. Zügner, A. Akbarnejad, and S. Gümnmann. Adversarial attacks on neural networks for graph data. International Conference on Knowledge Discovery & Data Mining, 2018. 
1" + ], + "bbox": [ + 78, + 90, + 468, + 189 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "24262", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 11 + } +] \ No newline at end of file diff --git a/2024/Semantic-Aware Multi-Label Adversarial Attacks/a7635c9b-bbb0-4721-869c-8ded1f1d58af_model.json b/2024/Semantic-Aware Multi-Label Adversarial Attacks/a7635c9b-bbb0-4721-869c-8ded1f1d58af_model.json new file mode 100644 index 0000000000000000000000000000000000000000..b90b154af8a14ae119f7ede331219073614c8d04 --- /dev/null +++ b/2024/Semantic-Aware Multi-Label Adversarial Attacks/a7635c9b-bbb0-4721-869c-8ded1f1d58af_model.json @@ -0,0 +1,2886 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.234, + 0.131, + 0.738, + 0.151 + ], + "angle": 0, + "content": "Semantic-Aware Multi-Label Adversarial Attacks" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.182, + 0.424, + 0.216 + ], + "angle": 0, + "content": "Hassan Mahmood Northeastern University" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.22, + 0.446, + 0.233 + ], + "angle": 0, + "content": "mahmood.h@northeastern.edu" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.182, + 0.724, + 0.218 + ], + "angle": 0, + "content": "Ehsan Elhamifar Northeastern University" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.22, + 0.752, + 0.233 + ], + "angle": 0, + "content": "e.elhamifar@northeastern.edu" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.269, + 0.314, + 0.285 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.302, + 0.474, + 0.695 + ], + "angle": 0, + "content": "Despite its importance, generating attacks for multi-label learning (MLL) models has received much less attention compared to multi-class recognition. Attacking an MLL model by optimizing a loss on the target set of labels has often the undesired consequence of changing the predictions for other labels. On the other hand, adding a loss on the remaining labels to keep them fixed leads to highly negatively correlated gradient directions, reducing the attack effectiveness. In this paper, we develop a framework for crafting effective and semantic-aware adversarial attacks for MLL. First, to obtain an attack that leads to semantically consistent predictions across all labels, we find a minimal super-set of the target labels, referred to as consistent target set. To do so, we develop an efficient search algorithm over a knowledge graph, which encodes label dependencies. 
Next, we propose an optimization that searches for an attack that modifies the predictions of labels in the consistent target set while ensuring other labels will not get affected. This leads to an efficient algorithm that projects the gradient of the consistent target set loss onto the orthogonal direction of the gradient of the loss on other labels. Our framework can generate attacks on different target set sizes and for MLL with thousands of labels (as in OpenImages). Finally, by extensive experiments on three datasets and several MLL models, we show that our method generates both successful and semantically consistent attacks.1" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.719, + 0.21, + 0.735 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.741, + 0.47, + 0.864 + ], + "angle": 0, + "content": "Despite the tremendous success of Deep Neural Networks (DNNs) for image recognition, DNNs are vulnerable to adversarial attacks, i.e., imperceptible image perturbations that result in incorrect prediction with high confidence [9, 25, 27, 30, 35, 39, 53, 60, 69, 70, 98]. Understanding and improving the robustness of DNNs has motivated a large body of research on generating adversarial perturbations and subsequently using them to design defense mech-" + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.273, + 0.868, + 0.51 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.514, + 0.892, + 0.588 + ], + "angle": 0, + "content": "Figure 1. Generating effective attacks for an MLL model is challenging. Top: Two groups of semantically related labels. Green nodes show labels predicted as present before the attack. Bottom: While an attack on the target label 'bicycle' succeeds, it fails to turn off 'vehicle' and 'wheeled vehicle' for \\(\\epsilon < 0.2\\). 
On the other hand, for \\(\\epsilon > 0.125\\), the attack changes the prediction for the non-target label 'person', which is undesired." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.61, + 0.892, + 0.685 + ], + "angle": 0, + "content": "anisms, e.g., by detecting attacks or retraining the model using perturbed images. The majority of existing works, however, have focused on multi-class recognition (MCR), in which only one class must be predicted in an image [14, 21, 26, 31, 37, 82, 85]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.688, + 0.893, + 0.853 + ], + "angle": 0, + "content": "On the other hand, many real-world applications require finding multiple labels in an image. This includes human-object interaction learning (e.g., recognizing hands and interacting objects), autonomous driving (e.g., recognizing cars, bikes, pedestrians, roads, signs, etc), assistive robotics and surveillance. Therefore, multi-label learning (MLL) aims at recognizing all labels in an image [14, 26, 38, 50, 61, 85, 94]. However, despite its importance and fundamental differences with respect to attacks for MCR (see Figure 1), adversarial attacks for MLL has received much less attention in the literature [1, 2, 36, 71, 86, 87]." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.856, + 0.894, + 0.901 + ], + "angle": 0, + "content": "The main difference between attacks for MCR and MLL stems from the different ways decision boundaries between labels is learned and structured for the two problems. 
In" + }, + { + "type": "page_footnote", + "bbox": [ + 0.077, + 0.875, + 0.469, + 0.9 + ], + "angle": 0, + "content": "1The code of this work is available at https://github.com/hassan-mahmood/SemanticMLLAttacks.git" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "24251" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.272 + ], + "angle": 0, + "content": "MCR, different labels compete with each other as only one label must be present/predicted. Therefore, attacking an on present label leads to turning it off while automatically turning on another label, see Figure 2 (left). On the other hand, in MLL, labels do not compete, where none, some or all labels can be predicted as present in an image. Thus, attacking a present or an absent label can lead to changing the predictions for none, several or all other labels, as shown in Figure 2 (right). This often has the undesired effect of inconsistent predictions, which can simply be used to detect the attack (e.g., turning off 'pedestrian' can turn on 'bike' and 'stop sign' while turning off 'road')." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.273, + 0.473, + 0.485 + ], + "angle": 0, + "content": "One can try to prevent changing predictions of other labels by crafting the attack while including a loss that enforces predictions of other labels to stay intact. However, as we show, the gradient of the loss for fixing other labels often is highly negatively correlated with the gradient of the loss on the label we want to attack, hence, counteracting the effect of each other. This problem gets more pronounced when the number of labels increases (e.g., in Open Images dataset [40] with 9,600 labels) and the gradient of this additional loss gets larger too. 
Also, fixing predictions for all other labels still may lead to semantic inconsistency among predictions (e.g., turning off 'vehicle' requires turning off 'car' and 'truck' too, otherwise 'vehicle' being absent while 'car' being present can be used to detect the attack)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.488, + 0.473, + 0.807 + ], + "angle": 0, + "content": "Paper Contributions. We develop a framework for crafting adversarial attacks for MLL that addresses the above challenges. First, to obtain an attack on a target set of labels that leads to semantically consistent predictions across all labels, we find a minimal superset of the target set (referred to as consistent target set) to be attacked/modified. To do so, we develop an efficient search algorithm over a knowledge graph, which encodes label dependencies. Second, we show that finding the attack by optimizing the sum of two losses, one over the consistent target set and the other over other labels, has opposite gradient directions for the two losses, which leads to inefficient perturbations. Third, we propose an optimization that searches for an attack that modifies the predictions of labels in the consistent target set while ensuring that other labels will not get affected. Our optimization leads to a projected gradient algorithm that projects the gradient of the loss for the consistent target set onto the orthogonal direction of the gradient of the loss on other labels. Finally, by extensive experiments on three datasets and several MLL models, we show that our framework generates both successful and semantically consistent attacks." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.811, + 0.22, + 0.827 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.832, + 0.304, + 0.849 + ], + "angle": 0, + "content": "2.1. 
Multi-Label Recognition" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.472, + 0.901 + ], + "angle": 0, + "content": "The goal of multi-label learning (MLL) is to find all classes of objects (or even abstract concepts) in an image. As compared to multi-class classification, which finds a sin" + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.089, + 0.673, + 0.203 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.683, + 0.089, + 0.871, + 0.203 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.209, + 0.895, + 0.26 + ], + "angle": 0, + "content": "Figure 2. Left: In multi-class recognition (MCR), attacking the present label leads to automatically turning on another label, as labels compete with each other. Right: In multi-label learning (MLL), attacking a label can lead to none \\((\\pmb{x}_1')\\), some \\((\\pmb{x}_2')\\) or all \\((\\pmb{x}_3')\\) other labels changing." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.283, + 0.895, + 0.45 + ], + "angle": 0, + "content": "gle dominant class in an image, MLL is a harder task, since any combination of labels can be present in an image and many labels often correspond to small image regions. This has motivated a large body of research for designing effective MLL methods, using graphical models[44, 46], different loss functions for handling label imbalance [6, 18, 48, 49, 76, 91], exploiting external knowledge, label correlations, and hierarchical relations among labels [13, 19, 33, 43, 56, 78, 88, 89, 92, 97], or using a combination of label and image feature correlations [41, 45, 47, 77, 79, 83] to improve the multi-label performance." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.458, + 0.692, + 0.472 + ], + "angle": 0, + "content": "2.2. 
Adversarial Attacks" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.478, + 0.895, + 0.659 + ], + "angle": 0, + "content": "Deep Neural Networks (DNNs) have been shown to be vulnerable to small adversarial perturbations, which can easily fool the model [3, 12, 66, 73, 81]. Therefore, many works have studied different ways to design efficient attacks and defense mechanisms for DNNs [4, 5, 10, 11, 20, 22, 23, 28, 29, 34, 42, 51, 57–59, 62, 67–69, 74, 75, 84, 93]. The adversarial attacks can be divided into several categories based on different criteria [90] such as white-box and black-box, image agnostic and image-specific, targeted and untargeted, or restricted to perturb small image regions and unrestricted attacks. In the paper, we generate white-box attacks for multi-label recognition, i.e., assume access to the MLL model." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.67, + 0.783, + 0.684 + ], + "angle": 0, + "content": "2.2.1 Multi-Label Adversarial Attacks" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.689, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Motivated by the increasing interest in the multi-label recognition problem, few works have recently studied MLL attacks. [71] studies a framework for attacking multi-label recognition and ranking systems. However, it does not exploit any relationships among labels to design attacks, which as we show is important to design effective attacks. We use the attacks from this work as baselines in our experiments. Yang et al. [86, 87] designed untargeted attacks for multi-label classification to change as many labels as possible and proposed a framework to measure how well an MLL model can be attacked. In comparison, our focus is targeted multi-label attacks with semantic relationships. Hu et al. 
[32] proposed to exploit ranking relations to design attacks for top-\\(k\\) multi-label models and [96] proposed an attack to" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "24252" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.11, + 0.094, + 0.432, + 0.304 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.311, + 0.471, + 0.386 + ], + "angle": 0, + "content": "Figure 3. Multi-label learning predicts several labels for an image (see \"MLL Output\"). Attacking a target set ('vehicle' on the top or 'person' and 'bird' on the bottom) using a naive multi-label attack leads to prediction semantic inconsistencies ('car' and 'motorcycle' being on while 'vehicle' is off or 'person' and 'bird' being off while 'animal' is on). However, GMLA handles a large number of labels while achieving semantic consistency." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.405, + 0.471, + 0.722 + ], + "angle": 0, + "content": "hide all labels present in an image, whereas we consider the minimal set of semantically related labels to be attacked. Aich et al. [2] leveraged local patch differences of different objects to generate multi-object attacks and [1] proposed a CLIP-based generative model to generate multi-object attacks in the black-box setting. Jia et al. [36] proposed theoretical robustness guarantees to defend against multi-label adversarial attacks and [52] exploited domain knowledge context to detect adversarial attacks. Context-aware attacks [7, 8] fool context-aware attack detection methods by attacking the label and its context simultaneously. The context in these works is defined in terms of cooccurring labels. In comparison, we propose to attack labels based on their semantic relationships. Moreover, none of these works have addressed the problem of negative gradient correlation in generating large-scale dataset attacks. 
Among the existing literature, Nan et al. [95] is also comparable to our attack method, and we use it as a baseline. They proposed a fast linear programming-based adversarial example generation algorithm for MLL to minimize the perturbation norm required to achieve a target label." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.735, + 0.41, + 0.753 + ], + "angle": 0, + "content": "3. Multi-Label Learning Attack (MLA)" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.757, + 0.239, + 0.773 + ], + "angle": 0, + "content": "3.1. Problem Setting" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.471, + 0.901 + ], + "angle": 0, + "content": "We study generating adversarial attacks for the Multi-Label Learning (MLL) task. In MLL, multiple labels can appear in an image, see Figure 3, as opposed to the multi-class recognition (MCR), where each image has only one label. Let \\(\\mathcal{C}\\) denote the set of all labels. For an image \\(x\\in \\mathbb{R}^d\\), let \\(\\pmb {y}\\in \\{0,1\\}^{|\\mathcal{C}|}\\) denote the set of its labels, indicating the presence (1) or absence (0) of each label in \\(\\mathcal{C}\\) in the image. Let \\(\\mathcal{F}:\\mathbb{R}^d\\to \\mathbb{R}^{|\\mathcal{C}|}\\) be a multi-label classifier, which we assume" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.258 + ], + "angle": 0, + "content": "has already been learned using training images. The multi-label classifier \\(\\mathcal{F} = \\{f_1, f_2, \\ldots, f_{|\\mathcal{C}|}\\}\\) consists of \\(|\\mathcal{C}|\\) binary classifiers for each label, where \\(f_c(\\pmb{x}) \\in (-\\infty, +\\infty)\\) is the score of the classifier \\(c\\). Therefore, the probability of label \\(c\\) being present in the image \\(\\pmb{x}\\) is given by \\(\\hat{y}_c = \\sigma(f_c(\\pmb{x}))\\), where \\(\\sigma(\\cdot)\\) is the sigmoid function. 
Finally, let \\(\\Omega_{\\pmb{x}} \\subseteq \\mathcal{C}\\) denote the target set of labels in the image \\(\\pmb{x}\\) which we want to attack, i.e., after the attack the present labels in \\(\\Omega_{\\pmb{x}}\\) must become absent and vice versa. In the next subsection, we study the existing approaches [71] to generate multi-label attacks and identify their drawbacks." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.265, + 0.794, + 0.28 + ], + "angle": 0, + "content": "3.2. Naive Multi-Label Attack (MLA)" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.285, + 0.892, + 0.346 + ], + "angle": 0, + "content": "For an attack on \\( \\pmb{x} \\) that modifies the labels in \\( \\Omega_{\\pmb{x}} \\), one can generate a small perturbation \\( e \\in \\mathbb{R}^d \\) by minimizing the negative multi-label learning loss for labels in \\( \\Omega_{\\pmb{x}} \\) while restricting the magnitude of \\( e \\). More precisely, we can solve" + }, + { + "type": "equation", + "bbox": [ + 0.516, + 0.36, + 0.892, + 0.381 + ], + "angle": 0, + "content": "\\[\n\\text {M L A - U :} \\min _ {\\boldsymbol {e}} - \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {e}, \\Omega_ {\\boldsymbol {x}}) \\text {s . 
t .} \\| \\boldsymbol {e} \\| _ {p} \\leq \\epsilon , \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.392, + 0.891, + 0.423 + ], + "angle": 0, + "content": "where \\(\\| \\cdot \\| _p\\) is the \\(\\ell_p\\)-norm and \\(\\mathcal{L}_{ce}(\\boldsymbol{x}',\\Gamma_{\\boldsymbol{x}'})\\) is the binary cross-entropy loss for image \\(\\boldsymbol{x}'\\) on labels in \\(\\Gamma_{\\boldsymbol{x}'}\\), defined as" + }, + { + "type": "equation", + "bbox": [ + 0.516, + 0.438, + 0.623, + 0.455 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {b c e} \\left(\\boldsymbol {x} ^ {\\prime}, \\Omega_ {\\boldsymbol {x} ^ {\\prime}}\\right) \\triangleq\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.527, + 0.457, + 0.892, + 0.488 + ], + "angle": 0, + "content": "\\[\n\\sum_ {c \\in \\Omega_ {\\boldsymbol {x} ^ {\\prime}}} - y _ {c} \\log \\sigma \\left(f _ {c} \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right) - \\left(1 - y _ {c}\\right) \\log \\left(1 - \\sigma \\left(f _ {c} \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right)\\right). \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.499, + 0.891, + 0.589 + ], + "angle": 0, + "content": "The drawback of (1) is that attack on \\(\\Omega_{\\mathbf{x}}\\) can lead to changing the predictions for other labels too, see Figure 2 (right). This often leads to inconsistent predictions, which can simply be used to detect the attack (e.g., turning off 'pedestrian' can turn on 'bike' and 'stop sign' while turning off 'road'), hence significantly reducing the effectiveness of the attack." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.59, + 0.892, + 0.664 + ], + "angle": 0, + "content": "To address this drawback, one can try to prevent changing predictions of other labels \\((\\bar{\\Omega}_{\\pmb{x}})\\), which is the complement of \\(\\Omega_{\\pmb{x}}\\) with respect to \\(\\mathcal{C}\\) by crafting the attack while including a loss that enforces predictions of other labels to stay intact. More precisely, one can solve" + }, + { + "type": "equation", + "bbox": [ + 0.52, + 0.673, + 0.892, + 0.712 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\text {M L A - C :} \\min _ {\\boldsymbol {e}} - \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {e}, \\Omega_ {\\boldsymbol {x}}) + \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {e}, \\bar {\\Omega} _ {\\boldsymbol {x}}), \\tag {3} \\\\ \\begin{array}{l} \\text {s . t .} \\| e \\| _ {p} \\leq \\epsilon , \\end{array} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.892, + 0.901 + ], + "angle": 0, + "content": "where the first term in the objective function tries to flip the labels in \\(\\Omega_{x}\\) while the second term preserves the labels in \\(\\bar{\\Omega}_{x}\\). Notice that with the additional objective, the space of perturbations in (3) is smaller than that in (1), yet it ensures not modifying labels outside the target set. However, as we verify by empirical results, the gradient of the loss for fixing other labels often is highly negatively correlated with the gradient of the loss on the target labels, hence, counteracting the effect of each other. We hypothesize that this effect is due to strong spurious correlations among labels, learnt by the model during training. 
Given two highly-correlated labels in an image, attacking one label while fixing the other" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "24253" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.228 + ], + "angle": 0, + "content": "using (3) would lead to opposite gradients. This problem gets more pronounced when the number of labels increases (e.g., in Open Images dataset [40] with 9,600 labels) and the gradient of this additional loss gets larger too. Moreover, fixing predictions for labels in \\(\\bar{\\Omega}_{\\pmb{x}}\\) still may lead to semantic inconsistencies in predictions (e.g., turning off 'vehicle' requires turning off 'car' and 'truck', otherwise 'vehicle' being off while 'car' being on can be used to detect the attack), hence, reducing the attack effectiveness." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.239, + 0.45, + 0.256 + ], + "angle": 0, + "content": "4. Generalized Multi-Label Attack (GMLA)" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.265, + 0.473, + 0.431 + ], + "angle": 0, + "content": "We develop a framework for crafting adversarial attacks for MLL that addresses the challenges of conventional MLA, discussed above. First, to obtain an attack on a target label set \\(\\Omega_{x}\\) that leads to semantically consistent predictions across all labels, we find a minimal superset of the target set \\(\\Psi_{x}\\) (referred to as consistent target set) that needs to be attacked/modified. Given that there are often multiple such superset, we develop an efficient search algorithm over a knowledge graph \\(\\mathcal{G}\\) that encodes label dependencies. We denote by \\(\\Psi_{x} = h\\bigl (\\Omega_{x},\\mathcal{G}\\bigr)\\) the output of the search algorithm, which we will describe in detail later in this section." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.438, + 0.292, + 0.455 + ], + "angle": 0, + "content": "4.1. 
Proposed Optimization" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.461, + 0.47, + 0.522 + ], + "angle": 0, + "content": "We then study a projection-based optimization that searches for an attack that modifies the predictions of labels in \\(\\Psi_{\\pmb{x}}\\) while ensuring that other labels \\(\\bar{\\Psi}_{\\pmb{x}}\\) will not get affected. More specifically, we propose to solve" + }, + { + "type": "equation", + "bbox": [ + 0.099, + 0.528, + 0.469, + 0.591 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\text {G M L A :} \\min _ {\\boldsymbol {e}} - \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {e}, \\Psi_ {\\boldsymbol {x}}), \\\\ \\text {s . t .} \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {e}, \\bar {\\Psi} _ {\\boldsymbol {x}}) = \\mathcal {L} _ {b c e} (\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}), \\tag {4} \\\\ \\| \\boldsymbol {e} \\| _ {p} \\leq \\epsilon , \\Psi_ {\\boldsymbol {x}} = h (\\Omega_ {\\boldsymbol {x}}, \\mathcal {G}), \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.595, + 0.47, + 0.731 + ], + "angle": 0, + "content": "where we only minimize the attack loss on the consistent target set \\(\\Psi_{\\mathbf{x}}\\), while requiring that the binary cross-entropy loss on other labels \\(\\bar{\\Psi}_{\\mathbf{x}}\\) stay the same after the attack. This means that instead of trying to make the predictions on other labels more confident as in (3), we try to keep them stay the same after the attack. As we also show in the experiments (see Figure 8), this significantly boosts the attack by resolving the high negative correlation of the gradients of the two losses in (3) and finding better attack directions." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.731, + 0.47, + 0.777 + ], + "angle": 0, + "content": "Since solving the optimization in (4) that ensures the first constraint is satisfied is difficult, we take a first-order approximation on this constraint around \\( x \\) (as \\( e \\) is small)," + }, + { + "type": "equation", + "bbox": [ + 0.126, + 0.781, + 0.47, + 0.837 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {e}, \\bar {\\Psi} _ {\\boldsymbol {x}}) \\approx \\mathcal {L} _ {b c e} (\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}) + \\boldsymbol {g} _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} ^ {\\top} \\boldsymbol {e}, \\\\ \\text {w h e r e ,} \\quad \\boldsymbol {g} _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} \\triangleq \\frac {\\partial \\mathcal {L} _ {b c e} (\\boldsymbol {x} , \\bar {\\Psi} _ {\\boldsymbol {x}})}{\\partial \\boldsymbol {x}}. \\end{array} \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.84, + 0.262, + 0.854 + ], + "angle": 0, + "content": "Thus, we can rewrite (4) as" + }, + { + "type": "equation", + "bbox": [ + 0.1, + 0.861, + 0.469, + 0.905 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\min _ {\\boldsymbol {e}} - \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {e}, \\Psi_ {\\boldsymbol {x}}), \\tag {6} \\\\ \\begin{array}{l} \\text {s . t .} \\quad \\boldsymbol {g} _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} ^ {\\top} \\boldsymbol {e} = \\mathbf {0}, \\| \\boldsymbol {e} \\| _ {p} \\leq \\epsilon , \\Psi_ {\\boldsymbol {x}} = h \\big (\\Omega_ {\\boldsymbol {x}}, \\mathcal {G} \\big). 
\\end{array} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.892, + 0.137 + ], + "angle": 0, + "content": "The constraint \\(g_{x,\\bar{\\Psi}_x}^\\top e = 0\\) implies that \\(e\\) must be in the orthogonal space to the gradient direction \\(g_{x,\\bar{\\Psi}_x}\\), hence not changing other labels. Thus, we can write" + }, + { + "type": "equation", + "bbox": [ + 0.542, + 0.141, + 0.892, + 0.18 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {e} = \\boldsymbol {P} _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} \\alpha , \\quad \\boldsymbol {P} _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} \\triangleq \\boldsymbol {I} - \\frac {\\boldsymbol {g} _ {\\boldsymbol {x} , \\bar {\\Psi} _ {\\boldsymbol {x}}} \\boldsymbol {g} _ {\\boldsymbol {x} , \\bar {\\Psi} _ {\\boldsymbol {x}}} ^ {\\top}}{\\| \\boldsymbol {g} _ {\\boldsymbol {x} , \\bar {\\Psi} _ {\\boldsymbol {x}}} \\| _ {2} ^ {2}}, \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.191, + 0.892, + 0.237 + ], + "angle": 0, + "content": "for some \\(\\alpha \\in \\mathbb{R}^d\\), where \\(P_{x,\\bar{\\Psi}_x}\\) is the orthogonal projection matrix on the gradient \\(g_{x,\\bar{\\Psi}_x}\\). Thus, we can write the optimization in (4) as" + }, + { + "type": "equation", + "bbox": [ + 0.514, + 0.248, + 0.892, + 0.291 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\text {G M L A :} \\min _ {\\boldsymbol {\\alpha}} - \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {P} _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} \\boldsymbol {\\alpha}, \\Psi_ {\\boldsymbol {x}}), \\tag {8} \\\\ \\begin{array}{l} \\text {s . t .} \\| P _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} \\boldsymbol {\\alpha} \\| _ {p} \\leq \\epsilon , \\quad \\Psi_ {\\boldsymbol {x}} = h (\\Omega_ {\\boldsymbol {x}}, \\mathcal {G}). 
\\end{array} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.3, + 0.892, + 0.348 + ], + "angle": 0, + "content": "We follow AutoPGD [17] to iteratively solve (8). At each iteration, we linearly approximate the objective function and solve \\((\\pmb{g}_{\\pmb{x},\\Psi_{\\pmb{x}}}\\) is the gradient of \\(\\mathcal{L}_{bce}(\\pmb {x},\\Psi_{\\pmb{x}}))\\)" + }, + { + "type": "equation", + "bbox": [ + 0.556, + 0.358, + 0.892, + 0.387 + ], + "angle": 0, + "content": "\\[\n\\min _ {\\boldsymbol {\\alpha}} - \\mathbf {g} _ {\\boldsymbol {x}, \\Psi_ {\\boldsymbol {x}}} ^ {\\top} \\left(\\boldsymbol {P} _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} \\boldsymbol {\\alpha}\\right), \\tag {9}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.562, + 0.385, + 0.837, + 0.403 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\text {s . t .} \\| P _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} \\boldsymbol {\\alpha} \\| _ {p} \\leq \\epsilon , \\quad \\Psi_ {\\boldsymbol {x}} = h (\\Omega_ {\\boldsymbol {x}}, \\mathcal {G}). \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.413, + 0.892, + 0.443 + ], + "angle": 0, + "content": "As we show in the supplementary materials, we can solve (9) for \\( p = \\infty \\) and get the closed form update for \\( e \\) as" + }, + { + "type": "equation", + "bbox": [ + 0.527, + 0.453, + 0.892, + 0.489 + ], + "angle": 0, + "content": "\\[\ne = \\epsilon \\cdot \\frac {\\boldsymbol {P} _ {\\boldsymbol {x} , \\bar {\\Psi} _ {\\boldsymbol {x}}} \\boldsymbol {\\nu}}{\\| \\boldsymbol {P} _ {\\boldsymbol {x} , \\bar {\\Psi} _ {\\boldsymbol {x}}} \\boldsymbol {\\nu} \\| _ {\\infty}}, \\quad \\boldsymbol {\\nu} \\triangleq \\operatorname {s g n} \\left(\\boldsymbol {g} _ {\\boldsymbol {x}, \\Psi_ {\\boldsymbol {x}}}\\right). 
\\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.498, + 0.892, + 0.573 + ], + "angle": 0, + "content": "We further enhance the effectiveness of the attack, especially for the case when the gradients of both the targeted and non-targeted classes are aligned (have positive correlation). In such instances, our approach involves finding the direction \\( e \\) using" + }, + { + "type": "equation", + "bbox": [ + 0.508, + 0.584, + 0.892, + 0.617 + ], + "angle": 0, + "content": "\\[\n\\min _ {e} e ^ {T} \\left(- \\frac {\\mathbf {g} _ {\\mathbf {x} , \\Psi_ {\\mathbf {x}}}}{\\| \\mathbf {g} _ {\\mathbf {x} , \\Psi_ {\\mathbf {x}}} \\| _ {2}} + \\frac {\\mathbf {g} _ {\\mathbf {x} , \\bar {\\Psi} _ {\\mathbf {x}}}}{\\| \\mathbf {g} _ {\\mathbf {x} , \\bar {\\Psi} _ {\\mathbf {x}}} \\| _ {2}}\\right) \\text {s . t .} \\| e \\| _ {p} \\leq \\epsilon . \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.627, + 0.892, + 0.642 + ], + "angle": 0, + "content": "We provide more details and analysis in the supplementary." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.651, + 0.874, + 0.668 + ], + "angle": 0, + "content": "4.2. Consistent Target Set via Knowledge Graph" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.675, + 0.892, + 0.84 + ], + "angle": 0, + "content": "We obtain a consistent target set by developing an efficient search algorithm over a knowledge graph \\(\\mathcal{G}\\) that encodes label dependencies. Assume \\(\\mathcal{G} = (\\mathcal{C},\\mathcal{E})\\) is a directed acyclic knowledge graph built on the labels \\(\\mathcal{C}\\), where \\(\\mathcal{E}\\) denotes the set of edges (see below for details about building this graph). 
A consistent target set \\(\\Psi_{x}\\) is defined as a superset of the target nodes/labels \\(\\Omega_{x}\\) that if attacked successfully leads to MLL outputs so that \\(i)\\) when MLL predicts 1 for a parent node/label, then at least one of its children is also predicted as 1; \\(ii)\\) when all children of a node/label are predicted as 0, then the parent is predicted as 0." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.841, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Algorithm 1 shows our algorithm and the time complexity for each step to obtain the consistent target set. The algorithm works as follows. Given the target set \\(\\Omega_{x}\\), MLL predictions \\(\\mathcal{S}\\), and the adjacency matrix \\(\\mathcal{E}\\) of the knowledge" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "24254" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.304 + ], + "angle": 0, + "content": "graph, the algorithm finds the minimal superset of \\(\\Omega_{\\mathbf{x}}\\) to be modified. While attacking a label, we need to maintain its consistency with respect to its children and parents. To maintain children consistency, each child of the target node must be turned OFF unless that child has multiple parents ON. We parse the path from target node to the leaf nodes and perform the same operation on every node. Similarly, to maintain parents consistency, all parents must be turned OFF unless some parent has more than one child ON. We perform this process for each node along the path from target node to the root until there are no more nodes to modify. The upper bound of algorithm's time complexity is \\(\\mathcal{O}(\\Omega \\mathcal{C})\\). As Figure 4 shows, on the same graph, consistent target sets depend on the MLL predictions." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.311, + 0.473, + 0.523 + ], + "angle": 0, + "content": "Knowledge Graph Construction. 
To construct \\(\\mathcal{G}\\), we use WordNet [54], which contains rich semantic relationships between labels\\(^2\\). One can also use other sources, such as ConceptNet [72] or OpenImages semantic hierarchy [40]. We build a tree \\(\\mathcal{G} = (\\mathcal{C}, \\mathcal{E})\\) on all labels \\(\\mathcal{C}\\) using hypernym and hyponym relations of labels. This can also be easily extended to other relationships, e.g., antonymy, entailment, etc. For each label in \\(\\mathcal{C}\\), we use WordNet to extract its parent and child labels (e.g., for 'car', we obtain 'vehicle' as parent using its hypernyms). Since a word can be associated with several synsets, we choose the synset with the closest match to the label description. To build the tree, we use the maximum WUP similarity [80] between a child and multiple parent nodes to select a single parent." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.537, + 0.21, + 0.554 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.562, + 0.267, + 0.579 + ], + "angle": 0, + "content": "5.1. Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.585, + 0.47, + 0.844 + ], + "angle": 0, + "content": "Datasets. We use Pascal-VOC [24], NUS-WIDE [16] and OpenImages [40] for studying the effectiveness of multi-label attacks. For Pascal-VOC, we trained each MLL model on 8,000 images from the training sets of PASCAL-VOC 2007 and PASCAL-VOC 2012 and created the adversarial examples for the test set of PASCAL-VOC 2007. To build \\(\\mathcal{G}\\), we extracted abstract classes from WordNet; combining these with the original 20 labels, we obtained 35 labels/nodes. For NUS-WIDE, we trained each MLL model on 150K images from the training set and attacked the models using the test set of the dataset. We used WordNet to extract abstract classes and built a tree on labels. 
The total number of labels is 116, which includes 80 original labels and 36 additional abstract classes from WordNet. For OpenImages, we used the pre-trained model from [64] and used test images to generate the attacks. We use the official class hierarchy provided in OpenImages as semantic relationship information." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.85, + 0.468, + 0.866 + ], + "angle": 0, + "content": "Multi-Label Recognition Models. We investigate the ef" + }, + { + "type": "page_footnote", + "bbox": [ + 0.078, + 0.875, + 0.469, + 0.901 + ], + "angle": 0, + "content": "2WordNet is a lexical database for the English language, containing 155,327 words organized in 175,979 synsets." + }, + { + "type": "code_caption", + "bbox": [ + 0.51, + 0.095, + 0.837, + 0.11 + ], + "angle": 0, + "content": "Algorithm 1: Consistent Target Set Construction" + }, + { + "type": "algorithm", + "bbox": [ + 0.501, + 0.112, + 0.892, + 0.435 + ], + "angle": 0, + "content": "Input: \\(\\Omega\\) : Target Set, \\(S\\) : MLL Label Predictions, \\(\\mathcal{E}\\) : Knowledge Graph's Adjacency Matrix Output: \\(\\Gamma\\) Expanded Target Set Procedure: \\(f_{select}(X)\\) : return \\(\\{i:X_i = True\\}\\) Procedure \\(f_{child.}(n,\\mathcal{E},\\mathcal{S})\\) .. return \\(f_{select}(\\mathcal{E}_{[n,:]}\\odot \\mathcal{S} == 1)\\) Procedure \\(f_{par.}(n,\\mathcal{E},\\mathcal{S})\\) .. return \\(f_{select}(\\mathcal{E}_{[:,n]}\\odot \\mathcal{S} == 1)\\) \nProcedure Consistent_Comp \\((n,V,\\Gamma ,f_1,f_2)\\) . 
Queue Q I \\(\\leftarrow f_1(n,\\mathcal{E},\\mathcal{S})\\) \\(\\triangleright \\mathcal{O}(1)\\) Q.enqueue(I) \\(\\triangleright \\mathcal{O}(1)\\) while \\(\\mathcal{Q}\\) is not empty do \\(\\triangleright \\mathcal{O}(\\mathcal{C})\\) \\(v_{n} = \\mathcal{Q}.dequeue()\\) if \\(v_{n}\\notin \\mathcal{V}\\) then \\(\\nu \\gets \\nu \\cup \\{v_n\\}\\) \\(\\triangleright \\mathcal{O}(1)\\) \\(I\\gets f_2(v_n,\\mathcal{E},\\mathcal{S})\\backslash \\Gamma\\) if \\(|I| < 2\\) then \\(\\Gamma \\leftarrow \\Gamma \\cup \\{v_n\\}\\) \\(\\triangleright \\mathcal{O}(1)\\) \\(I\\gets f_1(v_n,\\mathcal{E},\\mathcal{S})\\) Q.enqueue(I) \n\\(\\Gamma = \\{\\}\\) \nforeach \\(n\\in \\Omega\\) do \\(\\triangleright \\mathcal{O}(\\Omega)\\) V = {n} \n\\(\\Gamma \\leftarrow\\) Consistent_Comp(n,V,Γ,fchild., \\(f_{par.})\\) \\(\\triangleright \\mathcal{O}(\\mathcal{C})\\) \\(\\Gamma \\leftarrow\\) Consistent_Comp(n,V,Γ,fpar., \\(f_{child.})\\) \\(\\triangleright \\mathcal{O}(\\mathcal{C})\\)" + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.45, + 0.88, + 0.54 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.545, + 0.892, + 0.595 + ], + "angle": 0, + "content": "Figure 4. Examples of different consistent target sets obtained by Algorithm 1. Green nodes show the present labels predicted by the MLL and \\(\\Omega = \\{t\\}\\) is the target. The labels to be modified, \\(\\Psi\\) are shown within the red region and the labels to be fixed \\(\\bar{\\Psi}\\) are shown within the green region." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.612, + 0.871, + 0.627 + ], + "angle": 0, + "content": "fectiveness of multi-label attacks on three MLL models." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.631, + 0.892, + 0.722 + ], + "angle": 0, + "content": "- ML-GCN [15]: It explicitly learns relationships among labels using Graph Convolutional Networks (GCN). 
It builds a graph using the word embeddings and the cooccurrence matrix of labels and uses a GCN to extract information about label relationships. We trained the model using the binary cross-entropy loss." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.724, + 0.892, + 0.8 + ], + "angle": 0, + "content": "- Asymmetric Loss (ASL) [64]: It is an effective multi-label learning method that uses a novel loss for better optimization over highly imbalanced positive and negative class distributions. Following their experimental setting, we trained the TResNet-L [63] backbone." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.804, + 0.892, + 0.865 + ], + "angle": 0, + "content": "- ML-Decoder [65]: It is an attention-based unified decoder architecture for zero-shot, single-label, and multi-label classification. It uses a group-decoding scheme to alleviate the problem of scaling to large number of classes." + }, + { + "type": "list", + "bbox": [ + 0.498, + 0.631, + 0.892, + 0.865 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Perturbation Generation. For PASCAL-VOC and NUS-WIDE, we show results on a range of perturbation budgets." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "24255" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.119, + 0.09, + 0.299, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.303, + 0.091, + 0.482, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.487, + 0.091, + 0.665, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.669, + 0.091, + 0.848, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.216, + 0.894, + 0.241 + ], + "angle": 0, + "content": "Figure 5. 
Naive fooling rate \\((\\mathrm{FR}_N)\\) and graph-based fooling rate \\((\\mathrm{FR}_S)\\) of different attacks on ML-GCN model, trained on PASCAL-VOC for one and two label/node attacks. The x-axis shows the upper bound on the \\(l_{\\infty}\\)-norm of perturbations \\((\\epsilon)\\)." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.255, + 0.89, + 0.462 + ], + "angle": 0, + "content": "
DatasetPASCAL-VOCNUS-WIDE
Target Set Size|Ω| = 1|Ω| = 2|Ω| = 1|Ω| = 2
ModelAttack↑FRN↑FRS↓NTR↑SSIM↑FRN↑FRS↓NTR↑SSIM↑FRN↑FRS↓NTR↑SSIM↑FRN↑FRS↓NTR↑SSIM
ML-GEN [15]MLA-U [71]100.075.54.90.97100.068.04.50.9799.743.51.50.9699.331.71.60.96
MLA-C [71]99.968.93.00.9699.860.22.80.9796.427.40.40.9792.418.50.40.97
MLA-LP [65]56.16.700.10.9946.76.000.30.9919.33.500.10.9811.43.300.00.98
GMLA (Ours)100.099.42.70.97100.098.42.50.9899.295.80.50.9799.191.30.40.97
ASL [64]MLA-U [71]100.052.84.60.97100.048.34.80.98100.050.02.00.97100.043.32.10.97
MLA-C [71]100.039.72.30.9799.733.22.10.98100.035.50.70.97100.030.00.70.96
MLA-LP [65]15.82.400.10.9911.92.900.50.9920.84.800.00.9816.13.100.00.98
GMLA (Ours)100.098.82.20.97100.098.82.00.98100.096.10.80.97100.093.20.70.97
ML-Dec [65]MLA-U [71]99.766.25.30.9799.862.05.70.9898.856.44.10.9797.950.44.60.98
MLA-C [71]99.150.62.70.9897.540.72.40.9773.630.41.00.9768.226.70.90.97
MLA-LP [65]19.43.700.10.9817.63.200.20.9813.34.100.00.979.72.900.00.98
GMLA (Ours)99.196.22.70.9899.397.12.50.9795.184.91.10.9793.982.01.00.98
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.465, + 0.893, + 0.491 + ], + "angle": 0, + "content": "Table 1. Experimental evaluation of the four attack methods on three models for \\( \\epsilon = 0.01 \\). The values represent the mean computed using the attack performance across all the combinations of target classes of size \\( |\\Omega| \\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.517, + 0.47, + 0.623 + ], + "angle": 0, + "content": "For OpenImages with 9,600 labels, we perform experiments for large-scale attacks with different sizes of the target set for a fixed epsilon value. To generate the target sets for attack, we randomly draw 100 samples of size \\( k \\) labels. For each draw from OpenImages, we randomly sample \\( k / 2 \\) leaf nodes (labels) from the graph \\( \\mathcal{G} \\) and sample the remaining labels which are not part of the graph." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.633, + 0.47, + 0.831 + ], + "angle": 0, + "content": "Baselines. We use MLA-U and MLA-C as baselines, following Song et al. [71]. Additionally, we use MLA-LP [65] as a baseline, which generates adversarial perturbation for multi-label recognition by solving a linear programming problem using the interior point method while minimizing the \\( l_{\\infty} \\) norm. In contrast to other methods, it requires computing the Jacobian at each optimization step. In our experiments, MLA-LP did not converge for OpenImages. To provide a comprehensive comparison, we extend our evaluation to ML-DP [71], a greedy algorithm that computes multi-label attack perturbations using constraint linearization as introduced in DeepFool [55]. We show the results for ML-DP in supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Evaluation Metrics. 
Let \\(\\mathcal{I}\\) be the set of images that are attacked and \\(\\mathcal{A} \\subseteq \\mathcal{I}\\) denote the set of images that are successfully attacked, i.e., for \\(x \\in \\mathcal{A}\\), all labels in \\(\\Omega_x\\) change after the attack. Let \\(\\mathcal{A}_{\\mathcal{G}} \\subseteq \\mathcal{A}\\) denote the subset of \\(\\mathcal{A}\\) for" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.517, + 0.89, + 0.547 + ], + "angle": 0, + "content": "which the attack produces semantically consistent predictions in the output of MLL according to \\(\\mathcal{G}\\)." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.548, + 0.892, + 0.578 + ], + "angle": 0, + "content": "We define naive fooling rate, \\(FR_{N}\\), and semantic-based fooling rate, \\(FR_{S}\\), as" + }, + { + "type": "equation", + "bbox": [ + 0.602, + 0.584, + 0.891, + 0.614 + ], + "angle": 0, + "content": "\\[\nF R _ {N} = \\frac {| \\mathcal {A} |}{| \\mathcal {I} |}, F R _ {S} = \\frac {| \\mathcal {A} _ {\\mathcal {G}} |}{| \\mathcal {I} |}. \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.621, + 0.892, + 0.742 + ], + "angle": 0, + "content": "Thus, \\(FR_{N}\\) measures the fraction of attacked images whose attacks have been successful, without considering whether the MLL predictions are semantically consistent. On the other hand, \\(FR_{S}\\) captures the fraction of attacked images whose attacks have been successful and produced semantically consistent MLL predictions. 
We also define non-target flip rate, \\(NT_{R}\\), which is the percentage of semantically unrelated labels (labels in \\(\\bar{\\Psi}_{k}\\)) which were flipped by the attack, i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.555, + 0.751, + 0.891, + 0.79 + ], + "angle": 0, + "content": "\\[\nN T _ {R} = \\frac {1}{| \\mathcal {A} |} \\sum_ {k \\in \\mathcal {A}} \\frac {\\sum_ {i \\in \\bar {\\Psi} _ {k}} \\left(1 - \\delta \\left(f _ {i} ^ {(k)} , y _ {i} ^ {(k)}\\right)\\right)}{| \\bar {\\Psi} _ {k} |}, \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.793, + 0.892, + 0.902 + ], + "angle": 0, + "content": "where, \\(\\delta\\) is Kronecker delta function that equals 1 when the two inputs are equal and 0 otherwise, \\(y_{i}^{(k)},f_{i}^{(k)}\\in \\{0,1\\}\\) are the model predictions on clean and adversarial images respectively, of \\(i^{th}\\) non-target class of \\(k^{th}\\) successfully attacked image. Finally, we measure the imperceptibility of the perturbations using average structural similarity (SSIM) between pairs of original and adversarial images." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "24256" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.086, + 0.092, + 0.27, + 0.229 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.276, + 0.092, + 0.457, + 0.229 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.233, + 0.468, + 0.259 + ], + "angle": 0, + "content": "Figure 6. Performance of different multi-label attacks with fixed \\(\\epsilon = 0.05\\) on OpenImages as we increase the target set size." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.285, + 0.468, + 0.316 + ], + "angle": 0, + "content": "Note that \\(FR_{N}, FR_{S}\\), and \\(SSIM\\) should be high while \\(NT_{R}\\) should be low for a good attack method." 
+ }, + { + "type": "title", + "bbox": [ + 0.077, + 0.326, + 0.279, + 0.342 + ], + "angle": 0, + "content": "5.2. Experimental Results" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.35, + 0.468, + 0.471 + ], + "angle": 0, + "content": "Figure 5 shows the performance of different attack methods on PASCAL-VOC for one- and two-node attacks for different epsilon values using ML-GCN classifier. In Table 1, we show the evaluation across the three MLL models for a fixed \\(\\epsilon = 0.01\\) for which the performance of all attacks has plateaued3. We also show the evaluation on OpenImages for different target sizes in Fig. 6 and Tab. 2. From the results, we make the following conclusions:" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.475, + 0.468, + 0.611 + ], + "angle": 0, + "content": "- As Fig. 5 shows, all methods achieve high naive fooling rate \\( FR_{N} \\) given large enough perturbation budget, yet once we filter out the attacks leading to semantically inconsistent predictions, the performance \\( (FR_{S}) \\) of all baselines significantly decreases. However, our GMLA achieves very high semantic-based fooling rate than baselines. From Tab. 1 and 2, our method achieves naive fooling rate \\( FR_{N} \\) comparable to the other methods but outperforms them over \\( FR_{S} \\) by a significant margin." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.616, + 0.468, + 0.797 + ], + "angle": 0, + "content": "- Notice from Fig. 5 and 6 that MLA-U has higher naive and semantic-based fooling rates than MLA-C. The reason is the strong positive correlations learned among related cooccurring labels during model training, which MLA-U implicitly exploits. However, MLA-U being oblivious to the relationships among labels can inevitably affect unrelated labels, as shown in Tab. 1 and 2. This explains why MLA-U has the highest \\( N T_{R} \\) across different settings. The difference becomes more apparent as we move to attack larger datasets e.g. OpenImages. 
This is because, a larger number of labels increases the chances of learning spurious correlations among unrelated labels." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.801, + 0.468, + 0.876 + ], + "angle": 0, + "content": "- Based on Fig. 5, MLA-LP achieves lowest performance compared to other attack methods for both fooling rates on PASCAL-VOC and NUS-WIDE datasets, and does not converge for OpenImages experiments. This is because MLA-LP uses interior point method at each iteration to solve a" + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.09, + 0.885, + 0.145 + ], + "angle": 0, + "content": "
Attack|Ω|=1|Ω|=2|Ω|=3|Ω|=4|Ω|=5
MLA-U0.47 ± 0.020.57 ± 0.030.66 ± 0.030.75 ± 0.040.87 ± 0.03
MLA-C0.32 ± 0.090.31 ± 0.090.09 ± 0.070.06 ± 0.040.0 ± 0.0
GMLA (Ours)0.32 ± 0.140.16 ± 0.120.21 ± 0.130.11 ± 0.070.06 ± 0.04
" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.149, + 0.892, + 0.175 + ], + "angle": 0, + "content": "Table 2. Percentage of semantically unrelated labels \\((NT_R)\\) affected at \\(\\epsilon = 0.05\\) for ASL[64] on OpenImages." + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.192, + 0.882, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.354, + 0.892, + 0.392 + ], + "angle": 0, + "content": "Figure 7. Transferability across models on PASCAL-VOC. The y-axis shows the source model which generates the perturbation and x-axis shows the target model evaluated on that perturbation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.413, + 0.892, + 0.563 + ], + "angle": 0, + "content": "system of equations, which define the constraints on the target and non-target labels. Because of the complex relationships among different labels, the feasible region for the given linear problem might be empty. This has also been identified by [96]. When the LP problem has a feasible solution, MLA-LP successfully finds the perturbation that satisfy the attack constraints. This explains why, for the small number of successfully attacked images, MLA-LP affects the least percentage of non-targeted labels, achieving low \\(\\mathrm{NT}_R\\) as shown in Tab. 1." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.569, + 0.892, + 0.66 + ], + "angle": 0, + "content": "- Each attack method produces imperceptible perturbations, as we constrain the maximum infinity norm of the perturbation to 0.01 (on images with pixel values between 0 to 1). Notice also from Table 1 that the average SSIM scores between the adversarial and original images is very close to 1, showing imperceptibility of perturbations." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.665, + 0.892, + 0.861 + ], + "angle": 0, + "content": "- Notice from Fig. 
6 that MLA-C fails to successfully attack large-scale datasets and its performance drops drastically as we increase the target set size. As mentioned earlier, this is attributed to the observation that gradients of target and non-targeted classes are often opposite (as shown in Fig. 8) and as MLA-C optimizes the target and non-target loss simultaneously, the resulting perturbations are sub-optimal. From Tab. 2, MLA-C achieves lowest \\(\\mathrm{NT}_R\\) for target sizes greater than 2 but also performs poorly on fooling rates. Note that despite achieving high fooling rates \\(\\mathrm{FR}_N\\) and \\(\\mathrm{FR}_G\\), our GMLA method affects very small percentage of semantically unrelated labels, which shows the success of our constraint proposed in (6)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Attack Transferability. Figure 7 shows the cross-model transferability of different attacks. For each source model," + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.466, + 0.901 + ], + "angle": 0, + "content": "3We show results of ablation experiment on GMLA in supplementary." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "24257" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.086, + 0.088, + 0.268, + 0.191 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.277, + 0.089, + 0.457, + 0.191 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.196, + 0.47, + 0.258 + ], + "angle": 0, + "content": "Figure 8. Stacked bar charts showing the correlation between the gradient of the loss on target labels \\( g_{\\boldsymbol{x}, \\Psi_{\\boldsymbol{x}}} \\) and on other labels \\( g_{\\boldsymbol{x}, \\Psi_{\\boldsymbol{x}}} \\) for different sizes of the target set on OpenImages. Left: Using (3) as objective. 
Right: using our proposed (6) that optimizes the loss on target labels while keeping the loss on non-target labels the same (as a constraint)." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.273, + 0.462, + 0.419 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.427, + 0.471, + 0.502 + ], + "angle": 0, + "content": "Figure 9. Results of attacking ML-GCN on PASCAL-VOC (first two columns) and NUS-WIDE (last two columns). Each column shows the model predictions for clean \\((\\epsilon = 0)\\) and attacked images. Rounded rectangles group semantically related labels. Inconsistent predictions caused around target labels are shown with red rectangles. The red labels at the top are targeted labels and the arrows show the relationships." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.525, + 0.47, + 0.678 + ], + "angle": 0, + "content": "we compute the perturbations (scaled to \\(\\epsilon = 0.1\\)) for images and evaluate the target models exclusively on the images that were successfully attacked by the respective source model (hence the diagonal values are all 1). Notice that although all attacks, other than MLA-LP, are transferable, GMLA semantic attack transfers better and achieves the highest \\(\\mathrm{FR}_N\\) and \\(\\mathrm{FR}_S\\). From Table 1, notice that all attacks were able to achieve non-trivial graph-based fooling rate. However, GMLA is the most effective method to generate semantically consistent and generally transferrable attacks." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.682, + 0.47, + 0.82 + ], + "angle": 0, + "content": "Gradient Correlations. Figure 8 shows the correlation between the gradient of the loss on target labels (to be modified), \\( g_{x,\\Psi_x} \\), and on other labels (to be fixed), \\( g_{x,\\bar{\\Psi}_x} \\), for different sizes of the target set on OpenImages. Notice that adding the two losses leads to highly negatively correlated gradients for them. 
However, only optimizing the loss on target labels while keeping the loss on non-target labels the same (as a constraint) leads to significant increase in gradient correlations, which can justify the success of GMLA." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Qualitative Results. Figure 9 shows qualitative results of attacking ML-GCN using PASCAL-VOC and NUS-WIDE. Notice that in all four cases, respectively, MLA-U and MLA-C lead to inconsistencies. For example, to turn off the boat label in the first image, MLA-U attacks the boat and" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.089, + 0.885, + 0.306 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.311, + 0.895, + 0.399 + ], + "angle": 0, + "content": "Figure 10. Since the adversarial images have imperceptible changes, we visualize the perturbations computed using different methods for various target classes of PASCAL-VOC. The perturbations are computed by setting the maximum budget \\(\\epsilon = 0.01\\) and are scaled for visualization. For each perturbation, we compute it's dot product (D) with the perturbation computed using our proposed attack - GMLA, and the structural similarity (S) of the original and the adversarial image (after adding the perturbation)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.417, + 0.892, + 0.553 + ], + "angle": 0, + "content": "craft labels but does not attack the vehicle label, leading to semantically inconsistent prediction. MLA-C successfully attacks boat, but keeps all other labels fixed, causing inconsistent predictions. For the second image, MLA-U successfully kept consistency around one group of labels but causes inconsistency in the other group. Similar to MLA-C, MLA-LP causes semantic inconsistencies for all images. Notice that in all cases, GMLA successfully modifies the necessary labels to ensure semantic consistency." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.554, + 0.892, + 0.66 + ], + "angle": 0, + "content": "In Figure 10, we visualize the perturbations computed by different methods and compare the SSIM (S) of baselines with GMLA. We also show the dot product (D) between the perturbation computed using each baseline method and the one computed using GMLA. We can see that GMLA finds different attack directions than the baseline methods, which results in semantically consistent and transferable attacks." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.675, + 0.628, + 0.691 + ], + "angle": 0, + "content": "6. Conclusions" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.7, + 0.892, + 0.807 + ], + "angle": 0, + "content": "We developed an efficient framework to generate attacks for multi-label recognition that ensures semantic consistency of the output labels based on relationships among labels while effectively attacking a large number of labels. By extensive experiments on three datasets and several MLL models, we showed that our method generates both semantically consistent and successful adversarial attacks." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.822, + 0.669, + 0.839 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.902 + ], + "angle": 0, + "content": "This work is sponsored by Khoury College of Northeastern funds, DARPA (HR00112220001), NSF (IIS-2115110), ARO (W911NF2110276). Content does not necessarily reflect the position/policy of the Government." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "24258" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.09, + 0.176, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.184 + ], + "angle": 0, + "content": "[1] Abhishek Aich, Calvin-Khang Ta, Akash Gupta, Chengyu Song, Srikanth Krishnamurthy, Salman Asif, and Amit Roy-Chowdhury. Gama: Generative adversarial multi-object scene attacks. Advances in Neural Information Processing Systems, 35:36914-36930, 2022. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.186, + 0.472, + 0.269 + ], + "angle": 0, + "content": "[2] Abhishek Aich, Shasha Li, Chengyu Song, M Salman Asif, Srikanth V Krishnamurthy, and Amit K Roy-Chowdhury. Leveraging local patch differences in multi-object scenes for generative adversarial attacks. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1308-1318, 2023. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.271, + 0.468, + 0.299 + ], + "angle": 0, + "content": "[3] N. Akhtar and A. Mian. Threat of adversarial attacks on deep learning in computer vision: A survey. arXiv, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.301, + 0.468, + 0.342 + ], + "angle": 0, + "content": "[4] A. Athalye, N. Carlini, and D. A. Wagner. Obfuscated gradients give a false sense of security: Circumventing defenses to adversarial examples. 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.344, + 0.468, + 0.385 + ], + "angle": 0, + "content": "[5] Yuanhao Ban and Yinpeng Dong. Pre-trained adversarial perturbations. In Advances in Neural Information Processing Systems, pages 1196-1209. Curran Associates, Inc., 2022. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.387, + 0.468, + 0.442 + ], + "angle": 0, + "content": "[6] Emanuel Ben-Baruch, Tal Ridnik, Itamar Friedman, Avi Ben-Cohen, Nadav Zamir, Asaf Noy, and Lihi Zelnik-Manor. Multi-label classification with partial annotations using class-aware selective loss. 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.444, + 0.468, + 0.5 + ], + "angle": 0, + "content": "[7] Zikui Cai, Xinxin Xie, Shasha Li, Mingjun Yin, Chengyu Song, Srikanth V. Krishnamurthy, Amit K. Roy-Chowdhury, and M. Salman Asif. Context-aware transfer attacks for object detection. ArXiv, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.501, + 0.468, + 0.57 + ], + "angle": 0, + "content": "[8]Zikui Cai, Shantanu Rane, Alejandro E. Brito, Chengyu Song,Srikanth V.Krishnamurthy,Amit K.Roy-Chowdhury, and M.Salman Asif.Zero-query transfer attacks on context-aware object detectors.IEEE Conference on Computer Vision and Pattern Recognition,2022.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.572, + 0.468, + 0.613 + ], + "angle": 0, + "content": "[9] N. Carlini and D. Wagner. Adversarial examples are not easily detected: Bypassing ten detection methods. Workshop on Artificial Intelligence and Security, 2017. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.615, + 0.468, + 0.656 + ], + "angle": 0, + "content": "[10] N. Carlini and D. Wagner. Towards evaluating the robustness of neural networks. IEEE Symposium on Security and Privacy, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.659, + 0.468, + 0.699 + ], + "angle": 0, + "content": "[11] Y. Carmon, A. Raghunathan, L. Schmidt, P. Liang, and J. C. Duchi. Unlabeled data improves adversarial robustness. Neural Information Processing Systems, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.701, + 0.468, + 0.755 + ], + "angle": 0, + "content": "[12] P.-Y. Chen, Y. Sharma, H. Zhang, J. Yi, and C.-J. Hsieh. 
Ead: Elastic-net attacks to deep neural networks via adversarial examples. AAAI Conference on Artificial Intelligence, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.758, + 0.468, + 0.813 + ], + "angle": 0, + "content": "[13] T. Chen, M. Xu, X. Hui, H. Wu, and L. Lin. Learning semantic-specific graph representation for multi-label image recognition. IEEE International Conference on Computer Vision, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.816, + 0.468, + 0.871 + ], + "angle": 0, + "content": "[14] Zhao-Min Chen, Xiu-Shen Wei, Xin Jin, and Yanwen Guo. Multi-label image recognition with joint class-aware map disentangling and label correlation embedding. IEEE International Conference on Multimedia and Expo, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.873, + 0.468, + 0.901 + ], + "angle": 0, + "content": "[15] Z. M. Chen, X. S. Wei, P. Wang, and Y. Guo. Multi-label image recognition with graph convolutional networks. IEEE" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.115, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "Conference on Computer Vision and Pattern Recognition, abs/1904.03582, 2019. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.122, + 0.892, + 0.178 + ], + "angle": 0, + "content": "[16] T. S. Chua, J. Tang, R. Hong, H. Li, Z. Luo, and Y. T. Zheng. Nus-wide: A real-world web image database from national university of bangalore. ACM International Conference on Image and Video Retrieval, 2009. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.18, + 0.892, + 0.221 + ], + "angle": 0, + "content": "[17] F. Croce and M. Hein. Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks. ArXiv, 2020. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.224, + 0.892, + 0.264 + ], + "angle": 0, + "content": "[18] S. 
D. Dao, E. Zhao, D. Phung, and J. Cai. Multi-label image classification with contrastive learning. arXiv preprint, arXiv:2107.11626, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.267, + 0.892, + 0.323 + ], + "angle": 0, + "content": "[19] J. Deng, N. Ding, Y. Jia, A. Frome, K. Murphy, S. Bengio, Y. Li, H. Neven, and H. Adam. Large-scale object classification using label relation graphs. European Conference on Computer Vision, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.325, + 0.892, + 0.379 + ], + "angle": 0, + "content": "[20] G. W. Ding, Y. Sharma, K. Y. Lui, and R. Huang. Max-margin adversarial (mma) training: Direct input space margin maximization through adversarial training. arXiv, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.382, + 0.892, + 0.437 + ], + "angle": 0, + "content": "[21] Zixuan Ding, Ao Wang, Hui Chen, Qiang Zhang, Pengzhang Liu, Yongjun Bao, Weipeng Yan, and Jungong Han. Exploring structured semantic prior for multi label recognition with incomplete labels. 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.44, + 0.892, + 0.522 + ], + "angle": 0, + "content": "[22] Junhao Dong, Seyed-Mohsen Moosavi-Dezfooli, Jianhuang Lai, and Xiaohua Xie. The enemy of my enemy is my friend: Exploring inverse adversaries for improving adversarial training. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 24678–24687, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.525, + 0.892, + 0.566 + ], + "angle": 0, + "content": "[23] Y. Dong, Z. Deng, T. Pang, H. Su, and J. Zhu. Adversarial distributional training for robust deep learning. arXiv, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.569, + 0.892, + 0.624 + ], + "angle": 0, + "content": "[24] M. Everingham, S. M. A. Eslami, L. Van-Gool, C. K. I. Williams, J. Winn, and A. Zisserman. The Pascal visual object classes (voc) challenge. 
International Journal of Computer Vision, 2010. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.627, + 0.892, + 0.668 + ], + "angle": 0, + "content": "[25] K. Eykholt, I. Evtimov, E. Fernandes, B. Li, A. Rahmati, F. Tramér, A. Prakash, T. Kohno, and D. X. Song. Physical adversarial examples for object detectors. arXiv, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.67, + 0.892, + 0.711 + ], + "angle": 0, + "content": "[26] L. Feng, B. An, and S. He. Collaboration based multi-label learning. AAAI Conference on Artificial Intelligence, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.714, + 0.892, + 0.755 + ], + "angle": 0, + "content": "[27] I. J. Goodfellow, J. Shlens, and C. Szegedy. Explaining and harnessing adversarial examples. International Conference on Learning Representations, 2015. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.758, + 0.892, + 0.812 + ], + "angle": 0, + "content": "[28] W. He, J. Wei, X. Chen, N. Carlini, and D. Song. Adversarial example defense: Ensembles of weak defenses are not strong. USENIX Workshop on Offensive Technologies, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.815, + 0.892, + 0.843 + ], + "angle": 0, + "content": "[29] D. Hendrycks, K. Lee, and M. Mazeika. Using pre-training can improve model robustness and uncertainty. 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.846, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[30] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. Natural adversarial examples. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15262-15271, 2021. 
1" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "24259" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.162 + ], + "angle": 0, + "content": "[31] Lei Hsiung, Yun-Yun Tsai, Pin-Yu Chen, and Tsung-Yi Ho. Towards compositional adversarial robustness: Generalizing adversarial training to composite semantic perturbations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24658-24667, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.164, + 0.469, + 0.19 + ], + "angle": 0, + "content": "[32] S. Hu, L. Ke, X. Wang, and S. Lyu. Tkml-ap: Adversarial attacks to top-k multi-label learning. arXiv, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.192, + 0.469, + 0.233 + ], + "angle": 0, + "content": "[33] D. T. Huynh and E. Elhamifar. Interactive multi-label cnn learning with partial labels. IEEE Conference on Computer Vision and Pattern Recognition, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.235, + 0.469, + 0.289 + ], + "angle": 0, + "content": "[34] Tooba Imtiaz, Morgan Kohler, Jared Miller, Zifeng Wang, Mario Sznaier, Octavia I Camps, and Jennifer G Dy. Saif: Sparse adversarial and interpretable attack framework. arXiv preprint arXiv:2212.07495, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.292, + 0.469, + 0.332 + ], + "angle": 0, + "content": "[35] J. Li R. Ji, H. Liu, X. Hong, Y. Gao, and Q. Tian. Universal perturbation attack against image retrieval. International Conference on Computer Vision, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.334, + 0.469, + 0.389 + ], + "angle": 0, + "content": "[36] Jinyuan Jia, Wenjie Qu, and Neil Zhenqiang Gong. Multiguard: Provably robust multi-label classification against adversarial examples. 
Advances in Neural Information Processing Systems, 2022. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.391, + 0.469, + 0.431 + ], + "angle": 0, + "content": "[37] Youngwook Kim, Jae Myung Kim, Zeynep Akata, and Jungwoo Lee. Large loss matters in weakly supervised multi-label classification. 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.433, + 0.437, + 0.447 + ], + "angle": 0, + "content": "[38] Takumi Kobayashi. Two-way multi-label loss. 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.449, + 0.469, + 0.49 + ], + "angle": 0, + "content": "[39] A. Kurakin, I. Goodfellow, and S. Bengio. Adversarial machine learning at scale. International Conference on Learning Representations, 2017. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.491, + 0.469, + 0.574 + ], + "angle": 0, + "content": "[40] A. Kuznetsova, H. Rom, N. Alldrin, J. Uijlings, I. Krasin, J. Pont-Tuset, S. Kamali, S. Popov, M. Malloci, A. Kolesnikov, T. Duerig, and V. Ferrari. The open images dataset v4: Unified image classification, object detection, and visual relationship detection at scale. International Journal of Computer Vision, 2016. 2, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.576, + 0.469, + 0.629 + ], + "angle": 0, + "content": "[41] J. Lanchantin, T. Wang, V. Ordonez, and Y. Qi. General multi-label image classification with transformers. IEEE Conference on Computer Vision and Pattern Recognition, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.632, + 0.469, + 0.672 + ], + "angle": 0, + "content": "[42] K. Lee, K. Lee, H. Lee, and J. Shin. A simple unified framework for detecting out-of-distribution samples and adversarial attacks. 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.675, + 0.469, + 0.716 + ], + "angle": 0, + "content": "[43] Peng Li, Peng Chen, Yonghong Xie, and Dezheng Zhang. Bi-modal learning with channel-wise attention for multi-label image classification. IEEE Access, 2020. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.718, + 0.469, + 0.759 + ], + "angle": 0, + "content": "[44] Q. Li, M. Qiao, W. Bian, and D. Tao. Conditional graphical lasso for multi-label image classification. IEEE Conference on Computer Vision and Pattern Recognition, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.761, + 0.469, + 0.801 + ], + "angle": 0, + "content": "[45] Q. Li, X. Peng, Y. Qiao, and Q. Peng. Learning label correlations for multi-label image recognition with graph networks. Pattern Recognition Letters, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.803, + 0.469, + 0.843 + ], + "angle": 0, + "content": "[46] X. Li, F. Zhao, and Y. Guo. Multi-label image classification with a probabilistic label enhancement model. In UAI, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.469, + 0.899 + ], + "angle": 0, + "content": "[47] Y. Li and L. Yang. More correlations better performance: Fully associative networks for multi-label image classification. International Conference on Pattern Recognition, 2021. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[48] Y. Li, Y. Song, and J. Luo. Improving pairwise ranking for multi-label image classification. IEEE Conference on Computer Vision and Pattern Recognition, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.175 + ], + "angle": 0, + "content": "[49] Z. Li, W. Lu, Z. Sun, and W. Xing. Improving multi-label classification using scene cues. Multimedia Tools and Applications, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.178, + 0.892, + 0.204 + ], + "angle": 0, + "content": "[50] Dekun Lin. Probability guided loss for long-tailed multi-label image classification. 2023. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.205, + 0.892, + 0.259 + ], + "angle": 0, + "content": "[51] A. Madry, A. Makelov, L. Schmidt, D. Tsipras, and A. Vladu. Towards deep learning models resistant to adversarial attacks. International Conference on Learning Representations, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.261, + 0.892, + 0.3 + ], + "angle": 0, + "content": "[52] S. Melacci, G. Ciravegna, A. Sotgiu, A. Demontis, B. Biggio, M. Gori, and F. Roli. Domain knowledge alleviates adversarial attacks in multi-label classifiers. 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.302, + 0.892, + 0.355 + ], + "angle": 0, + "content": "[53] J.-H. Metzen, M.-C. Kumar, T. Brox, and V. Fischer. Universal adversarial perturbations against semantic image segmentation. International Conference on Computer Vision, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.358, + 0.892, + 0.384 + ], + "angle": 0, + "content": "[54] G. A. Miller. Wordnet: a lexical database for english. Communications of the ACM, 38(11), 1995. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.386, + 0.892, + 0.44 + ], + "angle": 0, + "content": "[55] S. Moosavi-Dezfooli, A. Fawzi, and P. Frossard. Deepfool: a simple and accurate method to fool deep neural networks. IEEE Conference on Computer Vision and Pattern Recognition, 2016. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.442, + 0.892, + 0.495 + ], + "angle": 0, + "content": "[56] J. Nam, E. L. Mencia, H. J. Kim, and J. Furnkranz. Maximizing subset accuracy with recurrent neural networks in multi-label classification. Neural Information Processing Systems, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.497, + 0.892, + 0.538 + ], + "angle": 0, + "content": "[57] T. Pang, K. Xu, C. Du, N. Chen, and J. Zhu. Improving adversarial robustness via promoting ensemble diversity. International Conference on Machine learning, 2019. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.54, + 0.892, + 0.579 + ], + "angle": 0, + "content": "[58] T. Pang, K. Xu, Y. Dong, C. Du, N. Chen, and J. Zhu. Rethinking softmax cross-entropy loss for adversarial robustness. arXiv, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.581, + 0.892, + 0.622 + ], + "angle": 0, + "content": "[59] T. Pang, X. Yang, Y. Dong, K. Xu, H. Su, and J. Zhu. Boosting adversarial training with hypersphere embedding. Neural Information Processing Systems, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.624, + 0.892, + 0.69 + ], + "angle": 0, + "content": "[60] Nicolas Papernot, Patrick Mcdaniel, Somesh Jha, Matt Fredrikson, Z. Berkay Celik, and Ananthram Swami. The limitations of deep learning in adversarial settings. IEEE European Symposium on Security and Privacy (EuroS&P), 2016. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.693, + 0.892, + 0.734 + ], + "angle": 0, + "content": "[61] Tao Pu, Tianshui Chen, Hefeng Wu, and Liang Lin. Semantic-aware representation blending for multi-label image recognition with partial labels. 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.736, + 0.892, + 0.801 + ], + "angle": 0, + "content": "[62] Zeyu Qin, Yanbo Fan, Yi Liu, Li Shen, Yong Zhang, Jue Wang, and Baoyuan Wu. Boosting the transferability of adversarial attacks with reverse adversarial perturbation. Advances in Neural Information Processing Systems, 35:29845-29858, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.804, + 0.892, + 0.845 + ], + "angle": 0, + "content": "[63] T. Ridnik, H. Lawen, A. Noy, and I. Friedman. Tresnet: High performancegpu-dedicated architecture. ArXiv preprint arXiv:2003.13630, 2020.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.847, + 0.892, + 0.899 + ], + "angle": 0, + "content": "[64] Tal Ridnik, Emanuel Ben-Baruch, Nadav Zamir, Asaf Noy, Itamar Friedman, Matan Protter, and Lihi Zelnik-Manor. 
Asymmetric loss for multi-label classification. 2021. 5, 6, 7" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "24260" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.134 + ], + "angle": 0, + "content": "[65] Tal Ridnik, Gilad Sharir, Avi Ben-Cohen, Emanuel Ben-Baruch, and Asaf Noy. Ml-decoder: Scalable and versatile classification head. 2023. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.136, + 0.472, + 0.219 + ], + "angle": 0, + "content": "[66] Jérôme Rony, Luiz G Hafemann, Luiz S Oliveira, Ismail Ben Ayed, Robert Sabourin, and Eric Granger. Decoupling direction and norm for efficient gradient-based 12 adversarial attacks and defenses. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4322-4330, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.222, + 0.471, + 0.263 + ], + "angle": 0, + "content": "[67] J. Cohen and E. Rosenfeld and Z. Kolter. Certified adversarial robustness via randomized smoothing. International Conference on Machine learning, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.266, + 0.47, + 0.32 + ], + "angle": 0, + "content": "[68] A. Shafahi, M. Najibi, A. Ghiasi, Z. Xu, J. Dickerson, C. Studer, L. Davis, G. Taylor, and T. Goldstein. Adversarial training for free! Neural Information Processing Systems, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.324, + 0.471, + 0.393 + ], + "angle": 0, + "content": "[69] Nasim Shafiee and Ehsan Elhamifar. Zero-shot attribute attacks on fine-grained recognition models. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part V, pages 262-282. Springer, 2022. 
1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.395, + 0.47, + 0.451 + ], + "angle": 0, + "content": "[70] Nitish Shukla and Sudipta Banerjee. Generating adversarial attacks in the latent space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 730-739, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.453, + 0.469, + 0.494 + ], + "angle": 0, + "content": "[71] Q. Song, H. Jin, X. Huang, and X. Hu. Multi-label adversarial perturbations. IEEE International Conference on Data Mining, 2018. 1, 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.497, + 0.469, + 0.525 + ], + "angle": 0, + "content": "[72] R. Speer, J. Chin, and C. Havasi. Conceptnet 5.5: An open multilingual graph of general knowledge. 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.527, + 0.469, + 0.581 + ], + "angle": 0, + "content": "[73] C. Szegedy, W. Zaremba, I. Sutskever, J. Bruna, D. Erhan, I. Goodfellow, and R. Fergus. Intriguing properties of neural networks. International Conference on Learning Representations, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.584, + 0.469, + 0.613 + ], + "angle": 0, + "content": "[74] N. Tursynbek, A. Petiushko, and I. Oseledets. Geometry-inspired top-k adversarial perturbations. arXiv, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.615, + 0.469, + 0.668 + ], + "angle": 0, + "content": "[75] J. Uesato, J. B. Alayrac, P. Huang, R. Stanforth, A. Fawzi, and P. Kohli. Are labels required for improving adversarial robustness? Neural Information Processing Systems, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.672, + 0.469, + 0.727 + ], + "angle": 0, + "content": "[76] Thomas Verelst, Paul K Rubenstein, Marcin Eichner, Tinne Tuytelaars, and Maxim Berman. Spatial consistency loss for training multi-label classifiers from single-label annotations. 2023. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.73, + 0.469, + 0.785 + ], + "angle": 0, + "content": "[77] J. Wang, Y. Yang, J. Mao, Z. Huang, C. Huang, and W. Xu. Cnn-rnn: A unified framework for multi-label image classification. 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.788, + 0.469, + 0.841 + ], + "angle": 0, + "content": "[78] Z. Wang, T. Chen, G. Li, G. Li, and L. Lin. Multi-label image recognition by recurrently discovering attentional regions. IEEE International Conference on Computer Vision, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.845, + 0.469, + 0.899 + ], + "angle": 0, + "content": "[79] Y. Wu, H. Liu, S. Feng, Y. Jin, G. Lyu, and Z. Wu. Gm-mlic: Graph matching based multi-label image classification. International Joint Conference on Artificial Intelligence, 2021. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[80] Z. Wu and M. Palmer. Verbs semantics and lexical selection. Annual Meeting on Association for Computational Linguistics, 1994. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.19 + ], + "angle": 0, + "content": "[81] C. Xie, Z. Zhang, Y. Zhou, S. Bai, J. Wang, Z. Ren, and A. Yuille. Improving transferability of adversarial examples with input diversity. IEEE Conference on Computer Vision and Pattern Recognition, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.193, + 0.892, + 0.233 + ], + "angle": 0, + "content": "[82] Ming-Kun Xie, Jiahao Xiao, and Sheng-Jun Huang. Label-aware global consistency for multi-label learning with single positive labels. 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.235, + 0.892, + 0.275 + ], + "angle": 0, + "content": "[83] J. Xu, H. Tian, Z. Wang, Y. 
Wang, W. Kang, and F. Chen. Joint input and output space learning for multi-label image classification. IEEE Transactions on Multimedia, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.278, + 0.892, + 0.319 + ], + "angle": 0, + "content": "[84] W. Xu, D. Evans, and Y. Qi. Feature squeezing: Detecting adversarial examples in deep neural networks. Network and Distributed Systems Security Symposium, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.321, + 0.892, + 0.375 + ], + "angle": 0, + "content": "[85] H. Yang, J. T. Zhou, Y. Zhang, B. Gao, J. Wu, and J. Cai. Exploit bounding box annotations for multi-label object recognition. IEEE Conference on Computer Vision and Pattern Recognition, 2016. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.377, + 0.892, + 0.417 + ], + "angle": 0, + "content": "[86] Zhuo Yang, Yufei Han, and Xiangliang Zhang. Characterizing the evasion attackability of multi-label classifiers. 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.42, + 0.892, + 0.46 + ], + "angle": 0, + "content": "[87] Z. Yang, Y. Han, and X. Zhang. Attack transferability characterization for adversarially robust multi-label classification. 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.462, + 0.892, + 0.516 + ], + "angle": 0, + "content": "[88] J. Ye, J. He, X. Peng, W. Wu, and Y. Qiao. Attention-driven dynamic graph convolutional network for multi-label image recognition. European Conference on Computer Vision, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.519, + 0.892, + 0.573 + ], + "angle": 0, + "content": "[89] R. You, Z. Guo, L. Cui, X. Long, S. Y. Bao, and S. Wen. Cross-modality attention with semantic graph embedding for multi-label classification. AAAI Conference on Artificial Intelligence, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.576, + 0.892, + 0.617 + ], + "angle": 0, + "content": "[90] X. Yuan, P. He, Q. Zhu, and X. Li. 
Adversarial examples: Attacks and defenses for deep learning. IEEE Transactions on Neural Networks and Learning Systems, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.619, + 0.892, + 0.672 + ], + "angle": 0, + "content": "[91] ML. Zhang and Z. Zhou. Multilabel neural networks with applications to functional genomics and text categorization. IEEE Transactions on Knowledge and Data Engineering, 2006. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.675, + 0.892, + 0.716 + ], + "angle": 0, + "content": "[92] Shu Zhang, Ran Xu, Caiming Xiong, and Chetan Ramaiah. Use all the labels: A hierarchical multi-label contrastive learning framework. 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.718, + 0.892, + 0.758 + ], + "angle": 0, + "content": "[93] Z. Zhao, G. Chen, J. Wang, Y. Yang, F. Song, and J. Sun. Attack as defense: Characterizing adversarial examples using robustness. arXiv, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.761, + 0.892, + 0.802 + ], + "angle": 0, + "content": "[94] Donghao Zhou, Pengfei Chen, Qiong Wang, Guangyong Chen, and Pheng-Ann Heng. Acknowledging the unknown for multi-label learning with single positive labels. 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.804, + 0.892, + 0.844 + ], + "angle": 0, + "content": "[95] N. Zhou, W. Luo, X. Lin, P. Xu, and Z.. Zhang. Generating multi-label adversarial examples by linear programming. International Joint Conference on Neural Networks, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.846, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[96] N. Zhou, W. Luo, J. Zhang, L. Kong, and H. Zhang. Hiding all labels for multi-label images: An empirical study of adversarial examples. International Joint Conference on Neural Networks, 2021. 
2, 7" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.518, + 0.957 + ], + "angle": 0, + "content": "24261" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.135 + ], + "angle": 0, + "content": "[97] Y. Zhu, J. T. Kwok, and Z. Zhou. Multi-label learning with global and local label correlation. IEEE Transactions on Knowledge and Data Engineering, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.136, + 0.47, + 0.19 + ], + "angle": 0, + "content": "[98] D. Zügner, A. Akbarnejad, and S. Gümnmann. Adversarial attacks on neural networks for graph data. International Conference on Knowledge Discovery & Data Mining, 2018. 1" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "24262" + } + ] +] \ No newline at end of file diff --git a/2024/Semantic-Aware Multi-Label Adversarial Attacks/a7635c9b-bbb0-4721-869c-8ded1f1d58af_origin.pdf b/2024/Semantic-Aware Multi-Label Adversarial Attacks/a7635c9b-bbb0-4721-869c-8ded1f1d58af_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3297ed6b6ae2b7ae024e0365998bc18b9d82f537 --- /dev/null +++ b/2024/Semantic-Aware Multi-Label Adversarial Attacks/a7635c9b-bbb0-4721-869c-8ded1f1d58af_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58962d6ddefae6fe59d27dd67ba960d479793dbe126e680f725802737759df84 +size 6946290 diff --git a/2024/Semantic-Aware Multi-Label Adversarial Attacks/full.md b/2024/Semantic-Aware Multi-Label Adversarial Attacks/full.md new file mode 100644 index 0000000000000000000000000000000000000000..90aff7c973b1c30dcf87381a26f8525a99f8c392 --- /dev/null +++ b/2024/Semantic-Aware Multi-Label Adversarial Attacks/full.md @@ -0,0 +1,398 
@@ +# Semantic-Aware Multi-Label Adversarial Attacks + +Hassan Mahmood Northeastern University + +mahmood.h@northeastern.edu + +Ehsan Elhamifar Northeastern University + +e.elhamifar@northeastern.edu + +# Abstract + +Despite its importance, generating attacks for multi-label learning (MLL) models has received much less attention compared to multi-class recognition. Attacking an MLL model by optimizing a loss on the target set of labels has often the undesired consequence of changing the predictions for other labels. On the other hand, adding a loss on the remaining labels to keep them fixed leads to highly negatively correlated gradient directions, reducing the attack effectiveness. In this paper, we develop a framework for crafting effective and semantic-aware adversarial attacks for MLL. First, to obtain an attack that leads to semantically consistent predictions across all labels, we find a minimal super-set of the target labels, referred to as consistent target set. To do so, we develop an efficient search algorithm over a knowledge graph, which encodes label dependencies. Next, we propose an optimization that searches for an attack that modifies the predictions of labels in the consistent target set while ensuring other labels will not get affected. This leads to an efficient algorithm that projects the gradient of the consistent target set loss onto the orthogonal direction of the gradient of the loss on other labels. Our framework can generate attacks on different target set sizes and for MLL with thousands of labels (as in OpenImages). Finally, by extensive experiments on three datasets and several MLL models, we show that our method generates both successful and semantically consistent attacks.1 + +# 1. 
Introduction + +Despite the tremendous success of Deep Neural Networks (DNNs) for image recognition, DNNs are vulnerable to adversarial attacks, i.e., imperceptible image perturbations that result in incorrect prediction with high confidence [9, 25, 27, 30, 35, 39, 53, 60, 69, 70, 98]. Understanding and improving the robustness of DNNs has motivated a large body of research on generating adversarial perturbations and subsequently using them to design defense mech- + +![](images/c0b229c86348da89cef4ce1a9fb2b18faf52001d2f7849124935241c76f5a36c.jpg) +Figure 1. Generating effective attacks for an MLL model is challenging. Top: Two groups of semantically related labels. Green nodes show labels predicted as present before the attack. Bottom: While an attack on the target label 'bicycle' succeeds, it fails to turn off 'vehicle' and 'wheeled vehicle' for $\epsilon < 0.2$ . On the other hand, for $\epsilon > 0.125$ , the attack changes the prediction for the non-target label 'person', which is undesired. + +anisms, e.g., by detecting attacks or retraining the model using perturbed images. The majority of existing works, however, have focused on multi-class recognition (MCR), in which only one class must be predicted in an image [14, 21, 26, 31, 37, 82, 85]. + +On the other hand, many real-world applications require finding multiple labels in an image. This includes human-object interaction learning (e.g., recognizing hands and interacting objects), autonomous driving (e.g., recognizing cars, bikes, pedestrians, roads, signs, etc), assistive robotics and surveillance. Therefore, multi-label learning (MLL) aims at recognizing all labels in an image [14, 26, 38, 50, 61, 85, 94]. However, despite its importance and fundamental differences with respect to attacks for MCR (see Figure 1), adversarial attacks for MLL has received much less attention in the literature [1, 2, 36, 71, 86, 87]. 
+ +The main difference between attacks for MCR and MLL stems from the different ways decision boundaries between labels is learned and structured for the two problems. In + +MCR, different labels compete with each other as only one label must be present/predicted. Therefore, attacking an on present label leads to turning it off while automatically turning on another label, see Figure 2 (left). On the other hand, in MLL, labels do not compete, where none, some or all labels can be predicted as present in an image. Thus, attacking a present or an absent label can lead to changing the predictions for none, several or all other labels, as shown in Figure 2 (right). This often has the undesired effect of inconsistent predictions, which can simply be used to detect the attack (e.g., turning off 'pedestrian' can turn on 'bike' and 'stop sign' while turning off 'road'). + +One can try to prevent changing predictions of other labels by crafting the attack while including a loss that enforces predictions of other labels to stay intact. However, as we show, the gradient of the loss for fixing other labels often is highly negatively correlated with the gradient of the loss on the label we want to attack, hence, counteracting the effect of each other. This problem gets more pronounced when the number of labels increases (e.g., in Open Images dataset [40] with 9,600 labels) and the gradient of this additional loss gets larger too. Also, fixing predictions for all other labels still may lead to semantic inconsistency among predictions (e.g., turning off 'vehicle' requires turning off 'car' and 'truck' too, otherwise 'vehicle' being absent while 'car' being present can be used to detect the attack). + +Paper Contributions. We develop a framework for crafting adversarial attacks for MLL that addresses the above challenges. 
First, to obtain an attack on a target set of labels that leads to semantically consistent predictions across all labels, we find a minimal superset of the target set (referred to as consistent target set) to be attacked/modified. To do so, we develop an efficient search algorithm over a knowledge graph, which encodes label dependencies. Second, we show that finding the attack by optimizing the sum of two losses, one over the consistent target set and the other over other labels, has opposite gradient directions for the two losses, which leads to inefficient perturbations. Third, we propose an optimization that searches for an attack that modifies the predictions of labels in the consistent target set while ensuring that other labels will not get affected. Our optimization leads to a projected gradient algorithm that projects the gradient of the loss for the consistent target set onto the orthogonal direction of the gradient of the loss on other labels. Finally, by extensive experiments on three datasets and several MLL models, we show that our framework generates both successful and semantically consistent attacks. + +# 2. Related Work + +# 2.1. Multi-Label Recognition + +The goal of multi-label learning (MLL) is to find all classes of objects (or even abstract concepts) in an image. As compared to multi-class classification, which finds a sin + +![](images/53b5d79a90093b316d4c54826ab27b08b7e4bd15af32c11749d91528b87ac8f4.jpg) +Figure 2. Left: In multi-class recognition (MCR), attacking the present label leads to automatically turning on another label, as labels compete with each other. Right: In multi-label learning (MLL), attacking a label can lead to none $(\pmb{x}_1')$ , some $(\pmb{x}_2')$ or all $(\pmb{x}_3')$ other labels changing. 
+ +![](images/3bf7f6d733be83068af386401b041a3736562a9480ef00e21e8647d49b1ca177.jpg) + +gle dominant class in an image, MLL is a harder task, since any combination of labels can be present in an image and many labels often correspond to small image regions. This has motivated a large body of research for designing effective MLL methods, using graphical models[44, 46], different loss functions for handling label imbalance [6, 18, 48, 49, 76, 91], exploiting external knowledge, label correlations, and hierarchical relations among labels [13, 19, 33, 43, 56, 78, 88, 89, 92, 97], or using a combination of label and image feature correlations [41, 45, 47, 77, 79, 83] to improve the multi-label performance. + +# 2.2. Adversarial Attacks + +Deep Neural Networks (DNNs) have been shown to be vulnerable to small adversarial perturbations, which can easily fool the model [3, 12, 66, 73, 81]. Therefore, many works have studied different ways to design efficient attacks and defense mechanisms for DNNs [4, 5, 10, 11, 20, 22, 23, 28, 29, 34, 42, 51, 57–59, 62, 67–69, 74, 75, 84, 93]. The adversarial attacks can be divided into several categories based on different criteria [90] such as white-box and black-box, image agnostic and image-specific, targeted and untargeted, or restricted to perturb small image regions and unrestricted attacks. In the paper, we generate white-box attacks for multi-label recognition, i.e., assume access to the MLL model. + +# 2.2.1 Multi-Label Adversarial Attacks + +Motivated by the increasing interest in the multi-label recognition problem, few works have recently studied MLL attacks. [71] studies a framework for attacking multi-label recognition and ranking systems. However, it does not exploit any relationships among labels to design attacks, which as we show is important to design effective attacks. We use the attacks from this work as baselines in our experiments. Yang et al. 
[86, 87] designed untargeted attacks for multi-label classification to change as many labels as possible and proposed a framework to measure how well an MLL model can be attacked. In comparison, our focus is targeted multi-label attacks with semantic relationships. Hu et al. [32] proposed to exploit ranking relations to design attacks for top- $k$ multi-label models and [96] proposed an attack to + +![](images/38f2981baa0ae6ca00ec3d871a97df4efe9dcdb8d23775e72217fa4424416c23.jpg) +Figure 3. Multi-label learning predicts several labels for an image (see "MLL Output"). Attacking a target set ('vehicle' on the top or 'person' and 'bird' on the bottom) using a naive multi-label attack leads to prediction semantic inconsistencies ('car' and 'motorcycle' being on while 'vehicle' is off or 'person' and 'bird' being off while 'animal' is on). However, GMLA handles a large number of labels while achieving semantic consistency. + +hide all labels present in an image, whereas we consider the minimal set of semantically related labels to be attacked. Aich et al. [2] leveraged local patch differences of different objects to generate multi-object attacks and [1] proposed a CLIP-based generative model to generate multi-object attacks in the black-box setting. Jia et al. [36] proposed theoretical robustness guarantees to defend against multi-label adversarial attacks and [52] exploited domain knowledge context to detect adversarial attacks. Context-aware attacks [7, 8] fool context-aware attack detection methods by attacking the label and its context simultaneously. The context in these works is defined in terms of cooccurring labels. In comparison, we propose to attack labels based on their semantic relationships. Moreover, none of these works have addressed the problem of negative gradient correlation in generating large-scale dataset attacks. Among the existing literature, Nan et al. [95] is also comparable to our attack method, and we use it as a baseline. 
They proposed a fast linear programming-based adversarial example generation algorithm for MLL to minimize the perturbation norm required to achieve a target label. + +# 3. Multi-Label Learning Attack (MLA) + +# 3.1. Problem Setting + +We study generating adversarial attacks for the Multi-Label Learning (MLL) task. In MLL, multiple labels can appear in an image, see Figure 3, as opposed to the multi-class recognition (MCR), where each image has only one label. Let $\mathcal{C}$ denote the set of all labels. For an image $x\in \mathbb{R}^d$ , let $\pmb {y}\in \{0,1\}^{|\mathcal{C}|}$ denote the set of its labels, indicating the presence (1) or absence (0) of each label in $\mathcal{C}$ in the image. Let $\mathcal{F}:\mathbb{R}^d\to \mathbb{R}^{|\mathcal{C}|}$ be a multi-label classifier, which we assume + +has already been learned using training images. The multi-label classifier $\mathcal{F} = \{f_1, f_2, \ldots, f_{|\mathcal{C}|}\}$ consists of $|\mathcal{C}|$ binary classifiers for each label, where $f_c(\pmb{x}) \in (-\infty, +\infty)$ is the score of the classifier $c$ . Therefore, the probability of label $c$ being present in the image $\pmb{x}$ is given by $\hat{y}_c = \sigma(f_c(\pmb{x}))$ , where $\sigma(\cdot)$ is the sigmoid function. Finally, let $\Omega_{\pmb{x}} \subseteq \mathcal{C}$ denote the target set of labels in the image $\pmb{x}$ which we want to attack, i.e., after the attack the present labels in $\Omega_{\pmb{x}}$ must become absent and vice versa. In the next subsection, we study the existing approaches [71] to generate multi-label attacks and identify their drawbacks. + +# 3.2. Naive Multi-Label Attack (MLA) + +For an attack on $\pmb{x}$ that modifies the labels in $\Omega_{\pmb{x}}$ , one can generate a small perturbation $e \in \mathbb{R}^d$ by minimizing the negative multi-label learning loss for labels in $\Omega_{\pmb{x}}$ while restricting the magnitude of $e$ . 
More precisely, we can solve + +$$ +\text {M L A - U :} \min _ {\boldsymbol {e}} - \mathcal {L} _ {b c e} (\boldsymbol {x} + \boldsymbol {e}, \Omega_ {\boldsymbol {x}}) \text {s . t .} \| \boldsymbol {e} \| _ {p} \leq \epsilon , \tag {1} +$$ + +where $\| \cdot \| _p$ is the $\ell_p$ -norm and $\mathcal{L}_{ce}(\boldsymbol{x}',\Gamma_{\boldsymbol{x}'})$ is the binary cross-entropy loss for image $\boldsymbol{x}'$ on labels in $\Gamma_{\boldsymbol{x}'}$ , defined as + +$$ +\mathcal {L} _ {b c e} \left(\boldsymbol {x} ^ {\prime}, \Omega_ {\boldsymbol {x} ^ {\prime}}\right) \triangleq +$$ + +$$ +\sum_ {c \in \Omega_ {\boldsymbol {x} ^ {\prime}}} - y _ {c} \log \sigma \left(f _ {c} \left(\boldsymbol {x} ^ {\prime}\right)\right) - \left(1 - y _ {c}\right) \log \left(1 - \sigma \left(f _ {c} \left(\boldsymbol {x} ^ {\prime}\right)\right)\right). \tag {2} +$$ + +The drawback of (1) is that attack on $\Omega_{\mathbf{x}}$ can lead to changing the predictions for other labels too, see Figure 2 (right). This often leads to inconsistent predictions, which can simply be used to detect the attack (e.g., turning off 'pedestrian' can turn on 'bike' and 'stop sign' while turning off 'road'), hence significantly reducing the effectiveness of the attack. + +To address this drawback, one can try to prevent changing predictions of other labels $(\bar{\Omega}_{\pmb{x}})$ , which is the complement of $\Omega_{\pmb{x}}$ with respect to $\mathcal{C}$ by crafting the attack while including a loss that enforces predictions of other labels to stay intact. More precisely, one can solve + +$$ +\begin{array}{l} \text {M L A - C :} \min _ {\boldsymbol {e}} - \mathcal {L} _ {b c e} (\boldsymbol {x} + \boldsymbol {e}, \Omega_ {\boldsymbol {x}}) + \mathcal {L} _ {b c e} (\boldsymbol {x} + \boldsymbol {e}, \bar {\Omega} _ {\boldsymbol {x}}), \tag {3} \\ \begin{array}{l} \text {s . 
t .} \| e \| _ {p} \leq \epsilon , \end{array} \\ \end{array} +$$ + +where the first term in the objective function tries to flip the labels in $\Omega_{x}$ while the second term preserves the labels in $\bar{\Omega}_{x}$ . Notice that with the additional objective, the space of perturbations in (3) is smaller than that in (1), yet it ensures not modifying labels outside the target set. However, as we verify by empirical results, the gradient of the loss for fixing other labels often is highly negatively correlated with the gradient of the loss on the target labels, hence, counteracting the effect of each other. We hypothesize that this effect is due to strong spurious correlations among labels, learnt by the model during training. Given two highly-correlated labels in an image, attacking one label while fixing the other + +using (3) would lead to opposite gradients. This problem gets more pronounced when the number of labels increases (e.g., in Open Images dataset [40] with 9,600 labels) and the gradient of this additional loss gets larger too. Moreover, fixing predictions for labels in $\bar{\Omega}_{\pmb{x}}$ still may lead to semantic inconsistencies in predictions (e.g., turning off 'vehicle' requires turning off 'car' and 'truck', otherwise 'vehicle' being off while 'car' being on can be used to detect the attack), hence, reducing the attack effectiveness. + +# 4. Generalized Multi-Label Attack (GMLA) + +We develop a framework for crafting adversarial attacks for MLL that addresses the challenges of conventional MLA, discussed above. First, to obtain an attack on a target label set $\Omega_{x}$ that leads to semantically consistent predictions across all labels, we find a minimal superset of the target set $\Psi_{x}$ (referred to as consistent target set) that needs to be attacked/modified. Given that there are often multiple such superset, we develop an efficient search algorithm over a knowledge graph $\mathcal{G}$ that encodes label dependencies. 
We denote by $\Psi_{x} = h\bigl (\Omega_{x},\mathcal{G}\bigr)$ the output of the search algorithm, which we will describe in detail later in this section. + +# 4.1. Proposed Optimization + +We then study a projection-based optimization that searches for an attack that modifies the predictions of labels in $\Psi_{\pmb{x}}$ while ensuring that other labels $\bar{\Psi}_{\pmb{x}}$ will not get affected. More specifically, we propose to solve + +$$ +\begin{array}{l} \text {G M L A :} \min _ {\boldsymbol {e}} - \mathcal {L} _ {b c e} (\boldsymbol {x} + \boldsymbol {e}, \Psi_ {\boldsymbol {x}}), \\ \text {s . t .} \mathcal {L} _ {b c e} (\boldsymbol {x} + \boldsymbol {e}, \bar {\Psi} _ {\boldsymbol {x}}) = \mathcal {L} _ {b c e} (\boldsymbol {x}, \bar {\Psi} _ {\boldsymbol {x}}), \tag {4} \\ \| \boldsymbol {e} \| _ {p} \leq \epsilon , \Psi_ {\boldsymbol {x}} = h (\Omega_ {\boldsymbol {x}}, \mathcal {G}), \\ \end{array} +$$ + +where we only minimize the attack loss on the consistent target set $\Psi_{\mathbf{x}}$ , while requiring that the binary cross-entropy loss on other labels $\bar{\Psi}_{\mathbf{x}}$ stay the same after the attack. This means that instead of trying to make the predictions on other labels more confident as in (3), we try to keep them stay the same after the attack. As we also show in the experiments (see Figure 8), this significantly boosts the attack by resolving the high negative correlation of the gradients of the two losses in (3) and finding better attack directions. 
+ +Since solving the optimization in (4) that ensures the first constraint is satisfied is difficult, we take a first-order approximation on this constraint around $x$ (as $e$ is small), + +$$ +\begin{array}{l} \mathcal {L} _ {b c e} (\boldsymbol {x} + \boldsymbol {e}, \bar {\Psi} _ {\boldsymbol {x}}) \approx \mathcal {L} _ {b c e} (\boldsymbol {x}, \bar {\Psi} _ {\boldsymbol {x}}) + \boldsymbol {g} _ {\boldsymbol {x}, \bar {\Psi} _ {\boldsymbol {x}}} ^ {\top} \boldsymbol {e}, \\ \text {w h e r e ,} \quad \boldsymbol {g} _ {\boldsymbol {x}, \bar {\Psi} _ {\boldsymbol {x}}} \triangleq \frac {\partial \mathcal {L} _ {b c e} (\boldsymbol {x} , \bar {\Psi} _ {\boldsymbol {x}})}{\partial \boldsymbol {x}}. \end{array} \tag {5} +$$ + +Thus, we can rewrite (4) as + +$$ +\begin{array}{l} \min _ {\boldsymbol {e}} - \mathcal {L} _ {b c e} (\boldsymbol {x} + \boldsymbol {e}, \Psi_ {\boldsymbol {x}}), \tag {6} \\ \begin{array}{l} \text {s . t .} \quad \boldsymbol {g} _ {\boldsymbol {x}, \bar {\Psi} _ {\boldsymbol {x}}} ^ {\top} \boldsymbol {e} = \mathbf {0}, \| \boldsymbol {e} \| _ {p} \leq \epsilon , \Psi_ {\boldsymbol {x}} = h \big (\Omega_ {\boldsymbol {x}}, \mathcal {G} \big). \end{array} \\ \end{array} +$$ + +The constraint $g_{x,\bar{\Psi}_x}^\top e = 0$ implies that $e$ must be in the orthogonal space to the gradient direction $g_{x,\bar{\Psi}_x}$ , hence not changing other labels. 
Thus, we can write + +$$ +\boldsymbol {e} = \boldsymbol {P} _ {\boldsymbol {x}, \bar {\Psi} _ {\boldsymbol {x}}} \alpha , \quad \boldsymbol {P} _ {\boldsymbol {x}, \bar {\Psi} _ {\boldsymbol {x}}} \triangleq \boldsymbol {I} - \frac {\boldsymbol {g} _ {\boldsymbol {x} , \bar {\Psi} _ {\boldsymbol {x}}} \boldsymbol {g} _ {\boldsymbol {x} , \bar {\Psi} _ {\boldsymbol {x}}} ^ {\top}}{\| \boldsymbol {g} _ {\boldsymbol {x} , \bar {\Psi} _ {\boldsymbol {x}}} \| _ {2} ^ {2}}, \tag {7} +$$ + +for some $\alpha \in \mathbb{R}^d$ , where $P_{x,\bar{\Psi}_x}$ is the orthogonal projection matrix on the gradient $g_{x,\bar{\Psi}_x}$ . Thus, we can write the optimization in (4) as + +$$ +\begin{array}{l} \text {G M L A :} \min _ {\boldsymbol {\alpha}} - \mathcal {L} _ {b c e} (\boldsymbol {x} + \boldsymbol {P} _ {\boldsymbol {x}, \bar {\Psi} _ {\boldsymbol {x}}} \boldsymbol {\alpha}, \Psi_ {\boldsymbol {x}}), \tag {8} \\ \begin{array}{l} \text {s . t .} \| P _ {\boldsymbol {x}, \bar {\Psi} _ {\boldsymbol {x}}} \boldsymbol {\alpha} \| _ {p} \leq \epsilon , \quad \Psi_ {\boldsymbol {x}} = h (\Omega_ {\boldsymbol {x}}, \mathcal {G}). \end{array} \\ \end{array} +$$ + +We follow AutoPGD [17] to iteratively solve (8). At each iteration, we linearly approximate the objective function and solve $(\pmb{g}_{\pmb{x},\Psi_{\pmb{x}}}$ is the gradient of $\mathcal{L}_{bce}(\pmb {x},\Psi_{\pmb{x}}))$ + +$$ +\min _ {\boldsymbol {\alpha}} - \mathbf {g} _ {\boldsymbol {x}, \Psi_ {\boldsymbol {x}}} ^ {\top} \left(\boldsymbol {P} _ {\boldsymbol {x}, \bar {\Psi} _ {\boldsymbol {x}}} \boldsymbol {\alpha}\right), \tag {9} +$$ + +$$ +\begin{array}{l} \text {s . t .} \| P _ {\boldsymbol {x}, \bar {\Psi} _ {\boldsymbol {x}}} \boldsymbol {\alpha} \| _ {p} \leq \epsilon , \quad \Psi_ {\boldsymbol {x}} = h (\Omega_ {\boldsymbol {x}}, \mathcal {G}). 
\end{array} +$$ + +As we show in the supplementary materials, we can solve (9) for $p = \infty$ and get the closed form update for $e$ as + +$$ +e = \epsilon \cdot \frac {\boldsymbol {P} _ {\boldsymbol {x} , \bar {\Psi} _ {\boldsymbol {x}}} \boldsymbol {\nu}}{\| \boldsymbol {P} _ {\boldsymbol {x} , \bar {\Psi} _ {\boldsymbol {x}}} \boldsymbol {\nu} \| _ {\infty}}, \quad \boldsymbol {\nu} \triangleq \operatorname {s g n} \left(\boldsymbol {g} _ {\boldsymbol {x}, \Psi_ {\boldsymbol {x}}}\right). \tag {10} +$$ + +We further enhance the effectiveness of the attack, especially for the case when the gradients of both the targeted and non-targeted classes are aligned (have positive correlation). In such instances, our approach involves finding the direction $e$ using + +$$ +\min _ {e} e ^ {T} \left(- \frac {\mathbf {g} _ {\mathbf {x} , \Psi_ {\mathbf {x}}}}{\| \mathbf {g} _ {\mathbf {x} , \Psi_ {\mathbf {x}}} \| _ {2}} + \frac {\mathbf {g} _ {\mathbf {x} , \bar {\Psi} _ {\mathbf {x}}}}{\| \mathbf {g} _ {\mathbf {x} , \bar {\Psi} _ {\mathbf {x}}} \| _ {2}}\right) \text {s . t .} \| e \| _ {p} \leq \epsilon . \tag {11} +$$ + +We provide more details and analysis in the supplementary. + +# 4.2. Consistent Target Set via Knowledge Graph + +We obtain a consistent target set by developing an efficient search algorithm over a knowledge graph $\mathcal{G}$ that encodes label dependencies. Assume $\mathcal{G} = (\mathcal{C},\mathcal{E})$ is a directed acyclic knowledge graph built on the labels $\mathcal{C}$ , where $\mathcal{E}$ denotes the set of edges (see below for details about building this graph). A consistent target set $\Psi_{x}$ is defined as a superset of the target nodes/labels $\Omega_{x}$ that if attacked successfully leads to MLL outputs so that $i)$ when MLL predicts 1 for a parent node/label, then at least one of its children is also predicted as 1; $ii)$ when all children of a node/label are predicted as 0, then the parent is predicted as 0. 
+ +Algorithm 1 shows our algorithm and the time complexity for each step to obtain the consistent target set. The algorithm works as follows. Given the target set $\Omega_{x}$ , MLL predictions $\mathcal{S}$ , and the adjacency matrix $\mathcal{E}$ of the knowledge + +graph, the algorithm finds the minimal superset of $\Omega_{\mathbf{x}}$ to be modified. While attacking a label, we need to maintain its consistency with respect to its children and parents. To maintain children consistency, each child of the target node must be turned OFF unless that child has multiple parents ON. We parse the path from target node to the leaf nodes and perform the same operation on every node. Similarly, to maintain parents consistency, all parents must be turned OFF unless some parent has more than one child ON. We perform this process for each node along the path from target node to the root until there are no more nodes to modify. The upper bound of algorithm's time complexity is $\mathcal{O}(\Omega \mathcal{C})$ . As Figure 4 shows, on the same graph, consistent target sets depend on the MLL predictions. + +Knowledge Graph Construction. To construct $\mathcal{G}$ , we use WordNet [54], which contains rich semantic relationships between labels $^2$ . One can also use other sources, such as ConceptNet [72] or OpenImages semantic hierarchy [40]. We build a tree $\mathcal{G} = (\mathcal{C}, \mathcal{E})$ on all labels $\mathcal{C}$ using hypernym and hyponym relations of labels. This can also be easily extended to other relationships e.g., antonymy, entailment, etc. For each label in $\mathcal{C}$ , we use WordNet to extract its parent and child labels (e.g., for 'car', we obtain 'vehicle' as parent using its hybernyms). Since a word can be associated with several synsets, we choose the synset with the closest match to the label description. To build the tree, we use the maximum WUP similarity [80] between a child and multiple parent nodes to select a single parent. + +# 5. 
Experiments + +# 5.1. Experimental Setup + +Datasets. We use Pascal-VOC [24], NUS-WIDE [16] and OpenImages [40] for studying the effectiveness of multi-label attacks. For Pascal-VOC, we trained each MLL model on 8,000 images from the training sets of PASCAL-VOC 2007 and PASCAL-VOC 2012 and created the adversarial examples for the test set of PASCAL-VOC 2007. To build $\mathcal{G}$ , we extracted abstract classes from WordNet using which and the original 20 labels, we obtained 35 labels/nodes. For NUS-WIDE, we trained each MLL model on 150K images from the training set and attacked the models using the test set of the dataset. We used Wordnet to extract abstract classes and built a tree on labels. The total number of labels are 116, which includes 80 original labels and 36 additional abstract classes from WordNet. For OpenImages, we used pre-trained model from [64] and used test images to generate the attacks. We use the official class hierarchy provided in OpenImages as semantic relationship information. + +Multi-Label Recognition Models. We investigate the ef + +Algorithm 1: Consistent Target Set Construction +Input: $\Omega$ : Target Set, $S$ : MLL Label Predictions, $\mathcal{E}$ : Knowledge Graph's Adjacency Matrix Output: $\Gamma$ Expanded Target Set Procedure: $f_{select}(X)$ : return $\{i:X_i = True\}$ Procedure $f_{child.}(n,\mathcal{E},\mathcal{S})$ .. return $f_{select}(\mathcal{E}_{[n,:]}\odot \mathcal{S} == 1)$ Procedure $f_{par.}(n,\mathcal{E},\mathcal{S})$ .. return $f_{select}(\mathcal{E}_{[:n]}\odot \mathcal{S} == 1)$ +Procedure Consistent_Comp $(n,V,\Gamma ,f_1,f_2)$ . 
Queue Q I $\leftarrow f_1(n,\mathcal{E},\mathcal{S})$ $\triangleright \mathcal{O}(1)$ Q.enqueue(I) $\triangleright \mathcal{O}(1)$ while $\mathcal{Q}$ is not empty do $\triangleright \mathcal{O}(\mathcal{C})$ $v_{n} = \mathcal{Q}.dequeue()$ if $v_{n}\notin \mathcal{V}$ then $\nu \gets \nu \cup \{v_n\}$ $\triangleright \mathcal{O}(1)$ $I\gets f_2(v_n,\mathcal{E},\mathcal{S})\backslash \Gamma$ if $|I| < 2$ then $\Gamma \leftarrow \Gamma \cup \{v_n\}$ $\triangleright \mathcal{O}(1)$ $I\gets f_1(v_n,\mathcal{E},\mathcal{S})$ Q.enqueue(I) + $\Gamma = \{\}$ +foreach $n\in \Omega$ do $\triangleright \mathcal{O}(\Omega)$ V = {n} + $\Gamma \leftarrow$ Consistent_Comp(n,V,Γ,fchild., $f_{par.})$ $\triangleright \mathcal{O}(\mathcal{C})$ $\Gamma \leftarrow$ Consistent_Comp(n,V,Γ,fpar., $f_{child.})$ $\triangleright \mathcal{O}(\mathcal{C})$ + +![](images/493e62d6104bbb3d867c5856ea4fe00074275e03170d6275985f5b07d42b3833.jpg) +Figure 4. Examples of different consistent target sets obtained by Algorithm 1. Green nodes show the present labels predicted by the MLL and $\Omega = \{t\}$ is the target. The labels to be modified, $\Psi$ are shown within the red region and the labels to be fixed $\bar{\Psi}$ are shown within the green region. + +fectiveness of multi-label attacks on three MLL models. + +- ML-GCN [15]: It explicitly learns relationships among labels using Graph Convolutional Networks (GCN). It builds a graph using the word embeddings and the cooccurrence matrix of labels and uses a GCN to extract information about label relationships. We trained the model using the binary cross-entropy loss. +- Asymmetric Loss (ASL) [64]: It is an effective multi-label learning method that uses a novel loss for better optimization over highly imbalanced positive and negative class distributions. Following their experimental setting, we trained the TResNet-L [63] backbone. 
+- ML-Decoder [65]: It is an attention-based unified decoder architecture for zero-shot, single-label, and multi-label classification. It uses a group-decoding scheme to alleviate the problem of scaling to large number of classes. + +Perturbation Generation. For PASCAL-VOC and NUS-WIDE, we show results on a range of perturbation budgets. + +![](images/a766f3d13113a029f1ff315e8ec21514cfe74780e4f57afb984469b3a7ae8182.jpg) +Figure 5. Naive fooling rate $(\mathrm{FR}_N)$ and graph-based fooling rate $(\mathrm{FR}_S)$ of different attacks on ML-GCN model, trained on PASCAL-VOC for one and two label/node attacks. The x-axis shows the upper bound on the $l_{\infty}$ -norm of perturbations $(\epsilon)$ . + +![](images/e1d49b422ba75be5eb73d3a2b25c250a34ba77532aea66c9e6c041fe11b9a9ca.jpg) + +![](images/1f9e5078065947deb5521b92330279779a76f8db0c34f4b06724f4fe3c6ab520.jpg) + +![](images/b7eff2bc4297de6a49667ff653d328d72bcfb2ee6f694a4fd1e7e731acacb9a4.jpg) + +
DatasetPASCAL-VOCNUS-WIDE
Target Set Size|Ω| = 1|Ω| = 2|Ω| = 1|Ω| = 2
ModelAttack↑FRN↑FRS↓NTR↑SSIM↑FRN↑FRS↓NTR↑SSIM↑FRN↑FRS↓NTR↑SSIM↑FRN↑FRS↓NTR↑SSIM
ML-GCN [15]
MLA-C [71]99.968.93.00.9699.860.22.80.9796.427.40.40.9792.418.50.40.97
MLA-LP [65]56.16.700.10.9946.76.000.30.9919.33.500.10.9811.43.300.00.98
GMLA (Ours)100.099.42.70.97100.098.42.50.9899.295.80.50.9799.191.30.40.97
ASL [64]MLA-U [71]100.052.84.60.97100.048.34.80.98100.050.02.00.97100.043.32.10.97
MLA-C [71]100.039.72.30.9799.733.22.10.98100.035.50.70.97100.030.00.70.96
MLA-LP [65]15.82.400.10.9911.92.900.50.9920.84.800.00.9816.13.100.00.98
GMLA (Ours)100.098.82.20.97100.098.82.00.98100.096.10.80.97100.093.20.70.97
ML-Dec [65]MLA-U [71]99.766.25.30.9799.862.05.70.9898.856.44.10.9797.950.44.60.98
MLA-C [71]99.150.62.70.9897.540.72.40.9773.630.41.00.9768.226.70.90.97
MLA-LP [65]19.43.700.10.9817.63.200.20.9813.34.100.00.979.72.900.00.98
GMLA (Ours)99.196.22.70.9899.397.12.50.9795.184.91.10.9793.982.01.00.98
+ +Table 1. Experimental evaluation of the four attack methods on three models for $\epsilon = 0.01$ . The values represent the mean computed using the attack performance across all the combinations of target classes of size $|\Omega|$ . + +For OpenImages with 9,600 labels, we perform experiments for large-scale attacks with different sizes of the target set for a fixed epsilon value. To generate the target sets for attack, we randomly draw 100 samples of size $k$ labels. For each draw from OpenImages, we randomly sample $k / 2$ leaf nodes (labels) from the graph $\mathcal{G}$ and sample the remaining labels which are not part of the graph. + +Baselines. We use MLA-U and MLA-C as baselines, following Song et al. [71]. Additionally, we use MLA-LP [65] as a baseline, which generates adversarial perturbation for multi-label recognition by solving a linear programming problem using the interior point method while minimizing the $l_{\infty}$ norm. In contrast to other methods, it requires computing the Jacobian at each optimization step. In our experiments, MLA-LP did not converge for OpenImages. To provide a comprehensive comparison, we extend our evaluation to ML-DP [71], a greedy algorithm that computes multi-label attack perturbations using constraint linearization as introduced in DeepFool [55]. We show the results for ML-DP in supplementary material. + +Evaluation Metrics. Let $\mathcal{I}$ be the set of images that are attacked and $\mathcal{A} \subseteq \mathcal{I}$ denote the set of images that are successfully attacked, i.e., for $x \in \mathcal{A}$ , all labels in $\Omega_x$ change after the attack. 
Let $\mathcal{A}_{\mathcal{G}} \subseteq \mathcal{A}$ denote the subset of $\mathcal{A}$ for + +which the attack produces semantically consistent predictions in the output of MLL according to $\mathcal{G}$ + +We define naive fooling rate, $FR_{N}$ and semantic-based fooling rate, $FR_{S}$ , as + +$$ +F R _ {N} = \frac {| \mathcal {A} |}{| \mathcal {I} |}, F R _ {S} = \frac {| \mathcal {A} _ {\mathcal {G}} |}{| \mathcal {I} |}. \tag {12} +$$ + +Thus, $FR_{N}$ measures fraction of attacked images whose attacks has been successful, without considering whether the MLL predictions are semantically consistent. On the other hand, $FR_{S}$ captures fraction of attacked images whose attacks have been successful and produced semantically consistent MLL predictions. We also define non-target flip rate, $NT_{R}$ , which is the percentage of semantically unrelated labels (labels in $\bar{\Psi}_{k}$ ) which were flipped by the attack, i.e., + +$$ +N T _ {R} = \frac {1}{| \mathcal {A} |} \sum_ {k \in \mathcal {A}} \frac {\sum_ {i \in \bar {\Psi} _ {k}} \left(1 - \delta \left(f _ {i} ^ {(k)} , y _ {i} ^ {(k)}\right)\right)}{| \bar {\Psi} _ {k} |}, \tag {13} +$$ + +where, $\delta$ is Kronecker delta function that equals 1 when the two inputs are equal and 0 otherwise, $y_{i}^{(k)},f_{i}^{(k)}\in \{0,1\}$ are the model predictions on clean and adversarial images respectively, of $i^{th}$ non-target class of $k^{th}$ successfully attacked image. Finally, we measure the imperceptibility of the perturbations using average structural similarity (SSIM) between pairs of original and adversarial images. + +![](images/32ef6eeb8e4b544a25824f5f8631165aa5bc7b21672169b575d3abea9dde8c17.jpg) +Figure 6. Performance of different multi-label attacks with fixed $\epsilon = 0.05$ on OpenImages as we increase the target set size. 
+ +![](images/6dd053c946d34acba1843bbb6779842a7f3b9d734715ca6194ca6bb8c7faadfe.jpg) + +Note that $FR_{N}, FR_{S}$ , and $SSIM$ should be high while $NT_{R}$ should be low for a good attack method. + +# 5.2. Experimental Results + +Figure 5 shows the performance of different attack methods on PASCAL-VOC for one- and two-node attacks for different epsilon values using ML-GCN classifier. In Table 1, we show the evaluation across the three MLL models for a fixed $\epsilon = 0.01$ for which the performance of all attacks has plateaued3. We also show the evaluation on OpenImages for different target sizes in Fig. 6 and Tab. 2. From the results, we make the following conclusions: + +- As Fig. 5 shows, all methods achieve high naive fooling rate $FR_{N}$ given large enough perturbation budget, yet once we filter out the attacks leading to semantically inconsistent predictions, the performance $(FR_{S})$ of all baselines significantly decreases. However, our GMLA achieves very high semantic-based fooling rate than baselines. From Tab. 1 and 2, our method achieves naive fooling rate $FR_{N}$ comparable to the other methods but outperforms them over $FR_{S}$ by a significant margin. + +- Notice from Fig. 5 and 6 that MLA-U has higher naive and semantic-based fooling rates than MLA-C. The reason is the strong positive correlations learned among related cooccurring labels during model training, which MLA-U implicitly exploits. However, MLA-U being oblivious to the relationships among labels can inevitably affect unrelated labels, as shown in Tab. 1 and 2. This explains why MLA-U has the highest $N T_{R}$ across different settings. The difference becomes more apparent as we move to attack larger datasets e.g. OpenImages. This is because, a larger number of labels increases the chances of learning spurious correlations among unrelated labels. + +- Based on Fig. 
5, MLA-LP achieves the lowest performance compared to the other attack methods for both fooling rates on the PASCAL-VOC and NUS-WIDE datasets, and does not converge for the OpenImages experiments. This is because MLA-LP uses the interior point method at each iteration to solve a
Attack|Ω|=1|Ω|=2|Ω|=3|Ω|=4|Ω|=5
MLA-U0.47 ± 0.020.57 ± 0.030.66 ± 0.030.75 ± 0.040.87 ± 0.03
MLA-C0.32 ± 0.090.31 ± 0.090.09 ± 0.070.06 ± 0.040.0 ± 0.0
GMLA (Ours)0.32 ± 0.140.16 ± 0.120.21 ± 0.130.11 ± 0.070.06 ± 0.04
+ +Table 2. Percentage of semantically unrelated labels $(NT_R)$ affected at $\epsilon = 0.05$ for ASL[64] on OpenImages. + +![](images/6c9bf462a4ff3740658d5cf5d127a6069bae81a6e3fab69cd1b1a2f29d732a36.jpg) +Figure 7. Transferability across models on PASCAL-VOC. The y-axis shows the source model which generates the perturbation and x-axis shows the target model evaluated on that perturbation. + +system of equations, which define the constraints on the target and non-target labels. Because of the complex relationships among different labels, the feasible region for the given linear problem might be empty. This has also been identified by [96]. When the LP problem has a feasible solution, MLA-LP successfully finds the perturbation that satisfy the attack constraints. This explains why, for the small number of successfully attacked images, MLA-LP affects the least percentage of non-targeted labels, achieving low $\mathrm{NT}_R$ as shown in Tab. 1. + +- Each attack method produces imperceptible perturbations, as we constrain the maximum infinity norm of the perturbation to 0.01 (on images with pixel values between 0 to 1). Notice also from Table 1 that the average SSIM scores between the adversarial and original images is very close to 1, showing imperceptibility of perturbations. + +- Notice from Fig. 6 that MLA-C fails to successfully attack large-scale datasets and its performance drops drastically as we increase the target set size. As mentioned earlier, this is attributed to the observation that gradients of target and non-targeted classes are often opposite (as shown in Fig. 8) and as MLA-C optimizes the target and non-target loss simultaneously, the resulting perturbations are sub-optimal. From Tab. 2, MLA-C achieves lowest $\mathrm{NT}_R$ for target sizes greater than 2 but also performs poorly on fooling rates. 
Note that despite achieving high fooling rates $\mathrm{FR}_N$ and $\mathrm{FR}_G$ , our GMLA method affects very small percentage of semantically unrelated labels, which shows the success of our constraint proposed in (6). + +Attack Transferability. Figure 7 shows the cross-model transferability of different attacks. For each source model, + +![](images/cf4f16901dbae9f6a2bf53647b471bfe5733bb9aa4dc2b1fd0440a56da6f002d.jpg) +Figure 8. Stacked bar charts showing the correlation between the gradient of the loss on target labels $g_{\boldsymbol{x}, \Psi_{\boldsymbol{x}}}$ and on other labels $g_{\boldsymbol{x}, \Psi_{\boldsymbol{x}}}$ for different sizes of the target set on OpenImages. Left: Using (3) as objective. Right: using our proposed (6) that optimizes the loss on target labels while keeping the loss on non-target labels the same (as a constraint). + +![](images/809578826816b7b0766edb079e1ba6eff1f7f675c241b895830aa9e5f59b8d7a.jpg) + +![](images/a2058a7676f0f1da527cdc506401ccaeaa97740fbe2f35c5679231a0eda71ac2.jpg) +Figure 9. Results of attacking ML-GCN on PASCAL-VOC (first two columns) and NUS-WIDE (last two columns). Each column shows the model predictions for clean $(\epsilon = 0)$ and attacked images. Rounded rectangles group semantically related labels. Inconsistent predictions caused around target labels are shown with red rectangles. The red labels at the top are targeted labels and the arrows show the relationships. + +we compute the perturbations (scaled to $\epsilon = 0.1$ ) for images and evaluate the target models exclusively on the images that were successfully attacked by the respective source model (hence the diagonal values are all 1). Notice that although all attacks, other than MLA-LP, are transferable, GMLA semantic attack transfers better and achieves the highest $\mathrm{FR}_N$ and $\mathrm{FR}_S$ . From Table 1, notice that all attacks were able to achieve non-trivial graph-based fooling rate. 
However, GMLA is the most effective method to generate semantically consistent and generally transferable attacks.
Notice that in all cases, GMLA successfully modifies the necessary labels to ensure semantic consistency. + +In Figure 10, we visualize the perturbations computed by different methods and compare the SSIM (S) of baselines with GMLA. We also show the dot product (D) between the perturbation computed using each baseline method and the one computed using GMLA. We can see that GMLA finds different attack directions than the baseline methods, which results in semantically consistent and transferable attacks. + +# 6. Conclusions + +We developed an efficient framework to generate attacks for multi-label recognition that ensures semantic consistency of the output labels based on relationships among labels while effectively attacking a large number of labels. By extensive experiments on three datasets and several MLL models, we showed that our method generates both semantically consistent and successful adversarial attacks. + +# Acknowledgements + +This work is sponsored by Khoury College of Northeastern funds, DARPA (HR00112220001), NSF (IIS-2115110), ARO (W911NF2110276). Content does not necessarily reflect the position/policy of the Government. + +# References + +[1] Abhishek Aich, Calvin-Khang Ta, Akash Gupta, Chengyu Song, Srikanth Krishnamurthy, Salman Asif, and Amit Roy-Chowdhury. Gama: Generative adversarial multi-object scene attacks. Advances in Neural Information Processing Systems, 35:36914-36930, 2022. 1, 3 +[2] Abhishek Aich, Shasha Li, Chengyu Song, M Salman Asif, Srikanth V Krishnamurthy, and Amit K Roy-Chowdhury. Leveraging local patch differences in multi-object scenes for generative adversarial attacks. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1308-1318, 2023. 1, 3 +[3] N. Akhtar and A. Mian. Threat of adversarial attacks on deep learning in computer vision: A survey. arXiv, 2018. 2 +[4] A. Athalye, N. Carlini, and D. A. Wagner. 
Obfuscated gradients give a false sense of security: Circumventing defenses to adversarial examples. 2018. 2 +[5] Yuanhao Ban and Yinpeng Dong. Pre-trained adversarial perturbations. In Advances in Neural Information Processing Systems, pages 1196-1209. Curran Associates, Inc., 2022. 2 +[6] Emanuel Ben-Baruch, Tal Ridnik, Itamar Friedman, Avi Ben-Cohen, Nadav Zamir, Asaf Noy, and Lihi Zelnik-Manor. Multi-label classification with partial annotations using class-aware selective loss. 2022. 2 +[7] Zikui Cai, Xinxin Xie, Shasha Li, Mingjun Yin, Chengyu Song, Srikanth V. Krishnamurthy, Amit K. Roy-Chowdhury, and M. Salman Asif. Context-aware transfer attacks for object detection. ArXiv, 2021. 3 +[8]Zikui Cai, Shantanu Rane, Alejandro E. Brito, Chengyu Song,Srikanth V.Krishnamurthy,Amit K.Roy-Chowdhury, and M.Salman Asif.Zero-query transfer attacks on context-aware object detectors.IEEE Conference on Computer Vision and Pattern Recognition,2022.3 +[9] N. Carlini and D. Wagner. Adversarial examples are not easily detected: Bypassing ten detection methods. Workshop on Artificial Intelligence and Security, 2017. 1 +[10] N. Carlini and D. Wagner. Towards evaluating the robustness of neural networks. IEEE Symposium on Security and Privacy, 2017. 2 +[11] Y. Carmon, A. Raghunathan, L. Schmidt, P. Liang, and J. C. Duchi. Unlabeled data improves adversarial robustness. Neural Information Processing Systems, 2019. 2 +[12] P.-Y. Chen, Y. Sharma, H. Zhang, J. Yi, and C.-J. Hsieh. Ead: Elastic-net attacks to deep neural networks via adversarial examples. AAAI Conference on Artificial Intelligence, 2018. 2 +[13] T. Chen, M. Xu, X. Hui, H. Wu, and L. Lin. Learning semantic-specific graph representation for multi-label image recognition. IEEE International Conference on Computer Vision, 2019. 2 +[14] Zhao-Min Chen, Xiu-Shen Wei, Xin Jin, and Yanwen Guo. Multi-label image recognition with joint class-aware map disentangling and label correlation embedding. 
IEEE International Conference on Multimedia and Expo, 2019. 1 +[15] Z. M. Chen, X. S. Wei, P. Wang, and Y. Guo. Multi-label image recognition with graph convolutional networks. IEEE + +Conference on Computer Vision and Pattern Recognition, abs/1904.03582, 2019. 5, 6 +[16] T. S. Chua, J. Tang, R. Hong, H. Li, Z. Luo, and Y. T. Zheng. Nus-wide: A real-world web image database from national university of bangalore. ACM International Conference on Image and Video Retrieval, 2009. 5 +[17] F. Croce and M. Hein. Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks. ArXiv, 2020. 4 +[18] S. D. Dao, E. Zhao, D. Phung, and J. Cai. Multi-label image classification with contrastive learning. arXiv preprint, arXiv:2107.11626, 2021. 2 +[19] J. Deng, N. Ding, Y. Jia, A. Frome, K. Murphy, S. Bengio, Y. Li, H. Neven, and H. Adam. Large-scale object classification using label relation graphs. European Conference on Computer Vision, 2014. 2 +[20] G. W. Ding, Y. Sharma, K. Y. Lui, and R. Huang. Max-margin adversarial (mma) training: Direct input space margin maximization through adversarial training. arXiv, 2020. 2 +[21] Zixuan Ding, Ao Wang, Hui Chen, Qiang Zhang, Pengzhang Liu, Yongjun Bao, Weipeng Yan, and Jungong Han. Exploring structured semantic prior for multi label recognition with incomplete labels. 2023. 1 +[22] Junhao Dong, Seyed-Mohsen Moosavi-Dezfooli, Jianhuang Lai, and Xiaohua Xie. The enemy of my enemy is my friend: Exploring inverse adversaries for improving adversarial training. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 24678–24687, 2023. 2 +[23] Y. Dong, Z. Deng, T. Pang, H. Su, and J. Zhu. Adversarial distributional training for robust deep learning. arXiv, 2020. 2 +[24] M. Everingham, S. M. A. Eslami, L. Van-Gool, C. K. I. Williams, J. Winn, and A. Zisserman. The Pascal visual object classes (voc) challenge. International Journal of Computer Vision, 2010. 5 +[25] K. 
Eykholt, I. Evtimov, E. Fernandes, B. Li, A. Rahmati, F. Tramèr,
Multiguard: Provably robust multi-label classification against adversarial examples. Advances in Neural Information Processing Systems, 2022. 1, 3 +[37] Youngwook Kim, Jae Myung Kim, Zeynep Akata, and Jungwoo Lee. Large loss matters in weakly supervised multi-label classification. 2022. 1 +[38] Takumi Kobayashi. Two-way multi-label loss. 2023. 1 +[39] A. Kurakin, I. Goodfellow, and S. Bengio. Adversarial machine learning at scale. International Conference on Learning Representations, 2017. 1 +[40] A. Kuznetsova, H. Rom, N. Alldrin, J. Uijlings, I. Krasin, J. Pont-Tuset, S. Kamali, S. Popov, M. Malloci, A. Kolesnikov, T. Duerig, and V. Ferrari. The open images dataset v4: Unified image classification, object detection, and visual relationship detection at scale. International Journal of Computer Vision, 2016. 2, 4, 5 +[41] J. Lanchantin, T. Wang, V. Ordonez, and Y. Qi. General multi-label image classification with transformers. IEEE Conference on Computer Vision and Pattern Recognition, 2021. 2 +[42] K. Lee, K. Lee, H. Lee, and J. Shin. A simple unified framework for detecting out-of-distribution samples and adversarial attacks. 2018. 2 +[43] Peng Li, Peng Chen, Yonghong Xie, and Dezheng Zhang. Bi-modal learning with channel-wise attention for multi-label image classification. IEEE Access, 2020. 2 +[44] Q. Li, M. Qiao, W. Bian, and D. Tao. Conditional graphical lasso for multi-label image classification. IEEE Conference on Computer Vision and Pattern Recognition, 2016. 2 +[45] Q. Li, X. Peng, Y. Qiao, and Q. Peng. Learning label correlations for multi-label image recognition with graph networks. Pattern Recognition Letters, 2020. 2 +[46] X. Li, F. Zhao, and Y. Guo. Multi-label image classification with a probabilistic label enhancement model. In UAI, 2014. 2 +[47] Y. Li and L. Yang. More correlations better performance: Fully associative networks for multi-label image classification. International Conference on Pattern Recognition, 2021. 2 + +[48] Y. Li, Y. 
Song, and J. Luo. Improving pairwise ranking for multi-label image classification. IEEE Conference on Computer Vision and Pattern Recognition, 2017. 2 +[49] Z. Li, W. Lu, Z. Sun, and W. Xing. Improving multi-label classification using scene cues. Multimedia Tools and Applications, 2017. 2 +[50] Dekun Lin. Probability guided loss for long-tailed multi-label image classification. 2023. 1 +[51] A. Madry, A. Makelov, L. Schmidt, D. Tsipras, and A. Vladu. Towards deep learning models resistant to adversarial attacks. International Conference on Learning Representations, 2018. 2 +[52] S. Melacci, G. Ciravegna, A. Sotgiu, A. Demontis, B. Biggio, M. Gori, and F. Roli. Domain knowledge alleviates adversarial attacks in multi-label classifiers. 2021. 3 +[53] J.-H. Metzen, M.-C. Kumar, T. Brox, and V. Fischer. Universal adversarial perturbations against semantic image segmentation. International Conference on Computer Vision, 2019. 1 +[54] G. A. Miller. Wordnet: a lexical database for english. Communications of the ACM, 38(11), 1995. 5 +[55] S. Moosavi-Dezfooli, A. Fawzi, and P. Frossard. Deepfool: a simple and accurate method to fool deep neural networks. IEEE Conference on Computer Vision and Pattern Recognition, 2016. 6 +[56] J. Nam, E. L. Mencia, H. J. Kim, and J. Furnkranz. Maximizing subset accuracy with recurrent neural networks in multi-label classification. Neural Information Processing Systems, 2017. 2 +[57] T. Pang, K. Xu, C. Du, N. Chen, and J. Zhu. Improving adversarial robustness via promoting ensemble diversity. International Conference on Machine learning, 2019. 2 +[58] T. Pang, K. Xu, Y. Dong, C. Du, N. Chen, and J. Zhu. Rethinking softmax cross-entropy loss for adversarial robustness. arXiv, 2020. +[59] T. Pang, X. Yang, Y. Dong, K. Xu, H. Su, and J. Zhu. Boosting adversarial training with hypersphere embedding. Neural Information Processing Systems, 2020. 2 +[60] Nicolas Papernot, Patrick Mcdaniel, Somesh Jha, Matt Fredrikson, Z. 
Berkay Celik, and Ananthram Swami. The limitations of deep learning in adversarial settings. IEEE European Symposium on Security and Privacy (EuroS&P), 2016. 1 +[61] Tao Pu, Tianshui Chen, Hefeng Wu, and Liang Lin. Semantic-aware representation blending for multi-label image recognition with partial labels. 2022. 1 +[62] Zeyu Qin, Yanbo Fan, Yi Liu, Li Shen, Yong Zhang, Jue Wang, and Baoyuan Wu. Boosting the transferability of adversarial attacks with reverse adversarial perturbation. Advances in Neural Information Processing Systems, 35:29845-29858, 2022. 2 +[63] T. Ridnik, H. Lawen, A. Noy, and I. Friedman. Tresnet: High performancegpu-dedicated architecture. ArXiv preprint arXiv:2003.13630, 2020.5 +[64] Tal Ridnik, Emanuel Ben-Baruch, Nadav Zamir, Asaf Noy, Itamar Friedman, Matan Protter, and Lihi Zelnik-Manor. Asymmetric loss for multi-label classification. 2021. 5, 6, 7 + +[65] Tal Ridnik, Gilad Sharir, Avi Ben-Cohen, Emanuel Ben-Baruch, and Asaf Noy. Ml-decoder: Scalable and versatile classification head. 2023. 5, 6 +[66] Jérôme Rony, Luiz G Hafemann, Luiz S Oliveira, Ismail Ben Ayed, Robert Sabourin, and Eric Granger. Decoupling direction and norm for efficient gradient-based 12 adversarial attacks and defenses. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4322-4330, 2019. 2 +[67] J. Cohen and E. Rosenfeld and Z. Kolter. Certified adversarial robustness via randomized smoothing. International Conference on Machine learning, 2019. 2 +[68] A. Shafahi, M. Najibi, A. Ghiasi, Z. Xu, J. Dickerson, C. Studer, L. Davis, G. Taylor, and T. Goldstein. Adversarial training for free! Neural Information Processing Systems, 2019. +[69] Nasim Shafiee and Ehsan Elhamifar. Zero-shot attribute attacks on fine-grained recognition models. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part V, pages 262-282. Springer, 2022. 1, 2 +[70] Nitish Shukla and Sudipta Banerjee. 
Generating adversarial attacks in the latent space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 730-739, 2023. 1 +[71] Q. Song, H. Jin, X. Huang, and X. Hu. Multi-label adversarial perturbations. IEEE International Conference on Data Mining, 2018. 1, 2, 3, 6 +[72] R. Speer, J. Chin, and C. Havasi. Conceptnet 5.5: An open multilingual graph of general knowledge. 2017. 5 +[73] C. Szegedy, W. Zaremba, I. Sutskever, J. Bruna, D. Erhan, I. Goodfellow, and R. Fergus. Intriguing properties of neural networks. International Conference on Learning Representations, 2014. 2 +[74] N. Tursynbek, A. Petiushko, and I. Oseledets. Geometry-inspired top-k adversarial perturbations. arXiv, 2020. 2 +[75] J. Uesato, J. B. Alayrac, P. Huang, R. Stanforth, A. Fawzi, and P. Kohli. Are labels required for improving adversarial robustness? Neural Information Processing Systems, 2019. 2 +[76] Thomas Verelst, Paul K Rubenstein, Marcin Eichner, Tinne Tuytelaars, and Maxim Berman. Spatial consistency loss for training multi-label classifiers from single-label annotations. 2023. 2 +[77] J. Wang, Y. Yang, J. Mao, Z. Huang, C. Huang, and W. Xu. Cnn-rnn: A unified framework for multi-label image classification. 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 2 +[78] Z. Wang, T. Chen, G. Li, G. Li, and L. Lin. Multi-label image recognition by recurrently discovering attentional regions. IEEE International Conference on Computer Vision, 2017. 2 +[79] Y. Wu, H. Liu, S. Feng, Y. Jin, G. Lyu, and Z. Wu. Gm-mlic: Graph matching based multi-label image classification. International Joint Conference on Artificial Intelligence, 2021. 2 + +[80] Z. Wu and M. Palmer. Verbs semantics and lexical selection. Annual Meeting on Association for Computational Linguistics, 1994. 5 +[81] C. Xie, Z. Zhang, Y. Zhou, S. Bai, J. Wang, Z. Ren, and A. Yuille. Improving transferability of adversarial examples with input diversity. 
IEEE Conference on Computer Vision and Pattern Recognition, 2019. 2 +[82] Ming-Kun Xie, Jiahao Xiao, and Sheng-Jun Huang. Label-aware global consistency for multi-label learning with single positive labels. 2022. 1 +[83] J. Xu, H. Tian, Z. Wang, Y. Wang, W. Kang, and F. Chen. Joint input and output space learning for multi-label image classification. IEEE Transactions on Multimedia, 2020. 2 +[84] W. Xu, D. Evans, and Y. Qi. Feature squeezing: Detecting adversarial examples in deep neural networks. Network and Distributed Systems Security Symposium, 2018. 2 +[85] H. Yang, J. T. Zhou, Y. Zhang, B. Gao, J. Wu, and J. Cai. Exploit bounding box annotations for multi-label object recognition. IEEE Conference on Computer Vision and Pattern Recognition, 2016. 1 +[86] Zhuo Yang, Yufei Han, and Xiangliang Zhang. Characterizing the evasion attackability of multi-label classifiers. 2021. 1, 2 +[87] Z. Yang, Y. Han, and X. Zhang. Attack transferability characterization for adversarially robust multi-label classification. 2021. 1, 2 +[88] J. Ye, J. He, X. Peng, W. Wu, and Y. Qiao. Attention-driven dynamic graph convolutional network for multi-label image recognition. European Conference on Computer Vision, 2020. 2 +[89] R. You, Z. Guo, L. Cui, X. Long, S. Y. Bao, and S. Wen. Cross-modality attention with semantic graph embedding for multi-label classification. AAAI Conference on Artificial Intelligence, 2020. 2 +[90] X. Yuan, P. He, Q. Zhu, and X. Li. Adversarial examples: Attacks and defenses for deep learning. IEEE Transactions on Neural Networks and Learning Systems, 2019. 2 +[91] ML. Zhang and Z. Zhou. Multilabel neural networks with applications to functional genomics and text categorization. IEEE Transactions on Knowledge and Data Engineering, 2006. 2 +[92] Shu Zhang, Ran Xu, Caiming Xiong, and Chetan Ramaiah. Use all the labels: A hierarchical multi-label contrastive learning framework. 2022. 2 +[93] Z. Zhao, G. Chen, J. Wang, Y. Yang, F. Song, and J. Sun. 
Attack as defense: Characterizing adversarial examples using robustness. arXiv, 2021. 2 +[94] Donghao Zhou, Pengfei Chen, Qiong Wang, Guangyong Chen, and Pheng-Ann Heng. Acknowledging the unknown for multi-label learning with single positive labels. 2022. 1 +[95] N. Zhou, W. Luo, X. Lin, P. Xu, and Z.. Zhang. Generating multi-label adversarial examples by linear programming. International Joint Conference on Neural Networks, 2020. 3 +[96] N. Zhou, W. Luo, J. Zhang, L. Kong, and H. Zhang. Hiding all labels for multi-label images: An empirical study of adversarial examples. International Joint Conference on Neural Networks, 2021. 2, 7 + +[97] Y. Zhu, J. T. Kwok, and Z. Zhou. Multi-label learning with global and local label correlation. IEEE Transactions on Knowledge and Data Engineering, 2018. 2 +[98] D. Zügner, A. Akbarnejad, and S. Gümnmann. Adversarial attacks on neural networks for graph data. International Conference on Knowledge Discovery & Data Mining, 2018. 1 \ No newline at end of file diff --git a/2024/Semantic-Aware Multi-Label Adversarial Attacks/images.zip b/2024/Semantic-Aware Multi-Label Adversarial Attacks/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..3347c2d4aa586ec2103798de6658a535d58df919 --- /dev/null +++ b/2024/Semantic-Aware Multi-Label Adversarial Attacks/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1989521b74999fac1572eab737c60203dcc62b2f697adab2c1ae82a802b41615 +size 713349 diff --git a/2024/Semantic-Aware Multi-Label Adversarial Attacks/layout.json b/2024/Semantic-Aware Multi-Label Adversarial Attacks/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..eb143c4eb01c485ab22accca01b805d2a044abb6 --- /dev/null +++ b/2024/Semantic-Aware Multi-Label Adversarial Attacks/layout.json @@ -0,0 +1,12052 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 143, + 103, + 451, + 119 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": 
[ + 143, + 103, + 451, + 119 + ], + "spans": [ + { + "bbox": [ + 143, + 103, + 451, + 119 + ], + "type": "text", + "content": "Semantic-Aware Multi-Label Adversarial Attacks" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 129, + 144, + 259, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 144, + 259, + 171 + ], + "spans": [ + { + "bbox": [ + 129, + 144, + 259, + 171 + ], + "type": "text", + "content": "Hassan Mahmood Northeastern University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 174, + 272, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 174, + 272, + 184 + ], + "spans": [ + { + "bbox": [ + 130, + 174, + 272, + 184 + ], + "type": "text", + "content": "mahmood.h@northeastern.edu" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 325, + 144, + 443, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 144, + 443, + 172 + ], + "spans": [ + { + "bbox": [ + 325, + 144, + 443, + 172 + ], + "type": "text", + "content": "Ehsan Elhamifar Northeastern University" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 307, + 174, + 460, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 174, + 460, + 184 + ], + "spans": [ + { + "bbox": [ + 307, + 174, + 460, + 184 + ], + "type": "text", + "content": "e.elhamifar@northeastern.edu" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "spans": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 239, + 290, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 239, + 290, + 550 + ], + "spans": [ + { + "bbox": [ + 46, + 239, + 290, + 550 + ], + "type": "text", + "content": "Despite its importance, generating attacks for multi-label 
learning (MLL) models has received much less attention compared to multi-class recognition. Attacking an MLL model by optimizing a loss on the target set of labels has often the undesired consequence of changing the predictions for other labels. On the other hand, adding a loss on the remaining labels to keep them fixed leads to highly negatively correlated gradient directions, reducing the attack effectiveness. In this paper, we develop a framework for crafting effective and semantic-aware adversarial attacks for MLL. First, to obtain an attack that leads to semantically consistent predictions across all labels, we find a minimal super-set of the target labels, referred to as consistent target set. To do so, we develop an efficient search algorithm over a knowledge graph, which encodes label dependencies. Next, we propose an optimization that searches for an attack that modifies the predictions of labels in the consistent target set while ensuring other labels will not get affected. This leads to an efficient algorithm that projects the gradient of the consistent target set loss onto the orthogonal direction of the gradient of the loss on other labels. Our framework can generate attacks on different target set sizes and for MLL with thousands of labels (as in OpenImages). Finally, by extensive experiments on three datasets and several MLL models, we show that our method generates both successful and semantically consistent attacks.1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 569, + 128, + 582 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 569, + 128, + 582 + ], + "spans": [ + { + "bbox": [ + 47, + 569, + 128, + 582 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 586, + 287, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 586, + 287, + 684 + ], + "spans": [ + { + "bbox": [ + 46, + 586, + 287, + 684 + ], + "type": "text", + "content": "Despite the tremendous success of Deep Neural Networks (DNNs) for image recognition, DNNs are vulnerable to adversarial attacks, i.e., imperceptible image perturbations that result in incorrect prediction with high confidence [9, 25, 27, 30, 35, 39, 53, 60, 69, 70, 98]. Understanding and improving the robustness of DNNs has motivated a large body of research on generating adversarial perturbations and subsequently using them to design defense mech-" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 320, + 216, + 531, + 403 + ], + "blocks": [ + { + "bbox": [ + 320, + 216, + 531, + 403 + ], + "lines": [ + { + "bbox": [ + 320, + 216, + 531, + 403 + ], + "spans": [ + { + "bbox": [ + 320, + 216, + 531, + 403 + ], + "type": "image", + "image_path": "c0b229c86348da89cef4ce1a9fb2b18faf52001d2f7849124935241c76f5a36c.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 407, + 545, + 465 + ], + "lines": [ + { + "bbox": [ + 305, + 407, + 545, + 465 + ], + "spans": [ + { + "bbox": [ + 305, + 407, + 545, + 465 + ], + "type": "text", + "content": "Figure 1. Generating effective attacks for an MLL model is challenging. Top: Two groups of semantically related labels. Green nodes show labels predicted as present before the attack. Bottom: While an attack on the target label 'bicycle' succeeds, it fails to turn off 'vehicle' and 'wheeled vehicle' for " + }, + { + "bbox": [ + 305, + 407, + 545, + 465 + ], + "type": "inline_equation", + "content": "\\epsilon < 0.2" + }, + { + "bbox": [ + 305, + 407, + 545, + 465 + ], + "type": "text", + "content": ". 
On the other hand, for " + }, + { + "bbox": [ + 305, + 407, + 545, + 465 + ], + "type": "inline_equation", + "content": "\\epsilon > 0.125" + }, + { + "bbox": [ + 305, + 407, + 545, + 465 + ], + "type": "text", + "content": ", the attack changes the prediction for the non-target label 'person', which is undesired." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 483, + 545, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 483, + 545, + 542 + ], + "spans": [ + { + "bbox": [ + 304, + 483, + 545, + 542 + ], + "type": "text", + "content": "anisms, e.g., by detecting attacks or retraining the model using perturbed images. The majority of existing works, however, have focused on multi-class recognition (MCR), in which only one class must be predicted in an image [14, 21, 26, 31, 37, 82, 85]." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 544, + 546, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 544, + 546, + 675 + ], + "spans": [ + { + "bbox": [ + 304, + 544, + 546, + 675 + ], + "type": "text", + "content": "On the other hand, many real-world applications require finding multiple labels in an image. This includes human-object interaction learning (e.g., recognizing hands and interacting objects), autonomous driving (e.g., recognizing cars, bikes, pedestrians, roads, signs, etc), assistive robotics and surveillance. Therefore, multi-label learning (MLL) aims at recognizing all labels in an image [14, 26, 38, 50, 61, 85, 94]. However, despite its importance and fundamental differences with respect to attacks for MCR (see Figure 1), adversarial attacks for MLL has received much less attention in the literature [1, 2, 36, 71, 86, 87]." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 677, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 677, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 547, + 713 + ], + "type": "text", + "content": "The main difference between attacks for MCR and MLL stems from the different ways decision boundaries between labels is learned and structured for the two problems. In" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 693, + 287, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 693, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 47, + 693, + 287, + 712 + ], + "type": "text", + "content": "1The code of this work is available at https://github.com/hassan-mahmood/SemanticMLLAttacks.git" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "24251" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 215 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 215 + ], + "type": "text", + "content": "MCR, different labels compete with each other as only one label must be present/predicted. Therefore, attacking an on present label leads to turning it off while automatically turning on another label, see Figure 2 (left). On the other hand, in MLL, labels do not compete, where none, some or all labels can be predicted as present in an image. Thus, attacking a present or an absent label can lead to changing the predictions for none, several or all other labels, as shown in Figure 2 (right). This often has the undesired effect of inconsistent predictions, which can simply be used to detect the attack (e.g., turning off 'pedestrian' can turn on 'bike' and 'stop sign' while turning off 'road')." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 216, + 289, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 216, + 289, + 384 + ], + "spans": [ + { + "bbox": [ + 46, + 216, + 289, + 384 + ], + "type": "text", + "content": "One can try to prevent changing predictions of other labels by crafting the attack while including a loss that enforces predictions of other labels to stay intact. However, as we show, the gradient of the loss for fixing other labels often is highly negatively correlated with the gradient of the loss on the label we want to attack, hence, counteracting the effect of each other. This problem gets more pronounced when the number of labels increases (e.g., in Open Images dataset [40] with 9,600 labels) and the gradient of this additional loss gets larger too. Also, fixing predictions for all other labels still may lead to semantic inconsistency among predictions (e.g., turning off 'vehicle' requires turning off 'car' and 'truck' too, otherwise 'vehicle' being absent while 'car' being present can be used to detect the attack)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 386, + 289, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 386, + 289, + 639 + ], + "spans": [ + { + "bbox": [ + 46, + 386, + 289, + 639 + ], + "type": "text", + "content": "Paper Contributions. We develop a framework for crafting adversarial attacks for MLL that addresses the above challenges. First, to obtain an attack on a target set of labels that leads to semantically consistent predictions across all labels, we find a minimal superset of the target set (referred to as consistent target set) to be attacked/modified. To do so, we develop an efficient search algorithm over a knowledge graph, which encodes label dependencies. 
Second, we show that finding the attack by optimizing the sum of two losses, one over the consistent target set and the other over other labels, has opposite gradient directions for the two losses, which leads to inefficient perturbations. Third, we propose an optimization that searches for an attack that modifies the predictions of labels in the consistent target set while ensuring that other labels will not get affected. Our optimization leads to a projected gradient algorithm that projects the gradient of the loss for the consistent target set onto the orthogonal direction of the gradient of the loss on other labels. Finally, by extensive experiments on three datasets and several MLL models, we show that our framework generates both successful and semantically consistent attacks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 642, + 134, + 654 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 642, + 134, + 654 + ], + "spans": [ + { + "bbox": [ + 47, + 642, + 134, + 654 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 658, + 186, + 672 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 658, + 186, + 672 + ], + "spans": [ + { + "bbox": [ + 47, + 658, + 186, + 672 + ], + "type": "text", + "content": "2.1. Multi-Label Recognition" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "text", + "content": "The goal of multi-label learning (MLL) is to find all classes of objects (or even abstract concepts) in an image. 
As compared to multi-class classification, which finds a sin" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 319, + 70, + 411, + 160 + ], + "blocks": [ + { + "bbox": [ + 319, + 70, + 411, + 160 + ], + "lines": [ + { + "bbox": [ + 319, + 70, + 411, + 160 + ], + "spans": [ + { + "bbox": [ + 319, + 70, + 411, + 160 + ], + "type": "image", + "image_path": "53b5d79a90093b316d4c54826ab27b08b7e4bd15af32c11749d91528b87ac8f4.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 165, + 547, + 205 + ], + "lines": [ + { + "bbox": [ + 304, + 165, + 547, + 205 + ], + "spans": [ + { + "bbox": [ + 304, + 165, + 547, + 205 + ], + "type": "text", + "content": "Figure 2. Left: In multi-class recognition (MCR), attacking the present label leads to automatically turning on another label, as labels compete with each other. Right: In multi-label learning (MLL), attacking a label can lead to none " + }, + { + "bbox": [ + 304, + 165, + 547, + 205 + ], + "type": "inline_equation", + "content": "(\\pmb{x}_1')" + }, + { + "bbox": [ + 304, + 165, + 547, + 205 + ], + "type": "text", + "content": ", some " + }, + { + "bbox": [ + 304, + 165, + 547, + 205 + ], + "type": "inline_equation", + "content": "(\\pmb{x}_2')" + }, + { + "bbox": [ + 304, + 165, + 547, + 205 + ], + "type": "text", + "content": " or all " + }, + { + "bbox": [ + 304, + 165, + 547, + 205 + ], + "type": "inline_equation", + "content": "(\\pmb{x}_3')" + }, + { + "bbox": [ + 304, + 165, + 547, + 205 + ], + "type": "text", + "content": " other labels changing." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 417, + 70, + 533, + 160 + ], + "blocks": [ + { + "bbox": [ + 417, + 70, + 533, + 160 + ], + "lines": [ + { + "bbox": [ + 417, + 70, + 533, + 160 + ], + "spans": [ + { + "bbox": [ + 417, + 70, + 533, + 160 + ], + "type": "image", + "image_path": "3bf7f6d733be83068af386401b041a3736562a9480ef00e21e8647d49b1ca177.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 224, + 547, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 224, + 547, + 356 + ], + "spans": [ + { + "bbox": [ + 304, + 224, + 547, + 356 + ], + "type": "text", + "content": "gle dominant class in an image, MLL is a harder task, since any combination of labels can be present in an image and many labels often correspond to small image regions. This has motivated a large body of research for designing effective MLL methods, using graphical models[44, 46], different loss functions for handling label imbalance [6, 18, 48, 49, 76, 91], exploiting external knowledge, label correlations, and hierarchical relations among labels [13, 19, 33, 43, 56, 78, 88, 89, 92, 97], or using a combination of label and image feature correlations [41, 45, 47, 77, 79, 83] to improve the multi-label performance." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 362, + 423, + 373 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 362, + 423, + 373 + ], + "spans": [ + { + "bbox": [ + 306, + 362, + 423, + 373 + ], + "type": "text", + "content": "2.2. 
Adversarial Attacks" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 378, + 547, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 378, + 547, + 521 + ], + "spans": [ + { + "bbox": [ + 304, + 378, + 547, + 521 + ], + "type": "text", + "content": "Deep Neural Networks (DNNs) have been shown to be vulnerable to small adversarial perturbations, which can easily fool the model [3, 12, 66, 73, 81]. Therefore, many works have studied different ways to design efficient attacks and defense mechanisms for DNNs [4, 5, 10, 11, 20, 22, 23, 28, 29, 34, 42, 51, 57–59, 62, 67–69, 74, 75, 84, 93]. The adversarial attacks can be divided into several categories based on different criteria [90] such as white-box and black-box, image agnostic and image-specific, targeted and untargeted, or restricted to perturb small image regions and unrestricted attacks. In the paper, we generate white-box attacks for multi-label recognition, i.e., assume access to the MLL model." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 530, + 479, + 541 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 530, + 479, + 541 + ], + "spans": [ + { + "bbox": [ + 306, + 530, + 479, + 541 + ], + "type": "text", + "content": "2.2.1 Multi-Label Adversarial Attacks" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 545, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 545, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 545, + 547, + 715 + ], + "type": "text", + "content": "Motivated by the increasing interest in the multi-label recognition problem, few works have recently studied MLL attacks. [71] studies a framework for attacking multi-label recognition and ranking systems. However, it does not exploit any relationships among labels to design attacks, which as we show is important to design effective attacks. We use the attacks from this work as baselines in our experiments. 
Yang et al. [86, 87] designed untargeted attacks for multi-label classification to change as many labels as possible and proposed a framework to measure how well an MLL model can be attacked. In comparison, our focus is targeted multi-label attacks with semantic relationships. Hu et al. [32] proposed to exploit ranking relations to design attacks for top-" + }, + { + "bbox": [ + 304, + 545, + 547, + 715 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 545, + 547, + 715 + ], + "type": "text", + "content": " multi-label models and [96] proposed an attack to" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "24252" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 67, + 74, + 264, + 240 + ], + "blocks": [ + { + "bbox": [ + 67, + 74, + 264, + 240 + ], + "lines": [ + { + "bbox": [ + 67, + 74, + 264, + 240 + ], + "spans": [ + { + "bbox": [ + 67, + 74, + 264, + 240 + ], + "type": "image", + "image_path": "38f2981baa0ae6ca00ec3d871a97df4efe9dcdb8d23775e72217fa4424416c23.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 246, + 288, + 305 + ], + "lines": [ + { + "bbox": [ + 46, + 246, + 288, + 305 + ], + "spans": [ + { + "bbox": [ + 46, + 246, + 288, + 305 + ], + "type": "text", + "content": "Figure 3. Multi-label learning predicts several labels for an image (see \"MLL Output\"). 
Attacking a target set ('vehicle' on the top or 'person' and 'bird' on the bottom) using a naive multi-label attack leads to prediction semantic inconsistencies ('car' and 'motorcycle' being on while 'vehicle' is off or 'person' and 'bird' being off while 'animal' is on). However, GMLA handles a large number of labels while achieving semantic consistency." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 320, + 288, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 320, + 288, + 571 + ], + "spans": [ + { + "bbox": [ + 46, + 320, + 288, + 571 + ], + "type": "text", + "content": "hide all labels present in an image, whereas we consider the minimal set of semantically related labels to be attacked. Aich et al. [2] leveraged local patch differences of different objects to generate multi-object attacks and [1] proposed a CLIP-based generative model to generate multi-object attacks in the black-box setting. Jia et al. [36] proposed theoretical robustness guarantees to defend against multi-label adversarial attacks and [52] exploited domain knowledge context to detect adversarial attacks. Context-aware attacks [7, 8] fool context-aware attack detection methods by attacking the label and its context simultaneously. The context in these works is defined in terms of cooccurring labels. In comparison, we propose to attack labels based on their semantic relationships. Moreover, none of these works have addressed the problem of negative gradient correlation in generating large-scale dataset attacks. Among the existing literature, Nan et al. [95] is also comparable to our attack method, and we use it as a baseline. They proposed a fast linear programming-based adversarial example generation algorithm for MLL to minimize the perturbation norm required to achieve a target label." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 582, + 250, + 596 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 582, + 250, + 596 + ], + "spans": [ + { + "bbox": [ + 47, + 582, + 250, + 596 + ], + "type": "text", + "content": "3. Multi-Label Learning Attack (MLA)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 599, + 146, + 612 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 599, + 146, + 612 + ], + "spans": [ + { + "bbox": [ + 47, + 599, + 146, + 612 + ], + "type": "text", + "content": "3.1. Problem Setting" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 617, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 288, + 713 + ], + "type": "text", + "content": "We study generating adversarial attacks for the Multi-Label Learning (MLL) task. In MLL, multiple labels can appear in an image, see Figure 3, as opposed to the multi-class recognition (MCR), where each image has only one label. Let " + }, + { + "bbox": [ + 46, + 617, + 288, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 46, + 617, + 288, + 713 + ], + "type": "text", + "content": " denote the set of all labels. 
For an image " + }, + { + "bbox": [ + 46, + 617, + 288, + 713 + ], + "type": "inline_equation", + "content": "x\\in \\mathbb{R}^d" + }, + { + "bbox": [ + 46, + 617, + 288, + 713 + ], + "type": "text", + "content": ", let " + }, + { + "bbox": [ + 46, + 617, + 288, + 713 + ], + "type": "inline_equation", + "content": "\\pmb {y}\\in \\{0,1\\}^{|\\mathcal{C}|}" + }, + { + "bbox": [ + 46, + 617, + 288, + 713 + ], + "type": "text", + "content": " denote the set of its labels, indicating the presence (1) or absence (0) of each label in " + }, + { + "bbox": [ + 46, + 617, + 288, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 46, + 617, + 288, + 713 + ], + "type": "text", + "content": " in the image. Let " + }, + { + "bbox": [ + 46, + 617, + 288, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{F}:\\mathbb{R}^d\\to \\mathbb{R}^{|\\mathcal{C}|}" + }, + { + "bbox": [ + 46, + 617, + 288, + 713 + ], + "type": "text", + "content": " be a multi-label classifier, which we assume" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": "has already been learned using training images. 
The multi-label classifier " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "\\mathcal{F} = \\{f_1, f_2, \\ldots, f_{|\\mathcal{C}|}\\}" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": " consists of " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "|\\mathcal{C}|" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": " binary classifiers for each label, where " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "f_c(\\pmb{x}) \\in (-\\infty, +\\infty)" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": " is the score of the classifier " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": ". Therefore, the probability of label " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": " being present in the image " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": " is given by " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "\\hat{y}_c = \\sigma(f_c(\\pmb{x}))" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "\\sigma(\\cdot)" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": " is the sigmoid function. 
Finally, let " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "\\Omega_{\\pmb{x}} \\subseteq \\mathcal{C}" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": " denote the target set of labels in the image " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": " which we want to attack, i.e., after the attack the present labels in " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "\\Omega_{\\pmb{x}}" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": " must become absent and vice versa. In the next subsection, we study the existing approaches [71] to generate multi-label attacks and identify their drawbacks." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 209, + 485, + 221 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 209, + 485, + 221 + ], + "spans": [ + { + "bbox": [ + 305, + 209, + 485, + 221 + ], + "type": "text", + "content": "3.2. 
Naive Multi-Label Attack (MLA)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 225, + 545, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 225, + 545, + 274 + ], + "spans": [ + { + "bbox": [ + 304, + 225, + 545, + 274 + ], + "type": "text", + "content": "For an attack on " + }, + { + "bbox": [ + 304, + 225, + 545, + 274 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 304, + 225, + 545, + 274 + ], + "type": "text", + "content": " that modifies the labels in " + }, + { + "bbox": [ + 304, + 225, + 545, + 274 + ], + "type": "inline_equation", + "content": "\\Omega_{\\pmb{x}}" + }, + { + "bbox": [ + 304, + 225, + 545, + 274 + ], + "type": "text", + "content": ", one can generate a small perturbation " + }, + { + "bbox": [ + 304, + 225, + 545, + 274 + ], + "type": "inline_equation", + "content": "e \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 304, + 225, + 545, + 274 + ], + "type": "text", + "content": " by minimizing the negative multi-label learning loss for labels in " + }, + { + "bbox": [ + 304, + 225, + 545, + 274 + ], + "type": "inline_equation", + "content": "\\Omega_{\\pmb{x}}" + }, + { + "bbox": [ + 304, + 225, + 545, + 274 + ], + "type": "text", + "content": " while restricting the magnitude of " + }, + { + "bbox": [ + 304, + 225, + 545, + 274 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 304, + 225, + 545, + 274 + ], + "type": "text", + "content": ". More precisely, we can solve" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 315, + 285, + 545, + 301 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 285, + 545, + 301 + ], + "spans": [ + { + "bbox": [ + 315, + 285, + 545, + 301 + ], + "type": "interline_equation", + "content": "\\text {M L A - U :} \\min _ {\\boldsymbol {e}} - \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {e}, \\Omega_ {\\boldsymbol {x}}) \\text {s . 
t .} \\| \\boldsymbol {e} \\| _ {p} \\leq \\epsilon , \\tag {1}", + "image_path": "dde16e549104f28c3d8ba1a4718e82eec1e484a602ac75e938c70a95e518e468.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 310, + 545, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 310, + 545, + 335 + ], + "spans": [ + { + "bbox": [ + 304, + 310, + 545, + 335 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 310, + 545, + 335 + ], + "type": "inline_equation", + "content": "\\| \\cdot \\| _p" + }, + { + "bbox": [ + 304, + 310, + 545, + 335 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 304, + 310, + 545, + 335 + ], + "type": "inline_equation", + "content": "\\ell_p" + }, + { + "bbox": [ + 304, + 310, + 545, + 335 + ], + "type": "text", + "content": "-norm and " + }, + { + "bbox": [ + 304, + 310, + 545, + 335 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ce}(\\boldsymbol{x}',\\Gamma_{\\boldsymbol{x}'})" + }, + { + "bbox": [ + 304, + 310, + 545, + 335 + ], + "type": "text", + "content": " is the binary cross-entropy loss for image " + }, + { + "bbox": [ + 304, + 310, + 545, + 335 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}'" + }, + { + "bbox": [ + 304, + 310, + 545, + 335 + ], + "type": "text", + "content": " on labels in " + }, + { + "bbox": [ + 304, + 310, + 545, + 335 + ], + "type": "inline_equation", + "content": "\\Gamma_{\\boldsymbol{x}'}" + }, + { + "bbox": [ + 304, + 310, + 545, + 335 + ], + "type": "text", + "content": ", defined as" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 315, + 346, + 381, + 360 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 346, + 381, + 360 + ], + "spans": [ + { + "bbox": [ + 315, + 346, + 381, + 360 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {b c e} \\left(\\boldsymbol {x} ^ {\\prime}, \\Omega_ {\\boldsymbol {x} ^ {\\prime}}\\right) \\triangleq", + 
"image_path": "f935748b35f6474f74451df2d119e2fa92e155d1edf5a677c2081f1cf06f1856.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 322, + 361, + 545, + 386 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 361, + 545, + 386 + ], + "spans": [ + { + "bbox": [ + 322, + 361, + 545, + 386 + ], + "type": "interline_equation", + "content": "\\sum_ {c \\in \\Omega_ {\\boldsymbol {x} ^ {\\prime}}} - y _ {c} \\log \\sigma \\left(f _ {c} \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right) - \\left(1 - y _ {c}\\right) \\log \\left(1 - \\sigma \\left(f _ {c} \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right)\\right). \\tag {2}", + "image_path": "0235f20db26c545e10523a6f436ebcc6ba23a3f056a84de0b434398dd8170b14.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 395, + 545, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 395, + 545, + 466 + ], + "spans": [ + { + "bbox": [ + 304, + 395, + 545, + 466 + ], + "type": "text", + "content": "The drawback of (1) is that attack on " + }, + { + "bbox": [ + 304, + 395, + 545, + 466 + ], + "type": "inline_equation", + "content": "\\Omega_{\\mathbf{x}}" + }, + { + "bbox": [ + 304, + 395, + 545, + 466 + ], + "type": "text", + "content": " can lead to changing the predictions for other labels too, see Figure 2 (right). This often leads to inconsistent predictions, which can simply be used to detect the attack (e.g., turning off 'pedestrian' can turn on 'bike' and 'stop sign' while turning off 'road'), hence significantly reducing the effectiveness of the attack." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 467, + 545, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 467, + 545, + 525 + ], + "spans": [ + { + "bbox": [ + 304, + 467, + 545, + 525 + ], + "type": "text", + "content": "To address this drawback, one can try to prevent changing predictions of other labels " + }, + { + "bbox": [ + 304, + 467, + 545, + 525 + ], + "type": "inline_equation", + "content": "(\\bar{\\Omega}_{\\pmb{x}})" + }, + { + "bbox": [ + 304, + 467, + 545, + 525 + ], + "type": "text", + "content": ", which is the complement of " + }, + { + "bbox": [ + 304, + 467, + 545, + 525 + ], + "type": "inline_equation", + "content": "\\Omega_{\\pmb{x}}" + }, + { + "bbox": [ + 304, + 467, + 545, + 525 + ], + "type": "text", + "content": " with respect to " + }, + { + "bbox": [ + 304, + 467, + 545, + 525 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 304, + 467, + 545, + 525 + ], + "type": "text", + "content": " by crafting the attack while including a loss that enforces predictions of other labels to stay intact. More precisely, one can solve" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 318, + 533, + 545, + 563 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 533, + 545, + 563 + ], + "spans": [ + { + "bbox": [ + 318, + 533, + 545, + 563 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\text {M L A - C :} \\min _ {\\boldsymbol {e}} - \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {e}, \\Omega_ {\\boldsymbol {x}}) + \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {e}, \\bar {\\Omega} _ {\\boldsymbol {x}}), \\tag {3} \\\\ \\begin{array}{l} \\text {s . 
t .} \\| e \\| _ {p} \\leq \\epsilon , \\end{array} \\\\ \\end{array}", + "image_path": "bb4ef8f5cd3331747c42241b7947b2d3bd6a6e8a32d38decd4cba208f50a0486.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "content": "where the first term in the objective function tries to flip the labels in " + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\Omega_{x}" + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "content": " while the second term preserves the labels in " + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\bar{\\Omega}_{x}" + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "content": ". Notice that with the additional objective, the space of perturbations in (3) is smaller than that in (1), yet it ensures not modifying labels outside the target set. However, as we verify by empirical results, the gradient of the loss for fixing other labels often is highly negatively correlated with the gradient of the loss on the target labels, hence, counteracting the effect of each other. We hypothesize that this effect is due to strong spurious correlations among labels, learnt by the model during training. 
Given two highly-correlated labels in an image, attacking one label while fixing the other" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "24253" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "content": "using (3) would lead to opposite gradients. This problem gets more pronounced when the number of labels increases (e.g., in Open Images dataset [40] with 9,600 labels) and the gradient of this additional loss gets larger too. Moreover, fixing predictions for labels in " + }, + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "inline_equation", + "content": "\\bar{\\Omega}_{\\pmb{x}}" + }, + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "content": " still may lead to semantic inconsistencies in predictions (e.g., turning off 'vehicle' requires turning off 'car' and 'truck', otherwise 'vehicle' being off while 'car' being on can be used to detect the attack), hence, reducing the attack effectiveness." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 189, + 275, + 202 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 189, + 275, + 202 + ], + "spans": [ + { + "bbox": [ + 47, + 189, + 275, + 202 + ], + "type": "text", + "content": "4. 
Generalized Multi-Label Attack (GMLA)" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 209, + 289, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 209, + 289, + 341 + ], + "spans": [ + { + "bbox": [ + 46, + 209, + 289, + 341 + ], + "type": "text", + "content": "We develop a framework for crafting adversarial attacks for MLL that addresses the challenges of conventional MLA, discussed above. First, to obtain an attack on a target label set " + }, + { + "bbox": [ + 46, + 209, + 289, + 341 + ], + "type": "inline_equation", + "content": "\\Omega_{x}" + }, + { + "bbox": [ + 46, + 209, + 289, + 341 + ], + "type": "text", + "content": " that leads to semantically consistent predictions across all labels, we find a minimal superset of the target set " + }, + { + "bbox": [ + 46, + 209, + 289, + 341 + ], + "type": "inline_equation", + "content": "\\Psi_{x}" + }, + { + "bbox": [ + 46, + 209, + 289, + 341 + ], + "type": "text", + "content": " (referred to as consistent target set) that needs to be attacked/modified. Given that there are often multiple such superset, we develop an efficient search algorithm over a knowledge graph " + }, + { + "bbox": [ + 46, + 209, + 289, + 341 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 46, + 209, + 289, + 341 + ], + "type": "text", + "content": " that encodes label dependencies. We denote by " + }, + { + "bbox": [ + 46, + 209, + 289, + 341 + ], + "type": "inline_equation", + "content": "\\Psi_{x} = h\\bigl (\\Omega_{x},\\mathcal{G}\\bigr)" + }, + { + "bbox": [ + 46, + 209, + 289, + 341 + ], + "type": "text", + "content": " the output of the search algorithm, which we will describe in detail later in this section." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 346, + 178, + 360 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 346, + 178, + 360 + ], + "spans": [ + { + "bbox": [ + 47, + 346, + 178, + 360 + ], + "type": "text", + "content": "4.1. Proposed Optimization" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 365, + 287, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 365, + 287, + 413 + ], + "spans": [ + { + "bbox": [ + 46, + 365, + 287, + 413 + ], + "type": "text", + "content": "We then study a projection-based optimization that searches for an attack that modifies the predictions of labels in " + }, + { + "bbox": [ + 46, + 365, + 287, + 413 + ], + "type": "inline_equation", + "content": "\\Psi_{\\pmb{x}}" + }, + { + "bbox": [ + 46, + 365, + 287, + 413 + ], + "type": "text", + "content": " while ensuring that other labels " + }, + { + "bbox": [ + 46, + 365, + 287, + 413 + ], + "type": "inline_equation", + "content": "\\bar{\\Psi}_{\\pmb{x}}" + }, + { + "bbox": [ + 46, + 365, + 287, + 413 + ], + "type": "text", + "content": " will not get affected. More specifically, we propose to solve" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 60, + 418, + 287, + 468 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 418, + 287, + 468 + ], + "spans": [ + { + "bbox": [ + 60, + 418, + 287, + 468 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\text {G M L A :} \\min _ {\\boldsymbol {e}} - \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {e}, \\Psi_ {\\boldsymbol {x}}), \\\\ \\text {s . 
t .} \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {e}, \\bar {\\Psi} _ {\\boldsymbol {x}}) = \\mathcal {L} _ {b c e} (\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}), \\tag {4} \\\\ \\| \\boldsymbol {e} \\| _ {p} \\leq \\epsilon , \\Psi_ {\\boldsymbol {x}} = h (\\Omega_ {\\boldsymbol {x}}, \\mathcal {G}), \\\\ \\end{array}", + "image_path": "ebe4259decd8aa2797b7285d90f56bd0820a1e88773356315d7986b2312fa112.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 471, + 287, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 471, + 287, + 578 + ], + "spans": [ + { + "bbox": [ + 46, + 471, + 287, + 578 + ], + "type": "text", + "content": "where we only minimize the attack loss on the consistent target set " + }, + { + "bbox": [ + 46, + 471, + 287, + 578 + ], + "type": "inline_equation", + "content": "\\Psi_{\\mathbf{x}}" + }, + { + "bbox": [ + 46, + 471, + 287, + 578 + ], + "type": "text", + "content": ", while requiring that the binary cross-entropy loss on other labels " + }, + { + "bbox": [ + 46, + 471, + 287, + 578 + ], + "type": "inline_equation", + "content": "\\bar{\\Psi}_{\\mathbf{x}}" + }, + { + "bbox": [ + 46, + 471, + 287, + 578 + ], + "type": "text", + "content": " stay the same after the attack. This means that instead of trying to make the predictions on other labels more confident as in (3), we try to keep them stay the same after the attack. As we also show in the experiments (see Figure 8), this significantly boosts the attack by resolving the high negative correlation of the gradients of the two losses in (3) and finding better attack directions." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 578, + 287, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 578, + 287, + 615 + ], + "spans": [ + { + "bbox": [ + 46, + 578, + 287, + 615 + ], + "type": "text", + "content": "Since solving the optimization in (4) that ensures the first constraint is satisfied is difficult, we take a first-order approximation on this constraint around " + }, + { + "bbox": [ + 46, + 578, + 287, + 615 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 578, + 287, + 615 + ], + "type": "text", + "content": " (as " + }, + { + "bbox": [ + 46, + 578, + 287, + 615 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 46, + 578, + 287, + 615 + ], + "type": "text", + "content": " is small)," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 77, + 618, + 287, + 662 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 618, + 287, + 662 + ], + "spans": [ + { + "bbox": [ + 77, + 618, + 287, + 662 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {e}, \\bar {\\Psi} _ {\\boldsymbol {x}}) \\approx \\mathcal {L} _ {b c e} (\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}) + \\boldsymbol {g} _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} ^ {\\top} \\boldsymbol {e}, \\\\ \\text {w h e r e ,} \\quad \\boldsymbol {g} _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} \\triangleq \\frac {\\partial \\mathcal {L} _ {b c e} (\\boldsymbol {x} , \\bar {\\Psi} _ {\\boldsymbol {x}})}{\\partial \\boldsymbol {x}}. 
\\end{array} \\tag {5}", + "image_path": "c9358a7a0a02e727f4e132e5384acaaefc0a0496559b7176569a3a40f03b0f98.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 665, + 160, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 665, + 160, + 676 + ], + "spans": [ + { + "bbox": [ + 47, + 665, + 160, + 676 + ], + "type": "text", + "content": "Thus, we can rewrite (4) as" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 61, + 681, + 287, + 716 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 681, + 287, + 716 + ], + "spans": [ + { + "bbox": [ + 61, + 681, + 287, + 716 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\min _ {\\boldsymbol {e}} - \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {e}, \\Psi_ {\\boldsymbol {x}}), \\tag {6} \\\\ \\begin{array}{l} \\text {s . t .} \\quad \\boldsymbol {g} _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} ^ {\\top} \\boldsymbol {e} = \\mathbf {0}, \\| \\boldsymbol {e} \\| _ {p} \\leq \\epsilon , \\Psi_ {\\boldsymbol {x}} = h \\big (\\Omega_ {\\boldsymbol {x}}, \\mathcal {G} \\big). 
\\end{array} \\\\ \\end{array}", + "image_path": "c9fbc49bccf15c156af51052e3c818de069d775fe45cf030985239e5b919d6c3.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 72, + 545, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 108 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 108 + ], + "type": "text", + "content": "The constraint " + }, + { + "bbox": [ + 305, + 72, + 545, + 108 + ], + "type": "inline_equation", + "content": "g_{x,\\bar{\\Psi}_x}^\\top e = 0" + }, + { + "bbox": [ + 305, + 72, + 545, + 108 + ], + "type": "text", + "content": " implies that " + }, + { + "bbox": [ + 305, + 72, + 545, + 108 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 305, + 72, + 545, + 108 + ], + "type": "text", + "content": " must be in the orthogonal space to the gradient direction " + }, + { + "bbox": [ + 305, + 72, + 545, + 108 + ], + "type": "inline_equation", + "content": "g_{x,\\bar{\\Psi}_x}" + }, + { + "bbox": [ + 305, + 72, + 545, + 108 + ], + "type": "text", + "content": ", hence not changing other labels. 
Thus, we can write" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 331, + 111, + 545, + 142 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 111, + 545, + 142 + ], + "spans": [ + { + "bbox": [ + 331, + 111, + 545, + 142 + ], + "type": "interline_equation", + "content": "\\boldsymbol {e} = \\boldsymbol {P} _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} \\alpha , \\quad \\boldsymbol {P} _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} \\triangleq \\boldsymbol {I} - \\frac {\\boldsymbol {g} _ {\\boldsymbol {x} , \\bar {\\Psi} _ {\\boldsymbol {x}}} \\boldsymbol {g} _ {\\boldsymbol {x} , \\bar {\\Psi} _ {\\boldsymbol {x}}} ^ {\\top}}{\\| \\boldsymbol {g} _ {\\boldsymbol {x} , \\bar {\\Psi} _ {\\boldsymbol {x}}} \\| _ {2} ^ {2}}, \\tag {7}", + "image_path": "f787165ead25371efeaf9f7ae75164f26939ad07d9927f416e768a6058b78924.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 151, + 545, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 151, + 545, + 187 + ], + "spans": [ + { + "bbox": [ + 305, + 151, + 545, + 187 + ], + "type": "text", + "content": "for some " + }, + { + "bbox": [ + 305, + 151, + 545, + 187 + ], + "type": "inline_equation", + "content": "\\alpha \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 305, + 151, + 545, + 187 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 305, + 151, + 545, + 187 + ], + "type": "inline_equation", + "content": "P_{x,\\bar{\\Psi}_x}" + }, + { + "bbox": [ + 305, + 151, + 545, + 187 + ], + "type": "text", + "content": " is the orthogonal projection matrix on the gradient " + }, + { + "bbox": [ + 305, + 151, + 545, + 187 + ], + "type": "inline_equation", + "content": "g_{x,\\bar{\\Psi}_x}" + }, + { + "bbox": [ + 305, + 151, + 545, + 187 + ], + "type": "text", + "content": ". 
Thus, we can write the optimization in (4) as" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 196, + 545, + 230 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 196, + 545, + 230 + ], + "spans": [ + { + "bbox": [ + 314, + 196, + 545, + 230 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\text {G M L A :} \\min _ {\\boldsymbol {\\alpha}} - \\mathcal {L} _ {b c e} (\\boldsymbol {x} + \\boldsymbol {P} _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} \\boldsymbol {\\alpha}, \\Psi_ {\\boldsymbol {x}}), \\tag {8} \\\\ \\begin{array}{l} \\text {s . t .} \\| P _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} \\boldsymbol {\\alpha} \\| _ {p} \\leq \\epsilon , \\quad \\Psi_ {\\boldsymbol {x}} = h (\\Omega_ {\\boldsymbol {x}}, \\mathcal {G}). \\end{array} \\\\ \\end{array}", + "image_path": "fe3ca47340f0e92ba36fa682a6fb5f5ad1fa9621a98fc4b0a45f3bae74544c18.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 237, + 545, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 237, + 545, + 275 + ], + "spans": [ + { + "bbox": [ + 305, + 237, + 545, + 275 + ], + "type": "text", + "content": "We follow AutoPGD [17] to iteratively solve (8). 
At each iteration, we linearly approximate the objective function and solve " + }, + { + "bbox": [ + 305, + 237, + 545, + 275 + ], + "type": "inline_equation", + "content": "(\\pmb{g}_{\\pmb{x},\\Psi_{\\pmb{x}}}" + }, + { + "bbox": [ + 305, + 237, + 545, + 275 + ], + "type": "text", + "content": " is the gradient of " + }, + { + "bbox": [ + 305, + 237, + 545, + 275 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{bce}(\\pmb {x},\\Psi_{\\pmb{x}}))" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 340, + 283, + 545, + 306 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 340, + 283, + 545, + 306 + ], + "spans": [ + { + "bbox": [ + 340, + 283, + 545, + 306 + ], + "type": "interline_equation", + "content": "\\min _ {\\boldsymbol {\\alpha}} - \\mathbf {g} _ {\\boldsymbol {x}, \\Psi_ {\\boldsymbol {x}}} ^ {\\top} \\left(\\boldsymbol {P} _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} \\boldsymbol {\\alpha}\\right), \\tag {9}", + "image_path": "cd597db08e9f91f8e2da58b66c92c2c96321bb54df72e525d68c42d3a9fadef7.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 343, + 304, + 512, + 319 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 343, + 304, + 512, + 319 + ], + "spans": [ + { + "bbox": [ + 343, + 304, + 512, + 319 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\text {s . t .} \\| P _ {\\boldsymbol {x}, \\bar {\\Psi} _ {\\boldsymbol {x}}} \\boldsymbol {\\alpha} \\| _ {p} \\leq \\epsilon , \\quad \\Psi_ {\\boldsymbol {x}} = h (\\Omega_ {\\boldsymbol {x}}, \\mathcal {G}). 
\\end{array}", + "image_path": "859c40833a9213edf74100d8db0e82ab375df313e6a11a1dc6264f3aabb6c8d5.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 327, + 545, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 327, + 545, + 350 + ], + "spans": [ + { + "bbox": [ + 306, + 327, + 545, + 350 + ], + "type": "text", + "content": "As we show in the supplementary materials, we can solve (9) for " + }, + { + "bbox": [ + 306, + 327, + 545, + 350 + ], + "type": "inline_equation", + "content": "p = \\infty" + }, + { + "bbox": [ + 306, + 327, + 545, + 350 + ], + "type": "text", + "content": " and get the closed form update for " + }, + { + "bbox": [ + 306, + 327, + 545, + 350 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 306, + 327, + 545, + 350 + ], + "type": "text", + "content": " as" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 322, + 358, + 545, + 387 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 358, + 545, + 387 + ], + "spans": [ + { + "bbox": [ + 322, + 358, + 545, + 387 + ], + "type": "interline_equation", + "content": "e = \\epsilon \\cdot \\frac {\\boldsymbol {P} _ {\\boldsymbol {x} , \\bar {\\Psi} _ {\\boldsymbol {x}}} \\boldsymbol {\\nu}}{\\| \\boldsymbol {P} _ {\\boldsymbol {x} , \\bar {\\Psi} _ {\\boldsymbol {x}}} \\boldsymbol {\\nu} \\| _ {\\infty}}, \\quad \\boldsymbol {\\nu} \\triangleq \\operatorname {s g n} \\left(\\boldsymbol {g} _ {\\boldsymbol {x}, \\Psi_ {\\boldsymbol {x}}}\\right). 
\\tag {10}", + "image_path": "db7a56b9c704a1e5e239b8ec68a6c31d5776fec472ca73e747a37196c2a622e4.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 394, + 545, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 394, + 545, + 453 + ], + "spans": [ + { + "bbox": [ + 305, + 394, + 545, + 453 + ], + "type": "text", + "content": "We further enhance the effectiveness of the attack, especially for the case when the gradients of both the targeted and non-targeted classes are aligned (have positive correlation). In such instances, our approach involves finding the direction " + }, + { + "bbox": [ + 305, + 394, + 545, + 453 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 305, + 394, + 545, + 453 + ], + "type": "text", + "content": " using" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 462, + 545, + 488 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 462, + 545, + 488 + ], + "spans": [ + { + "bbox": [ + 310, + 462, + 545, + 488 + ], + "type": "interline_equation", + "content": "\\min _ {e} e ^ {T} \\left(- \\frac {\\mathbf {g} _ {\\mathbf {x} , \\Psi_ {\\mathbf {x}}}}{\\| \\mathbf {g} _ {\\mathbf {x} , \\Psi_ {\\mathbf {x}}} \\| _ {2}} + \\frac {\\mathbf {g} _ {\\mathbf {x} , \\bar {\\Psi} _ {\\mathbf {x}}}}{\\| \\mathbf {g} _ {\\mathbf {x} , \\bar {\\Psi} _ {\\mathbf {x}}} \\| _ {2}}\\right) \\text {s . t .} \\| e \\| _ {p} \\leq \\epsilon . \\tag {11}", + "image_path": "6e0d99b31a9ab59d128944b49e0c157eeebd1115854232831ff9d9508a4b349d.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 306, + 496, + 545, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 496, + 545, + 508 + ], + "spans": [ + { + "bbox": [ + 306, + 496, + 545, + 508 + ], + "type": "text", + "content": "We provide more details and analysis in the supplementary." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 305, + 515, + 534, + 529 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 515, + 534, + 529 + ], + "spans": [ + { + "bbox": [ + 305, + 515, + 534, + 529 + ], + "type": "text", + "content": "4.2. Consistent Target Set via Knowledge Graph" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 304, + 534, + 545, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 534, + 545, + 665 + ], + "spans": [ + { + "bbox": [ + 304, + 534, + 545, + 665 + ], + "type": "text", + "content": "We obtain a consistent target set by developing an efficient search algorithm over a knowledge graph " + }, + { + "bbox": [ + 304, + 534, + 545, + 665 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 304, + 534, + 545, + 665 + ], + "type": "text", + "content": " that encodes label dependencies. Assume " + }, + { + "bbox": [ + 304, + 534, + 545, + 665 + ], + "type": "inline_equation", + "content": "\\mathcal{G} = (\\mathcal{C},\\mathcal{E})" + }, + { + "bbox": [ + 304, + 534, + 545, + 665 + ], + "type": "text", + "content": " is a directed acyclic knowledge graph built on the labels " + }, + { + "bbox": [ + 304, + 534, + 545, + 665 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 304, + 534, + 545, + 665 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 534, + 545, + 665 + ], + "type": "inline_equation", + "content": "\\mathcal{E}" + }, + { + "bbox": [ + 304, + 534, + 545, + 665 + ], + "type": "text", + "content": " denotes the set of edges (see below for details about building this graph). 
A consistent target set " + }, + { + "bbox": [ + 304, + 534, + 545, + 665 + ], + "type": "inline_equation", + "content": "\\Psi_{x}" + }, + { + "bbox": [ + 304, + 534, + 545, + 665 + ], + "type": "text", + "content": " is defined as a superset of the target nodes/labels " + }, + { + "bbox": [ + 304, + 534, + 545, + 665 + ], + "type": "inline_equation", + "content": "\\Omega_{x}" + }, + { + "bbox": [ + 304, + 534, + 545, + 665 + ], + "type": "text", + "content": " that if attacked successfully leads to MLL outputs so that " + }, + { + "bbox": [ + 304, + 534, + 545, + 665 + ], + "type": "inline_equation", + "content": "i)" + }, + { + "bbox": [ + 304, + 534, + 545, + 665 + ], + "type": "text", + "content": " when MLL predicts 1 for a parent node/label, then at least one of its children is also predicted as 1; " + }, + { + "bbox": [ + 304, + 534, + 545, + 665 + ], + "type": "inline_equation", + "content": "ii)" + }, + { + "bbox": [ + 304, + 534, + 545, + 665 + ], + "type": "text", + "content": " when all children of a node/label are predicted as 0, then the parent is predicted as 0." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 304, + 666, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 666, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 666, + 545, + 714 + ], + "type": "text", + "content": "Algorithm 1 shows our algorithm and the time complexity for each step to obtain the consistent target set. The algorithm works as follows. 
Given the target set " + }, + { + "bbox": [ + 304, + 666, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\Omega_{x}" + }, + { + "bbox": [ + 304, + 666, + 545, + 714 + ], + "type": "text", + "content": ", MLL predictions " + }, + { + "bbox": [ + 304, + 666, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{S}" + }, + { + "bbox": [ + 304, + 666, + 545, + 714 + ], + "type": "text", + "content": ", and the adjacency matrix " + }, + { + "bbox": [ + 304, + 666, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{E}" + }, + { + "bbox": [ + 304, + 666, + 545, + 714 + ], + "type": "text", + "content": " of the knowledge" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "24254" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 240 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 240 + ], + "type": "text", + "content": "graph, the algorithm finds the minimal superset of " + }, + { + "bbox": [ + 46, + 72, + 289, + 240 + ], + "type": "inline_equation", + "content": "\\Omega_{\\mathbf{x}}" + }, + { + "bbox": [ + 46, + 72, + 289, + 240 + ], + "type": "text", + "content": " to be modified. While attacking a label, we need to maintain its consistency with respect to its children and parents. To maintain children consistency, each child of the target node must be turned OFF unless that child has multiple parents ON. We parse the path from target node to the leaf nodes and perform the same operation on every node. 
Similarly, to maintain parents consistency, all parents must be turned OFF unless some parent has more than one child ON. We perform this process for each node along the path from target node to the root until there are no more nodes to modify. The upper bound of algorithm's time complexity is " + }, + { + "bbox": [ + 46, + 72, + 289, + 240 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\Omega \\mathcal{C})" + }, + { + "bbox": [ + 46, + 72, + 289, + 240 + ], + "type": "text", + "content": ". As Figure 4 shows, on the same graph, consistent target sets depend on the MLL predictions." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 246, + 289, + 414 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 246, + 289, + 414 + ], + "spans": [ + { + "bbox": [ + 46, + 246, + 289, + 414 + ], + "type": "text", + "content": "Knowledge Graph Construction. To construct " + }, + { + "bbox": [ + 46, + 246, + 289, + 414 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 46, + 246, + 289, + 414 + ], + "type": "text", + "content": ", we use WordNet [54], which contains rich semantic relationships between labels" + }, + { + "bbox": [ + 46, + 246, + 289, + 414 + ], + "type": "inline_equation", + "content": "^2" + }, + { + "bbox": [ + 46, + 246, + 289, + 414 + ], + "type": "text", + "content": ". One can also use other sources, such as ConceptNet [72] or OpenImages semantic hierarchy [40]. We build a tree " + }, + { + "bbox": [ + 46, + 246, + 289, + 414 + ], + "type": "inline_equation", + "content": "\\mathcal{G} = (\\mathcal{C}, \\mathcal{E})" + }, + { + "bbox": [ + 46, + 246, + 289, + 414 + ], + "type": "text", + "content": " on all labels " + }, + { + "bbox": [ + 46, + 246, + 289, + 414 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 46, + 246, + 289, + 414 + ], + "type": "text", + "content": " using hypernym and hyponym relations of labels. 
This can also be easily extended to other relationships e.g., antonymy, entailment, etc. For each label in " + }, + { + "bbox": [ + 46, + 246, + 289, + 414 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 46, + 246, + 289, + 414 + ], + "type": "text", + "content": ", we use WordNet to extract its parent and child labels (e.g., for 'car', we obtain 'vehicle' as parent using its hybernyms). Since a word can be associated with several synsets, we choose the synset with the closest match to the label description. To build the tree, we use the maximum WUP similarity [80] between a child and multiple parent nodes to select a single parent." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 425, + 128, + 438 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 425, + 128, + 438 + ], + "spans": [ + { + "bbox": [ + 47, + 425, + 128, + 438 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 445, + 163, + 458 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 445, + 163, + 458 + ], + "spans": [ + { + "bbox": [ + 47, + 445, + 163, + 458 + ], + "type": "text", + "content": "5.1. Experimental Setup" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 463, + 287, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 463, + 287, + 668 + ], + "spans": [ + { + "bbox": [ + 46, + 463, + 287, + 668 + ], + "type": "text", + "content": "Datasets. We use Pascal-VOC [24], NUS-WIDE [16] and OpenImages [40] for studying the effectiveness of multi-label attacks. For Pascal-VOC, we trained each MLL model on 8,000 images from the training sets of PASCAL-VOC 2007 and PASCAL-VOC 2012 and created the adversarial examples for the test set of PASCAL-VOC 2007. 
To build " + }, + { + "bbox": [ + 46, + 463, + 287, + 668 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 46, + 463, + 287, + 668 + ], + "type": "text", + "content": ", we extracted abstract classes from WordNet using which and the original 20 labels, we obtained 35 labels/nodes. For NUS-WIDE, we trained each MLL model on 150K images from the training set and attacked the models using the test set of the dataset. We used Wordnet to extract abstract classes and built a tree on labels. The total number of labels are 116, which includes 80 original labels and 36 additional abstract classes from WordNet. For OpenImages, we used pre-trained model from [64] and used test images to generate the attacks. We use the official class hierarchy provided in OpenImages as semantic relationship information." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 673, + 286, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 673, + 286, + 685 + ], + "spans": [ + { + "bbox": [ + 47, + 673, + 286, + 685 + ], + "type": "text", + "content": "Multi-Label Recognition Models. 
We investigate the ef" + } + ] + } + ], + "index": 5 + }, + { + "type": "code", + "bbox": [ + 306, + 88, + 545, + 344 + ], + "blocks": [ + { + "bbox": [ + 312, + 75, + 512, + 87 + ], + "lines": [ + { + "bbox": [ + 312, + 75, + 512, + 87 + ], + "spans": [ + { + "bbox": [ + 312, + 75, + 512, + 87 + ], + "type": "text", + "content": "Algorithm 1: Consistent Target Set Construction" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "lines": [ + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "spans": [ + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": "Input: " + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " : Target Set, " + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " : MLL Label Predictions, " + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "\\mathcal{E}" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " : Knowledge Graph's Adjacency Matrix Output: " + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " Expanded Target Set Procedure: " + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "f_{select}(X)" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " : return " + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "\\{i:X_i = True\\}" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " Procedure " + }, + { + "bbox": [ + 306, + 88, + 
545, + 344 + ], + "type": "inline_equation", + "content": "f_{child.}(n,\\mathcal{E},\\mathcal{S})" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " .. return " + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "f_{select}(\\mathcal{E}_{[n,:]}\\odot \\mathcal{S} == 1)" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " Procedure " + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "f_{par.}(n,\\mathcal{E},\\mathcal{S})" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " .. return " + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "f_{select}(\\mathcal{E}_{[:n]}\\odot \\mathcal{S} == 1)" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " \nProcedure Consistent_Comp " + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "(n,V,\\Gamma ,f_1,f_2)" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " . 
Queue Q I " + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "\\leftarrow f_1(n,\\mathcal{E},\\mathcal{S})" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "\\triangleright \\mathcal{O}(1)" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " Q.enqueue(I) " + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "\\triangleright \\mathcal{O}(1)" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " while " + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "\\mathcal{Q}" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " is not empty do " + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "\\triangleright \\mathcal{O}(\\mathcal{C})" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "v_{n} = \\mathcal{Q}.dequeue()" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "v_{n}\\notin \\mathcal{V}" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " then " + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "\\nu \\gets \\nu \\cup \\{v_n\\}" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "\\triangleright \\mathcal{O}(1)" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "I\\gets f_2(v_n,\\mathcal{E},\\mathcal{S})\\backslash \\Gamma" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "|I| < 2" + }, + 
{ + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " then " + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "\\Gamma \\leftarrow \\Gamma \\cup \\{v_n\\}" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "\\triangleright \\mathcal{O}(1)" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "I\\gets f_1(v_n,\\mathcal{E},\\mathcal{S})" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " Q.enqueue(I) \n" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "\\Gamma = \\{\\}" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " \nforeach " + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "n\\in \\Omega" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " do " + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "\\triangleright \\mathcal{O}(\\Omega)" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " V = {n} \n" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "\\Gamma \\leftarrow" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " Consistent_Comp(n,V,Γ,fchild., " + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "f_{par.})" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "\\triangleright \\mathcal{O}(\\mathcal{C})" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "\\Gamma \\leftarrow" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "text", + "content": " Consistent_Comp(n,V,Γ,fpar., " + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + 
"type": "inline_equation", + "content": "f_{child.})" + }, + { + "bbox": [ + 306, + 88, + 545, + 344 + ], + "type": "inline_equation", + "content": "\\triangleright \\mathcal{O}(\\mathcal{C})" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "code_body" + } + ], + "index": 8, + "sub_type": "algorithm" + }, + { + "type": "image", + "bbox": [ + 313, + 356, + 538, + 427 + ], + "blocks": [ + { + "bbox": [ + 313, + 356, + 538, + 427 + ], + "lines": [ + { + "bbox": [ + 313, + 356, + 538, + 427 + ], + "spans": [ + { + "bbox": [ + 313, + 356, + 538, + 427 + ], + "type": "image", + "image_path": "493e62d6104bbb3d867c5856ea4fe00074275e03170d6275985f5b07d42b3833.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 431, + 545, + 471 + ], + "lines": [ + { + "bbox": [ + 305, + 431, + 545, + 471 + ], + "spans": [ + { + "bbox": [ + 305, + 431, + 545, + 471 + ], + "type": "text", + "content": "Figure 4. Examples of different consistent target sets obtained by Algorithm 1. Green nodes show the present labels predicted by the MLL and " + }, + { + "bbox": [ + 305, + 431, + 545, + 471 + ], + "type": "inline_equation", + "content": "\\Omega = \\{t\\}" + }, + { + "bbox": [ + 305, + 431, + 545, + 471 + ], + "type": "text", + "content": " is the target. The labels to be modified, " + }, + { + "bbox": [ + 305, + 431, + 545, + 471 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 305, + 431, + 545, + 471 + ], + "type": "text", + "content": " are shown within the red region and the labels to be fixed " + }, + { + "bbox": [ + 305, + 431, + 545, + 471 + ], + "type": "inline_equation", + "content": "\\bar{\\Psi}" + }, + { + "bbox": [ + 305, + 431, + 545, + 471 + ], + "type": "text", + "content": " are shown within the green region." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 484, + 533, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 484, + 533, + 496 + ], + "spans": [ + { + "bbox": [ + 305, + 484, + 533, + 496 + ], + "type": "text", + "content": "fectiveness of multi-label attacks on three MLL models." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 499, + 545, + 685 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 304, + 499, + 545, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 499, + 545, + 571 + ], + "spans": [ + { + "bbox": [ + 304, + 499, + 545, + 571 + ], + "type": "text", + "content": "- ML-GCN [15]: It explicitly learns relationships among labels using Graph Convolutional Networks (GCN). It builds a graph using the word embeddings and the cooccurrence matrix of labels and uses a GCN to extract information about label relationships. We trained the model using the binary cross-entropy loss." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 573, + 545, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 573, + 545, + 633 + ], + "spans": [ + { + "bbox": [ + 304, + 573, + 545, + 633 + ], + "type": "text", + "content": "- Asymmetric Loss (ASL) [64]: It is an effective multi-label learning method that uses a novel loss for better optimization over highly imbalanced positive and negative class distributions. Following their experimental setting, we trained the TResNet-L [63] backbone." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 636, + 545, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 636, + 545, + 685 + ], + "spans": [ + { + "bbox": [ + 304, + 636, + 545, + 685 + ], + "type": "text", + "content": "- ML-Decoder [65]: It is an attention-based unified decoder architecture for zero-shot, single-label, and multi-label classification. It uses a group-decoding scheme to alleviate the problem of scaling to large number of classes." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 689, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 545, + 714 + ], + "type": "text", + "content": "Perturbation Generation. For PASCAL-VOC and NUS-WIDE, we show results on a range of perturbation budgets." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 693, + 287, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 693, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 693, + 287, + 713 + ], + "type": "text", + "content": "2WordNet is a lexical database for the English language, containing 155,327 words organized in 175,979 synsets." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "24255" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 71, + 182, + 168 + ], + "blocks": [ + { + "bbox": [ + 72, + 71, + 182, + 168 + ], + "lines": [ + { + "bbox": [ + 72, + 71, + 182, + 168 + ], + "spans": [ + { + "bbox": [ + 72, + 71, + 182, + 168 + ], + "type": "image", + "image_path": "a766f3d13113a029f1ff315e8ec21514cfe74780e4f57afb984469b3a7ae8182.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 171, + 547, + 190 + ], + "lines": [ + { + "bbox": [ + 46, + 171, + 547, + 190 + ], + "spans": [ + { + "bbox": [ + 46, + 171, + 547, + 190 + ], + "type": "text", + "content": "Figure 5. Naive fooling rate " + }, + { + "bbox": [ + 46, + 171, + 547, + 190 + ], + "type": "inline_equation", + "content": "(\\mathrm{FR}_N)" + }, + { + "bbox": [ + 46, + 171, + 547, + 190 + ], + "type": "text", + "content": " and graph-based fooling rate " + }, + { + "bbox": [ + 46, + 171, + 547, + 190 + ], + "type": "inline_equation", + "content": "(\\mathrm{FR}_S)" + }, + { + "bbox": [ + 46, + 171, + 547, + 190 + ], + "type": "text", + "content": " of different attacks on ML-GCN model, trained on PASCAL-VOC for one and two label/node attacks. 
The x-axis shows the upper bound on the " + }, + { + "bbox": [ + 46, + 171, + 547, + 190 + ], + "type": "inline_equation", + "content": "l_{\\infty}" + }, + { + "bbox": [ + 46, + 171, + 547, + 190 + ], + "type": "text", + "content": "-norm of perturbations " + }, + { + "bbox": [ + 46, + 171, + 547, + 190 + ], + "type": "inline_equation", + "content": "(\\epsilon)" + }, + { + "bbox": [ + 46, + 171, + 547, + 190 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 185, + 72, + 294, + 168 + ], + "blocks": [ + { + "bbox": [ + 185, + 72, + 294, + 168 + ], + "lines": [ + { + "bbox": [ + 185, + 72, + 294, + 168 + ], + "spans": [ + { + "bbox": [ + 185, + 72, + 294, + 168 + ], + "type": "image", + "image_path": "e1d49b422ba75be5eb73d3a2b25c250a34ba77532aea66c9e6c041fe11b9a9ca.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 298, + 72, + 406, + 168 + ], + "blocks": [ + { + "bbox": [ + 298, + 72, + 406, + 168 + ], + "lines": [ + { + "bbox": [ + 298, + 72, + 406, + 168 + ], + "spans": [ + { + "bbox": [ + 298, + 72, + 406, + 168 + ], + "type": "image", + "image_path": "1f9e5078065947deb5521b92330279779a76f8db0c34f4b06724f4fe3c6ab520.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 409, + 72, + 518, + 168 + ], + "blocks": [ + { + "bbox": [ + 409, + 72, + 518, + 168 + ], + "lines": [ + { + "bbox": [ + 409, + 72, + 518, + 168 + ], + "spans": [ + { + "bbox": [ + 409, + 72, + 518, + 168 + ], + "type": "image", + "image_path": "b7eff2bc4297de6a49667ff653d328d72bcfb2ee6f694a4fd1e7e731acacb9a4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 52, + 201, + 544, + 365 + ], + "blocks": [ + { + "bbox": [ + 52, + 
201, + 544, + 365 + ], + "lines": [ + { + "bbox": [ + 52, + 201, + 544, + 365 + ], + "spans": [ + { + "bbox": [ + 52, + 201, + 544, + 365 + ], + "type": "table", + "html": "
DatasetPASCAL-VOCNUS-WIDE
Target Set Size|Ω| = 1|Ω| = 2|Ω| = 1|Ω| = 2
ModelAttack↑FRN↑FRS↓NTR↑SSIM↑FRN↑FRS↓NTR↑SSIM↑FRN↑FRS↓NTR↑SSIM↑FRN↑FRS↓NTR↑SSIM
ML-GEN [15]MLA-U [71]100.075.54.90.97100.068.04.50.9799.743.51.50.9699.331.71.60.96
MLA-C [71]99.968.93.00.9699.860.22.80.9796.427.40.40.9792.418.50.40.97
MLA-LP [65]56.16.700.10.9946.76.000.30.9919.33.500.10.9811.43.300.00.98
GMLA (Ours)100.099.42.70.97100.098.42.50.9899.295.80.50.9799.191.30.40.97
ASL [64]MLA-U [71]100.052.84.60.97100.048.34.80.98100.050.02.00.97100.043.32.10.97
MLA-C [71]100.039.72.30.9799.733.22.10.98100.035.50.70.97100.030.00.70.96
MLA-LP [65]15.82.400.10.9911.92.900.50.9920.84.800.00.9816.13.100.00.98
GMLA (Ours)100.098.82.20.97100.098.82.00.98100.096.10.80.97100.093.20.70.97
ML-Dec [65]MLA-U [71]99.766.25.30.9799.862.05.70.9898.856.44.10.9797.950.44.60.98
MLA-C [71]99.150.62.70.9897.540.72.40.9773.630.41.00.9768.226.70.90.97
MLA-LP [65]19.43.700.10.9817.63.200.20.9813.34.100.00.979.72.900.00.98
GMLA (Ours)99.196.22.70.9899.397.12.50.9795.184.91.10.9793.982.01.00.98
", + "image_path": "aa60567be53d1241dba89384f11135bed19b7234b00317ff37d72e4439351929.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 368, + 546, + 388 + ], + "lines": [ + { + "bbox": [ + 46, + 368, + 546, + 388 + ], + "spans": [ + { + "bbox": [ + 46, + 368, + 546, + 388 + ], + "type": "text", + "content": "Table 1. Experimental evaluation of the four attack methods on three models for " + }, + { + "bbox": [ + 46, + 368, + 546, + 388 + ], + "type": "inline_equation", + "content": "\\epsilon = 0.01" + }, + { + "bbox": [ + 46, + 368, + 546, + 388 + ], + "type": "text", + "content": ". The values represent the mean computed using the attack performance across all the combinations of target classes of size " + }, + { + "bbox": [ + 46, + 368, + 546, + 388 + ], + "type": "inline_equation", + "content": "|\\Omega|" + }, + { + "bbox": [ + 46, + 368, + 546, + 388 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 409, + 287, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 409, + 287, + 493 + ], + "spans": [ + { + "bbox": [ + 46, + 409, + 287, + 493 + ], + "type": "text", + "content": "For OpenImages with 9,600 labels, we perform experiments for large-scale attacks with different sizes of the target set for a fixed epsilon value. To generate the target sets for attack, we randomly draw 100 samples of size " + }, + { + "bbox": [ + 46, + 409, + 287, + 493 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 46, + 409, + 287, + 493 + ], + "type": "text", + "content": " labels. 
For each draw from OpenImages, we randomly sample " + }, + { + "bbox": [ + 46, + 409, + 287, + 493 + ], + "type": "inline_equation", + "content": "k / 2" + }, + { + "bbox": [ + 46, + 409, + 287, + 493 + ], + "type": "text", + "content": " leaf nodes (labels) from the graph " + }, + { + "bbox": [ + 46, + 409, + 287, + 493 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 46, + 409, + 287, + 493 + ], + "type": "text", + "content": " and sample the remaining labels which are not part of the graph." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 501, + 287, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 501, + 287, + 658 + ], + "spans": [ + { + "bbox": [ + 46, + 501, + 287, + 658 + ], + "type": "text", + "content": "Baselines. We use MLA-U and MLA-C as baselines, following Song et al. [71]. Additionally, we use MLA-LP [65] as a baseline, which generates adversarial perturbation for multi-label recognition by solving a linear programming problem using the interior point method while minimizing the " + }, + { + "bbox": [ + 46, + 501, + 287, + 658 + ], + "type": "inline_equation", + "content": "l_{\\infty}" + }, + { + "bbox": [ + 46, + 501, + 287, + 658 + ], + "type": "text", + "content": " norm. In contrast to other methods, it requires computing the Jacobian at each optimization step. In our experiments, MLA-LP did not converge for OpenImages. To provide a comprehensive comparison, we extend our evaluation to ML-DP [71], a greedy algorithm that computes multi-label attack perturbations using constraint linearization as introduced in DeepFool [55]. We show the results for ML-DP in supplementary material." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "type": "text", + "content": "Evaluation Metrics. 
Let " + }, + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{I}" + }, + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "type": "text", + "content": " be the set of images that are attacked and " + }, + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{A} \\subseteq \\mathcal{I}" + }, + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "type": "text", + "content": " denote the set of images that are successfully attacked, i.e., for " + }, + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "type": "inline_equation", + "content": "x \\in \\mathcal{A}" + }, + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "type": "text", + "content": ", all labels in " + }, + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\Omega_x" + }, + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "type": "text", + "content": " change after the attack. Let " + }, + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{\\mathcal{G}} \\subseteq \\mathcal{A}" + }, + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "type": "text", + "content": " denote the subset of " + }, + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 46, + 665, + 287, + 715 + ], + "type": "text", + "content": " for" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 409, + 544, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 409, + 544, + 433 + ], + "spans": [ + { + "bbox": [ + 304, + 409, + 544, + 433 + ], + "type": "text", + "content": "which the attack produces semantically consistent predictions in the output of MLL according to " + }, + { + "bbox": [ + 304, + 409, + 544, + 433 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 434, + 545, + 457 + ], + "type": 
"text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 434, + 545, + 457 + ], + "spans": [ + { + "bbox": [ + 304, + 434, + 545, + 457 + ], + "type": "text", + "content": "We define naive fooling rate, " + }, + { + "bbox": [ + 304, + 434, + 545, + 457 + ], + "type": "inline_equation", + "content": "FR_{N}" + }, + { + "bbox": [ + 304, + 434, + 545, + 457 + ], + "type": "text", + "content": " and semantic-based fooling rate, " + }, + { + "bbox": [ + 304, + 434, + 545, + 457 + ], + "type": "inline_equation", + "content": "FR_{S}" + }, + { + "bbox": [ + 304, + 434, + 545, + 457 + ], + "type": "text", + "content": ", as" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 368, + 462, + 545, + 486 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 368, + 462, + 545, + 486 + ], + "spans": [ + { + "bbox": [ + 368, + 462, + 545, + 486 + ], + "type": "interline_equation", + "content": "F R _ {N} = \\frac {| \\mathcal {A} |}{| \\mathcal {I} |}, F R _ {S} = \\frac {| \\mathcal {A} _ {\\mathcal {G}} |}{| \\mathcal {I} |}. \\tag {12}", + "image_path": "7242d160601aee822f7baa968b1e72eafad10fff44f15037e12f3321caab7f0c.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 491, + 545, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 491, + 545, + 587 + ], + "spans": [ + { + "bbox": [ + 304, + 491, + 545, + 587 + ], + "type": "text", + "content": "Thus, " + }, + { + "bbox": [ + 304, + 491, + 545, + 587 + ], + "type": "inline_equation", + "content": "FR_{N}" + }, + { + "bbox": [ + 304, + 491, + 545, + 587 + ], + "type": "text", + "content": " measures fraction of attacked images whose attacks has been successful, without considering whether the MLL predictions are semantically consistent. 
On the other hand, " + }, + { + "bbox": [ + 304, + 491, + 545, + 587 + ], + "type": "inline_equation", + "content": "FR_{S}" + }, + { + "bbox": [ + 304, + 491, + 545, + 587 + ], + "type": "text", + "content": " captures fraction of attacked images whose attacks have been successful and produced semantically consistent MLL predictions. We also define non-target flip rate, " + }, + { + "bbox": [ + 304, + 491, + 545, + 587 + ], + "type": "inline_equation", + "content": "NT_{R}" + }, + { + "bbox": [ + 304, + 491, + 545, + 587 + ], + "type": "text", + "content": ", which is the percentage of semantically unrelated labels (labels in " + }, + { + "bbox": [ + 304, + 491, + 545, + 587 + ], + "type": "inline_equation", + "content": "\\bar{\\Psi}_{k}" + }, + { + "bbox": [ + 304, + 491, + 545, + 587 + ], + "type": "text", + "content": ") which were flipped by the attack, i.e.," + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 339, + 594, + 545, + 625 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 339, + 594, + 545, + 625 + ], + "spans": [ + { + "bbox": [ + 339, + 594, + 545, + 625 + ], + "type": "interline_equation", + "content": "N T _ {R} = \\frac {1}{| \\mathcal {A} |} \\sum_ {k \\in \\mathcal {A}} \\frac {\\sum_ {i \\in \\bar {\\Psi} _ {k}} \\left(1 - \\delta \\left(f _ {i} ^ {(k)} , y _ {i} ^ {(k)}\\right)\\right)}{| \\bar {\\Psi} _ {k} |}, \\tag {13}", + "image_path": "6db977dfe4ef70d8bce0dcfec4bee3358b912f2dab11527214bc11fc9dd110fc.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "text", + "content": "where, " + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "text", + "content": " is Kronecker delta function that 
equals 1 when the two inputs are equal and 0 otherwise, " + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "inline_equation", + "content": "y_{i}^{(k)},f_{i}^{(k)}\\in \\{0,1\\}" + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "text", + "content": " are the model predictions on clean and adversarial images respectively, of " + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "inline_equation", + "content": "i^{th}" + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "text", + "content": " non-target class of " + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "inline_equation", + "content": "k^{th}" + }, + { + "bbox": [ + 304, + 628, + 545, + 714 + ], + "type": "text", + "content": " successfully attacked image. Finally, we measure the imperceptibility of the perturbations using average structural similarity (SSIM) between pairs of original and adversarial images." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "24256" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 72, + 165, + 181 + ], + "blocks": [ + { + "bbox": [ + 52, + 72, + 165, + 181 + ], + "lines": [ + { + "bbox": [ + 52, + 72, + 165, + 181 + ], + "spans": [ + { + "bbox": [ + 52, + 72, + 165, + 181 + ], + "type": "image", + "image_path": "32ef6eeb8e4b544a25824f5f8631165aa5bc7b21672169b575d3abea9dde8c17.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 184, + 286, + 205 + ], + "lines": [ + { + "bbox": [ + 47, + 184, + 286, + 205 + ], + "spans": [ + { + "bbox": [ + 47, + 184, + 286, + 205 + ], + "type": "text", + "content": 
"Figure 6. Performance of different multi-label attacks with fixed " + }, + { + "bbox": [ + 47, + 184, + 286, + 205 + ], + "type": "inline_equation", + "content": "\\epsilon = 0.05" + }, + { + "bbox": [ + 47, + 184, + 286, + 205 + ], + "type": "text", + "content": " on OpenImages as we increase the target set size." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 168, + 72, + 279, + 181 + ], + "blocks": [ + { + "bbox": [ + 168, + 72, + 279, + 181 + ], + "lines": [ + { + "bbox": [ + 168, + 72, + 279, + 181 + ], + "spans": [ + { + "bbox": [ + 168, + 72, + 279, + 181 + ], + "type": "image", + "image_path": "6dd053c946d34acba1843bbb6779842a7f3b9d734715ca6194ca6bb8c7faadfe.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 225, + 286, + 250 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 225, + 286, + 250 + ], + "spans": [ + { + "bbox": [ + 47, + 225, + 286, + 250 + ], + "type": "text", + "content": "Note that " + }, + { + "bbox": [ + 47, + 225, + 286, + 250 + ], + "type": "inline_equation", + "content": "FR_{N}, FR_{S}" + }, + { + "bbox": [ + 47, + 225, + 286, + 250 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 47, + 225, + 286, + 250 + ], + "type": "inline_equation", + "content": "SSIM" + }, + { + "bbox": [ + 47, + 225, + 286, + 250 + ], + "type": "text", + "content": " should be high while " + }, + { + "bbox": [ + 47, + 225, + 286, + 250 + ], + "type": "inline_equation", + "content": "NT_{R}" + }, + { + "bbox": [ + 47, + 225, + 286, + 250 + ], + "type": "text", + "content": " should be low for a good attack method." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 258, + 170, + 270 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 258, + 170, + 270 + ], + "spans": [ + { + "bbox": [ + 47, + 258, + 170, + 270 + ], + "type": "text", + "content": "5.2. Experimental Results" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 277, + 286, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 277, + 286, + 373 + ], + "spans": [ + { + "bbox": [ + 46, + 277, + 286, + 373 + ], + "type": "text", + "content": "Figure 5 shows the performance of different attack methods on PASCAL-VOC for one- and two-node attacks for different epsilon values using ML-GCN classifier. In Table 1, we show the evaluation across the three MLL models for a fixed " + }, + { + "bbox": [ + 46, + 277, + 286, + 373 + ], + "type": "inline_equation", + "content": "\\epsilon = 0.01" + }, + { + "bbox": [ + 46, + 277, + 286, + 373 + ], + "type": "text", + "content": " for which the performance of all attacks has plateaued3. We also show the evaluation on OpenImages for different target sizes in Fig. 6 and Tab. 2. From the results, we make the following conclusions:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 376, + 286, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 376, + 286, + 483 + ], + "spans": [ + { + "bbox": [ + 46, + 376, + 286, + 483 + ], + "type": "text", + "content": "- As Fig. 
5 shows, all methods achieve high naive fooling rate " + }, + { + "bbox": [ + 46, + 376, + 286, + 483 + ], + "type": "inline_equation", + "content": "FR_{N}" + }, + { + "bbox": [ + 46, + 376, + 286, + 483 + ], + "type": "text", + "content": " given large enough perturbation budget, yet once we filter out the attacks leading to semantically inconsistent predictions, the performance " + }, + { + "bbox": [ + 46, + 376, + 286, + 483 + ], + "type": "inline_equation", + "content": "(FR_{S})" + }, + { + "bbox": [ + 46, + 376, + 286, + 483 + ], + "type": "text", + "content": " of all baselines significantly decreases. However, our GMLA achieves very high semantic-based fooling rate than baselines. From Tab. 1 and 2, our method achieves naive fooling rate " + }, + { + "bbox": [ + 46, + 376, + 286, + 483 + ], + "type": "inline_equation", + "content": "FR_{N}" + }, + { + "bbox": [ + 46, + 376, + 286, + 483 + ], + "type": "text", + "content": " comparable to the other methods but outperforms them over " + }, + { + "bbox": [ + 46, + 376, + 286, + 483 + ], + "type": "inline_equation", + "content": "FR_{S}" + }, + { + "bbox": [ + 46, + 376, + 286, + 483 + ], + "type": "text", + "content": " by a significant margin." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 487, + 286, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 487, + 286, + 631 + ], + "spans": [ + { + "bbox": [ + 46, + 487, + 286, + 631 + ], + "type": "text", + "content": "- Notice from Fig. 5 and 6 that MLA-U has higher naive and semantic-based fooling rates than MLA-C. The reason is the strong positive correlations learned among related cooccurring labels during model training, which MLA-U implicitly exploits. However, MLA-U being oblivious to the relationships among labels can inevitably affect unrelated labels, as shown in Tab. 1 and 2. 
This explains why MLA-U has the highest " + }, + { + "bbox": [ + 46, + 487, + 286, + 631 + ], + "type": "inline_equation", + "content": "N T_{R}" + }, + { + "bbox": [ + 46, + 487, + 286, + 631 + ], + "type": "text", + "content": " across different settings. The difference becomes more apparent as we move to attack larger datasets e.g. OpenImages. This is because, a larger number of labels increases the chances of learning spurious correlations among unrelated labels." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 634, + 286, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 634, + 286, + 693 + ], + "spans": [ + { + "bbox": [ + 46, + 634, + 286, + 693 + ], + "type": "text", + "content": "- Based on Fig. 5, MLA-LP achieves lowest performance compared to other attack methods for both fooling rates on PASCAL-VOC and NUS-WIDE datasets, and does not converge for OpenImages experiments. This is because MLA-LP uses interior point method at each iteration to solve a" + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 309, + 71, + 541, + 114 + ], + "blocks": [ + { + "bbox": [ + 309, + 71, + 541, + 114 + ], + "lines": [ + { + "bbox": [ + 309, + 71, + 541, + 114 + ], + "spans": [ + { + "bbox": [ + 309, + 71, + 541, + 114 + ], + "type": "table", + "html": "
Attack|Ω|=1|Ω|=2|Ω|=3|Ω|=4|Ω|=5
MLA-U0.47 ± 0.020.57 ± 0.030.66 ± 0.030.75 ± 0.040.87 ± 0.03
MLA-C0.32 ± 0.090.31 ± 0.090.09 ± 0.070.06 ± 0.040.0 ± 0.0
GMLA (Ours)0.32 ± 0.140.16 ± 0.120.21 ± 0.130.11 ± 0.070.06 ± 0.04
", + "image_path": "d0423635a6e5826d15830a97174e14d50ccda97393cd19844b098cb255bb2056.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 118, + 545, + 138 + ], + "lines": [ + { + "bbox": [ + 306, + 118, + 545, + 138 + ], + "spans": [ + { + "bbox": [ + 306, + 118, + 545, + 138 + ], + "type": "text", + "content": "Table 2. Percentage of semantically unrelated labels " + }, + { + "bbox": [ + 306, + 118, + 545, + 138 + ], + "type": "inline_equation", + "content": "(NT_R)" + }, + { + "bbox": [ + 306, + 118, + 545, + 138 + ], + "type": "text", + "content": " affected at " + }, + { + "bbox": [ + 306, + 118, + 545, + 138 + ], + "type": "inline_equation", + "content": "\\epsilon = 0.05" + }, + { + "bbox": [ + 306, + 118, + 545, + 138 + ], + "type": "text", + "content": " for ASL[64] on OpenImages." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 310, + 152, + 539, + 277 + ], + "blocks": [ + { + "bbox": [ + 310, + 152, + 539, + 277 + ], + "lines": [ + { + "bbox": [ + 310, + 152, + 539, + 277 + ], + "spans": [ + { + "bbox": [ + 310, + 152, + 539, + 277 + ], + "type": "image", + "image_path": "6c9bf462a4ff3740658d5cf5d127a6069bae81a6e3fab69cd1b1a2f29d732a36.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 280, + 545, + 310 + ], + "lines": [ + { + "bbox": [ + 305, + 280, + 545, + 310 + ], + "spans": [ + { + "bbox": [ + 305, + 280, + 545, + 310 + ], + "type": "text", + "content": "Figure 7. Transferability across models on PASCAL-VOC. The y-axis shows the source model which generates the perturbation and x-axis shows the target model evaluated on that perturbation." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 327, + 545, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 327, + 545, + 445 + ], + "spans": [ + { + "bbox": [ + 304, + 327, + 545, + 445 + ], + "type": "text", + "content": "system of equations, which define the constraints on the target and non-target labels. Because of the complex relationships among different labels, the feasible region for the given linear problem might be empty. This has also been identified by [96]. When the LP problem has a feasible solution, MLA-LP successfully finds the perturbation that satisfy the attack constraints. This explains why, for the small number of successfully attacked images, MLA-LP affects the least percentage of non-targeted labels, achieving low " + }, + { + "bbox": [ + 304, + 327, + 545, + 445 + ], + "type": "inline_equation", + "content": "\\mathrm{NT}_R" + }, + { + "bbox": [ + 304, + 327, + 545, + 445 + ], + "type": "text", + "content": " as shown in Tab. 1." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 450, + 545, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 450, + 545, + 522 + ], + "spans": [ + { + "bbox": [ + 304, + 450, + 545, + 522 + ], + "type": "text", + "content": "- Each attack method produces imperceptible perturbations, as we constrain the maximum infinity norm of the perturbation to 0.01 (on images with pixel values between 0 to 1). Notice also from Table 1 that the average SSIM scores between the adversarial and original images is very close to 1, showing imperceptibility of perturbations." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 526, + 545, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 526, + 545, + 681 + ], + "spans": [ + { + "bbox": [ + 304, + 526, + 545, + 681 + ], + "type": "text", + "content": "- Notice from Fig. 
6 that MLA-C fails to successfully attack large-scale datasets and its performance drops drastically as we increase the target set size. As mentioned earlier, this is attributed to the observation that gradients of target and non-targeted classes are often opposite (as shown in Fig. 8) and as MLA-C optimizes the target and non-target loss simultaneously, the resulting perturbations are sub-optimal. From Tab. 2, MLA-C achieves lowest " + }, + { + "bbox": [ + 304, + 526, + 545, + 681 + ], + "type": "inline_equation", + "content": "\\mathrm{NT}_R" + }, + { + "bbox": [ + 304, + 526, + 545, + 681 + ], + "type": "text", + "content": " for target sizes greater than 2 but also performs poorly on fooling rates. Note that despite achieving high fooling rates " + }, + { + "bbox": [ + 304, + 526, + 545, + 681 + ], + "type": "inline_equation", + "content": "\\mathrm{FR}_N" + }, + { + "bbox": [ + 304, + 526, + 545, + 681 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 526, + 545, + 681 + ], + "type": "inline_equation", + "content": "\\mathrm{FR}_G" + }, + { + "bbox": [ + 304, + 526, + 545, + 681 + ], + "type": "text", + "content": ", our GMLA method affects very small percentage of semantically unrelated labels, which shows the success of our constraint proposed in (6)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": "Attack Transferability. Figure 7 shows the cross-model transferability of different attacks. 
For each source model," + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 58, + 702, + 285, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 285, + 713 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 285, + 713 + ], + "type": "text", + "content": "3We show results of ablation experiment on GMLA in supplementary." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "24257" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 69, + 164, + 151 + ], + "blocks": [ + { + "bbox": [ + 52, + 69, + 164, + 151 + ], + "lines": [ + { + "bbox": [ + 52, + 69, + 164, + 151 + ], + "spans": [ + { + "bbox": [ + 52, + 69, + 164, + 151 + ], + "type": "image", + "image_path": "cf4f16901dbae9f6a2bf53647b471bfe5733bb9aa4dc2b1fd0440a56da6f002d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 155, + 287, + 204 + ], + "lines": [ + { + "bbox": [ + 47, + 155, + 287, + 204 + ], + "spans": [ + { + "bbox": [ + 47, + 155, + 287, + 204 + ], + "type": "text", + "content": "Figure 8. 
Stacked bar charts showing the correlation between the gradient of the loss on target labels " + }, + { + "bbox": [ + 47, + 155, + 287, + 204 + ], + "type": "inline_equation", + "content": "g_{\\boldsymbol{x}, \\Psi_{\\boldsymbol{x}}}" + }, + { + "bbox": [ + 47, + 155, + 287, + 204 + ], + "type": "text", + "content": " and on other labels " + }, + { + "bbox": [ + 47, + 155, + 287, + 204 + ], + "type": "inline_equation", + "content": "g_{\\boldsymbol{x}, \\Psi_{\\boldsymbol{x}}}" + }, + { + "bbox": [ + 47, + 155, + 287, + 204 + ], + "type": "text", + "content": " for different sizes of the target set on OpenImages. Left: Using (3) as objective. Right: using our proposed (6) that optimizes the loss on target labels while keeping the loss on non-target labels the same (as a constraint)." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 169, + 70, + 279, + 151 + ], + "blocks": [ + { + "bbox": [ + 169, + 70, + 279, + 151 + ], + "lines": [ + { + "bbox": [ + 169, + 70, + 279, + 151 + ], + "spans": [ + { + "bbox": [ + 169, + 70, + 279, + 151 + ], + "type": "image", + "image_path": "809578826816b7b0766edb079e1ba6eff1f7f675c241b895830aa9e5f59b8d7a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 53, + 216, + 282, + 331 + ], + "blocks": [ + { + "bbox": [ + 53, + 216, + 282, + 331 + ], + "lines": [ + { + "bbox": [ + 53, + 216, + 282, + 331 + ], + "spans": [ + { + "bbox": [ + 53, + 216, + 282, + 331 + ], + "type": "image", + "image_path": "a2058a7676f0f1da527cdc506401ccaeaa97740fbe2f35c5679231a0eda71ac2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 338, + 288, + 397 + ], + "lines": [ + { + "bbox": [ + 46, + 338, + 288, + 397 + ], + "spans": [ + { + "bbox": [ + 46, + 338, + 288, + 397 + ], + "type": "text", + "content": "Figure 9. 
Results of attacking ML-GCN on PASCAL-VOC (first two columns) and NUS-WIDE (last two columns). Each column shows the model predictions for clean " + }, + { + "bbox": [ + 46, + 338, + 288, + 397 + ], + "type": "inline_equation", + "content": "(\\epsilon = 0)" + }, + { + "bbox": [ + 46, + 338, + 288, + 397 + ], + "type": "text", + "content": " and attacked images. Rounded rectangles group semantically related labels. Inconsistent predictions caused around target labels are shown with red rectangles. The red labels at the top are targeted labels and the arrows show the relationships." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 415, + 287, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 415, + 287, + 536 + ], + "spans": [ + { + "bbox": [ + 46, + 415, + 287, + 536 + ], + "type": "text", + "content": "we compute the perturbations (scaled to " + }, + { + "bbox": [ + 46, + 415, + 287, + 536 + ], + "type": "inline_equation", + "content": "\\epsilon = 0.1" + }, + { + "bbox": [ + 46, + 415, + 287, + 536 + ], + "type": "text", + "content": ") for images and evaluate the target models exclusively on the images that were successfully attacked by the respective source model (hence the diagonal values are all 1). Notice that although all attacks, other than MLA-LP, are transferable, GMLA semantic attack transfers better and achieves the highest " + }, + { + "bbox": [ + 46, + 415, + 287, + 536 + ], + "type": "inline_equation", + "content": "\\mathrm{FR}_N" + }, + { + "bbox": [ + 46, + 415, + 287, + 536 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 415, + 287, + 536 + ], + "type": "inline_equation", + "content": "\\mathrm{FR}_S" + }, + { + "bbox": [ + 46, + 415, + 287, + 536 + ], + "type": "text", + "content": ". From Table 1, notice that all attacks were able to achieve non-trivial graph-based fooling rate. 
However, GMLA is the most effective method to generate semantically consistent and generally transferrable attacks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 540, + 287, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 540, + 287, + 649 + ], + "spans": [ + { + "bbox": [ + 46, + 540, + 287, + 649 + ], + "type": "text", + "content": "Gradient Correlations. Figure 8 shows the correlation between the gradient of the loss on target labels (to be modified), " + }, + { + "bbox": [ + 46, + 540, + 287, + 649 + ], + "type": "inline_equation", + "content": "g_{x,\\Psi_x}" + }, + { + "bbox": [ + 46, + 540, + 287, + 649 + ], + "type": "text", + "content": ", and on other labels (to be fixed), " + }, + { + "bbox": [ + 46, + 540, + 287, + 649 + ], + "type": "inline_equation", + "content": "g_{x,\\bar{\\Psi}_x}" + }, + { + "bbox": [ + 46, + 540, + 287, + 649 + ], + "type": "text", + "content": ", for different sizes of the target set on OpenImages. Notice that adding the two losses leads to highly negatively correlated gradients for them. However, only optimizing the loss on target labels while keeping the loss on non-target labels the same (as a constraint) leads to significant increase in gradient correlations, which can justify the success of GMLA." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "text", + "content": "Qualitative Results. Figure 9 shows qualitative results of attacking ML-GCN using PASCAL-VOC and NUS-WIDE. Notice that in all four cases, respectively, MLA-U and MLA-C lead to inconsistencies. 
For example, to turn off the boat label in the first image, MLA-U attacks the boat and" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 310, + 70, + 541, + 242 + ], + "blocks": [ + { + "bbox": [ + 310, + 70, + 541, + 242 + ], + "lines": [ + { + "bbox": [ + 310, + 70, + 541, + 242 + ], + "spans": [ + { + "bbox": [ + 310, + 70, + 541, + 242 + ], + "type": "image", + "image_path": "9dc3923be7314c7ad03040db56f9d9cdbf2019286621f15d0504a561eba448eb.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 246, + 547, + 316 + ], + "lines": [ + { + "bbox": [ + 304, + 246, + 547, + 316 + ], + "spans": [ + { + "bbox": [ + 304, + 246, + 547, + 316 + ], + "type": "text", + "content": "Figure 10. Since the adversarial images have imperceptible changes, we visualize the perturbations computed using different methods for various target classes of PASCAL-VOC. The perturbations are computed by setting the maximum budget " + }, + { + "bbox": [ + 304, + 246, + 547, + 316 + ], + "type": "inline_equation", + "content": "\\epsilon = 0.01" + }, + { + "bbox": [ + 304, + 246, + 547, + 316 + ], + "type": "text", + "content": " and are scaled for visualization. For each perturbation, we compute it's dot product (D) with the perturbation computed using our proposed attack - GMLA, and the structural similarity (S) of the original and the adversarial image (after adding the perturbation)." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 330, + 545, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 330, + 545, + 437 + ], + "spans": [ + { + "bbox": [ + 304, + 330, + 545, + 437 + ], + "type": "text", + "content": "craft labels but does not attack the vehicle label, leading to semantically inconsistent prediction. MLA-C successfully attacks boat, but keeps all other labels fixed, causing inconsistent predictions. 
For the second image, MLA-U successfully kept consistency around one group of labels but causes inconsistency in the other group. Similar to MLA-C, MLA-LP causes semantic inconsistencies for all images. Notice that in all cases, GMLA successfully modifies the necessary labels to ensure semantic consistency." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 438, + 545, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 438, + 545, + 522 + ], + "spans": [ + { + "bbox": [ + 304, + 438, + 545, + 522 + ], + "type": "text", + "content": "In Figure 10, we visualize the perturbations computed by different methods and compare the SSIM (S) of baselines with GMLA. We also show the dot product (D) between the perturbation computed using each baseline method and the one computed using GMLA. We can see that GMLA finds different attack directions than the baseline methods, which results in semantically consistent and transferable attacks." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 534, + 384, + 547 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 534, + 384, + 547 + ], + "spans": [ + { + "bbox": [ + 306, + 534, + 384, + 547 + ], + "type": "text", + "content": "6. Conclusions" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 554, + 545, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 554, + 545, + 639 + ], + "spans": [ + { + "bbox": [ + 304, + 554, + 545, + 639 + ], + "type": "text", + "content": "We developed an efficient framework to generate attacks for multi-label recognition that ensures semantic consistency of the output labels based on relationships among labels while effectively attacking a large number of labels. By extensive experiments on three datasets and several MLL models, we showed that our method generates both semantically consistent and successful adversarial attacks." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 651, + 409, + 664 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 651, + 409, + 664 + ], + "spans": [ + { + "bbox": [ + 306, + 651, + 409, + 664 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 665, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 714 + ], + "type": "text", + "content": "This work is sponsored by Khoury College of Northeastern funds, DARPA (HR00112220001), NSF (IIS-2115110), ARO (W911NF2110276). Content does not necessarily reflect the position/policy of the Government." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "24258" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "text", + "content": "[1] Abhishek Aich, Calvin-Khang Ta, Akash Gupta, Chengyu Song, Srikanth Krishnamurthy, Salman Asif, and Amit Roy-Chowdhury. Gama: Generative adversarial multi-object scene attacks. 
Advances in Neural Information Processing Systems, 35:36914-36930, 2022. 1, 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 147, + 288, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 147, + 288, + 213 + ], + "spans": [ + { + "bbox": [ + 53, + 147, + 288, + 213 + ], + "type": "text", + "content": "[2] Abhishek Aich, Shasha Li, Chengyu Song, M Salman Asif, Srikanth V Krishnamurthy, and Amit K Roy-Chowdhury. Leveraging local patch differences in multi-object scenes for generative adversarial attacks. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1308-1318, 2023. 1, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 214, + 286, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 214, + 286, + 236 + ], + "spans": [ + { + "bbox": [ + 53, + 214, + 286, + 236 + ], + "type": "text", + "content": "[3] N. Akhtar and A. Mian. Threat of adversarial attacks on deep learning in computer vision: A survey. arXiv, 2018. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 238, + 286, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 238, + 286, + 270 + ], + "spans": [ + { + "bbox": [ + 53, + 238, + 286, + 270 + ], + "type": "text", + "content": "[4] A. Athalye, N. Carlini, and D. A. Wagner. Obfuscated gradients give a false sense of security: Circumventing defenses to adversarial examples. 2018. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 272, + 286, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 272, + 286, + 304 + ], + "spans": [ + { + "bbox": [ + 53, + 272, + 286, + 304 + ], + "type": "text", + "content": "[5] Yuanhao Ban and Yinpeng Dong. Pre-trained adversarial perturbations. In Advances in Neural Information Processing Systems, pages 1196-1209. Curran Associates, Inc., 2022. 
2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 306, + 286, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 306, + 286, + 350 + ], + "spans": [ + { + "bbox": [ + 53, + 306, + 286, + 350 + ], + "type": "text", + "content": "[6] Emanuel Ben-Baruch, Tal Ridnik, Itamar Friedman, Avi Ben-Cohen, Nadav Zamir, Asaf Noy, and Lihi Zelnik-Manor. Multi-label classification with partial annotations using class-aware selective loss. 2022. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 351, + 286, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 351, + 286, + 396 + ], + "spans": [ + { + "bbox": [ + 53, + 351, + 286, + 396 + ], + "type": "text", + "content": "[7] Zikui Cai, Xinxin Xie, Shasha Li, Mingjun Yin, Chengyu Song, Srikanth V. Krishnamurthy, Amit K. Roy-Chowdhury, and M. Salman Asif. Context-aware transfer attacks for object detection. ArXiv, 2021. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 396, + 286, + 451 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 396, + 286, + 451 + ], + "spans": [ + { + "bbox": [ + 53, + 396, + 286, + 451 + ], + "type": "text", + "content": "[8]Zikui Cai, Shantanu Rane, Alejandro E. Brito, Chengyu Song,Srikanth V.Krishnamurthy,Amit K.Roy-Chowdhury, and M.Salman Asif.Zero-query transfer attacks on context-aware object detectors.IEEE Conference on Computer Vision and Pattern Recognition,2022.3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 453, + 286, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 453, + 286, + 485 + ], + "spans": [ + { + "bbox": [ + 53, + 453, + 286, + 485 + ], + "type": "text", + "content": "[9] N. Carlini and D. Wagner. Adversarial examples are not easily detected: Bypassing ten detection methods. Workshop on Artificial Intelligence and Security, 2017. 
1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 487, + 286, + 519 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 487, + 286, + 519 + ], + "spans": [ + { + "bbox": [ + 48, + 487, + 286, + 519 + ], + "type": "text", + "content": "[10] N. Carlini and D. Wagner. Towards evaluating the robustness of neural networks. IEEE Symposium on Security and Privacy, 2017. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 521, + 286, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 521, + 286, + 553 + ], + "spans": [ + { + "bbox": [ + 48, + 521, + 286, + 553 + ], + "type": "text", + "content": "[11] Y. Carmon, A. Raghunathan, L. Schmidt, P. Liang, and J. C. Duchi. Unlabeled data improves adversarial robustness. Neural Information Processing Systems, 2019. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 555, + 286, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 555, + 286, + 597 + ], + "spans": [ + { + "bbox": [ + 48, + 555, + 286, + 597 + ], + "type": "text", + "content": "[12] P.-Y. Chen, Y. Sharma, H. Zhang, J. Yi, and C.-J. Hsieh. Ead: Elastic-net attacks to deep neural networks via adversarial examples. AAAI Conference on Artificial Intelligence, 2018. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 600, + 286, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 600, + 286, + 643 + ], + "spans": [ + { + "bbox": [ + 48, + 600, + 286, + 643 + ], + "type": "text", + "content": "[13] T. Chen, M. Xu, X. Hui, H. Wu, and L. Lin. Learning semantic-specific graph representation for multi-label image recognition. IEEE International Conference on Computer Vision, 2019. 
2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 646, + 286, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 646, + 286, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 646, + 286, + 689 + ], + "type": "text", + "content": "[14] Zhao-Min Chen, Xiu-Shen Wei, Xin Jin, and Yanwen Guo. Multi-label image recognition with joint class-aware map disentangling and label correlation embedding. IEEE International Conference on Multimedia and Expo, 2019. 1" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 691, + 286, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 286, + 713 + ], + "type": "text", + "content": "[15] Z. M. Chen, X. S. Wei, P. Wang, and Y. Guo. Multi-label image recognition with graph convolutional networks. IEEE" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "Conference on Computer Vision and Pattern Recognition, abs/1904.03582, 2019. 5, 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 96, + 545, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 96, + 545, + 140 + ], + "spans": [ + { + "bbox": [ + 308, + 96, + 545, + 140 + ], + "type": "text", + "content": "[16] T. S. Chua, J. Tang, R. Hong, H. Li, Z. Luo, and Y. T. Zheng. Nus-wide: A real-world web image database from national university of bangalore. ACM International Conference on Image and Video Retrieval, 2009. 
5" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 142, + 545, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 142, + 545, + 175 + ], + "spans": [ + { + "bbox": [ + 308, + 142, + 545, + 175 + ], + "type": "text", + "content": "[17] F. Croce and M. Hein. Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks. ArXiv, 2020. 4" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 177, + 545, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 177, + 545, + 209 + ], + "spans": [ + { + "bbox": [ + 308, + 177, + 545, + 209 + ], + "type": "text", + "content": "[18] S. D. Dao, E. Zhao, D. Phung, and J. Cai. Multi-label image classification with contrastive learning. arXiv preprint, arXiv:2107.11626, 2021. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 211, + 545, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 211, + 545, + 255 + ], + "spans": [ + { + "bbox": [ + 308, + 211, + 545, + 255 + ], + "type": "text", + "content": "[19] J. Deng, N. Ding, Y. Jia, A. Frome, K. Murphy, S. Bengio, Y. Li, H. Neven, and H. Adam. Large-scale object classification using label relation graphs. European Conference on Computer Vision, 2014. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 257, + 545, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 257, + 545, + 300 + ], + "spans": [ + { + "bbox": [ + 308, + 257, + 545, + 300 + ], + "type": "text", + "content": "[20] G. W. Ding, Y. Sharma, K. Y. Lui, and R. Huang. Max-margin adversarial (mma) training: Direct input space margin maximization through adversarial training. arXiv, 2020. 
2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 302, + 545, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 302, + 545, + 346 + ], + "spans": [ + { + "bbox": [ + 308, + 302, + 545, + 346 + ], + "type": "text", + "content": "[21] Zixuan Ding, Ao Wang, Hui Chen, Qiang Zhang, Pengzhang Liu, Yongjun Bao, Weipeng Yan, and Jungong Han. Exploring structured semantic prior for multi label recognition with incomplete labels. 2023. 1" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 348, + 545, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 348, + 545, + 413 + ], + "spans": [ + { + "bbox": [ + 308, + 348, + 545, + 413 + ], + "type": "text", + "content": "[22] Junhao Dong, Seyed-Mohsen Moosavi-Dezfooli, Jianhuang Lai, and Xiaohua Xie. The enemy of my enemy is my friend: Exploring inverse adversaries for improving adversarial training. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 24678–24687, 2023. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 415, + 545, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 415, + 545, + 448 + ], + "spans": [ + { + "bbox": [ + 308, + 415, + 545, + 448 + ], + "type": "text", + "content": "[23] Y. Dong, Z. Deng, T. Pang, H. Su, and J. Zhu. Adversarial distributional training for robust deep learning. arXiv, 2020. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 450, + 545, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 450, + 545, + 494 + ], + "spans": [ + { + "bbox": [ + 308, + 450, + 545, + 494 + ], + "type": "text", + "content": "[24] M. Everingham, S. M. A. Eslami, L. Van-Gool, C. K. I. Williams, J. Winn, and A. Zisserman. The Pascal visual object classes (voc) challenge. International Journal of Computer Vision, 2010. 
5" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 496, + 545, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 496, + 545, + 529 + ], + "spans": [ + { + "bbox": [ + 308, + 496, + 545, + 529 + ], + "type": "text", + "content": "[25] K. Eykholt, I. Evtimov, E. Fernandes, B. Li, A. Rahmati, F. Tramér, A. Prakash, T. Kohno, and D. X. Song. Physical adversarial examples for object detectors. arXiv, 2018. 1" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 530, + 545, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 530, + 545, + 563 + ], + "spans": [ + { + "bbox": [ + 308, + 530, + 545, + 563 + ], + "type": "text", + "content": "[26] L. Feng, B. An, and S. He. Collaboration based multi-label learning. AAAI Conference on Artificial Intelligence, 2019. 1" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 565, + 545, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 565, + 545, + 597 + ], + "spans": [ + { + "bbox": [ + 308, + 565, + 545, + 597 + ], + "type": "text", + "content": "[27] I. J. Goodfellow, J. Shlens, and C. Szegedy. Explaining and harnessing adversarial examples. International Conference on Learning Representations, 2015. 1" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 600, + 545, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 600, + 545, + 643 + ], + "spans": [ + { + "bbox": [ + 308, + 600, + 545, + 643 + ], + "type": "text", + "content": "[28] W. He, J. Wei, X. Chen, N. Carlini, and D. Song. Adversarial example defense: Ensembles of weak defenses are not strong. USENIX Workshop on Offensive Technologies, 2017. 
2" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 308, + 645, + 545, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 645, + 545, + 667 + ], + "spans": [ + { + "bbox": [ + 308, + 645, + 545, + 667 + ], + "type": "text", + "content": "[29] D. Hendrycks, K. Lee, and M. Mazeika. Using pre-training can improve model robustness and uncertainty. 2019. 2" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "type": "text", + "content": "[30] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. Natural adversarial examples. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15262-15271, 2021. 1" + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "24259" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 128 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 128 + ], + "type": "text", + "content": "[31] Lei Hsiung, Yun-Yun Tsai, Pin-Yu Chen, and Tsung-Yi Ho. Towards compositional adversarial robustness: Generalizing adversarial training to composite semantic perturbations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24658-24667, 2023. 
1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 129, + 287, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 129, + 287, + 150 + ], + "spans": [ + { + "bbox": [ + 48, + 129, + 287, + 150 + ], + "type": "text", + "content": "[32] S. Hu, L. Ke, X. Wang, and S. Lyu. Tkml-ap: Adversarial attacks to top-k multi-label learning. arXiv, 2021. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 152, + 287, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 152, + 287, + 184 + ], + "spans": [ + { + "bbox": [ + 48, + 152, + 287, + 184 + ], + "type": "text", + "content": "[33] D. T. Huynh and E. Elhamifar. Interactive multi-label cnn learning with partial labels. IEEE Conference on Computer Vision and Pattern Recognition, 2020. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 186, + 287, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 186, + 287, + 228 + ], + "spans": [ + { + "bbox": [ + 48, + 186, + 287, + 228 + ], + "type": "text", + "content": "[34] Tooba Imtiaz, Morgan Kohler, Jared Miller, Zifeng Wang, Mario Sznaier, Octavia I Camps, and Jennifer G Dy. Saif: Sparse adversarial and interpretable attack framework. arXiv preprint arXiv:2212.07495, 2022. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 231, + 287, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 231, + 287, + 262 + ], + "spans": [ + { + "bbox": [ + 48, + 231, + 287, + 262 + ], + "type": "text", + "content": "[35] J. Li R. Ji, H. Liu, X. Hong, Y. Gao, and Q. Tian. Universal perturbation attack against image retrieval. International Conference on Computer Vision, 2019. 
1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 264, + 287, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 264, + 287, + 308 + ], + "spans": [ + { + "bbox": [ + 48, + 264, + 287, + 308 + ], + "type": "text", + "content": "[36] Jinyuan Jia, Wenjie Qu, and Neil Zhenqiang Gong. Multiguard: Provably robust multi-label classification against adversarial examples. Advances in Neural Information Processing Systems, 2022. 1, 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 309, + 287, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 309, + 287, + 341 + ], + "spans": [ + { + "bbox": [ + 48, + 309, + 287, + 341 + ], + "type": "text", + "content": "[37] Youngwook Kim, Jae Myung Kim, Zeynep Akata, and Jungwoo Lee. Large loss matters in weakly supervised multi-label classification. 2022. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 342, + 267, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 342, + 267, + 354 + ], + "spans": [ + { + "bbox": [ + 48, + 342, + 267, + 354 + ], + "type": "text", + "content": "[38] Takumi Kobayashi. Two-way multi-label loss. 2023. 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 355, + 287, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 355, + 287, + 388 + ], + "spans": [ + { + "bbox": [ + 48, + 355, + 287, + 388 + ], + "type": "text", + "content": "[39] A. Kurakin, I. Goodfellow, and S. Bengio. Adversarial machine learning at scale. International Conference on Learning Representations, 2017. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 388, + 287, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 388, + 287, + 454 + ], + "spans": [ + { + "bbox": [ + 48, + 388, + 287, + 454 + ], + "type": "text", + "content": "[40] A. Kuznetsova, H. Rom, N. Alldrin, J. Uijlings, I. Krasin, J. Pont-Tuset, S. Kamali, S. 
Popov, M. Malloci, A. Kolesnikov, T. Duerig, and V. Ferrari. The open images dataset v4: Unified image classification, object detection, and visual relationship detection at scale. International Journal of Computer Vision, 2016. 2, 4, 5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 456, + 287, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 456, + 287, + 498 + ], + "spans": [ + { + "bbox": [ + 48, + 456, + 287, + 498 + ], + "type": "text", + "content": "[41] J. Lanchantin, T. Wang, V. Ordonez, and Y. Qi. General multi-label image classification with transformers. IEEE Conference on Computer Vision and Pattern Recognition, 2021. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 500, + 287, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 500, + 287, + 532 + ], + "spans": [ + { + "bbox": [ + 48, + 500, + 287, + 532 + ], + "type": "text", + "content": "[42] K. Lee, K. Lee, H. Lee, and J. Shin. A simple unified framework for detecting out-of-distribution samples and adversarial attacks. 2018. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 534, + 287, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 534, + 287, + 567 + ], + "spans": [ + { + "bbox": [ + 48, + 534, + 287, + 567 + ], + "type": "text", + "content": "[43] Peng Li, Peng Chen, Yonghong Xie, and Dezheng Zhang. Bi-modal learning with channel-wise attention for multi-label image classification. IEEE Access, 2020. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 568, + 287, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 568, + 287, + 601 + ], + "spans": [ + { + "bbox": [ + 48, + 568, + 287, + 601 + ], + "type": "text", + "content": "[44] Q. Li, M. Qiao, W. Bian, and D. Tao. Conditional graphical lasso for multi-label image classification. IEEE Conference on Computer Vision and Pattern Recognition, 2016. 
2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 602, + 287, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 602, + 287, + 634 + ], + "spans": [ + { + "bbox": [ + 48, + 602, + 287, + 634 + ], + "type": "text", + "content": "[45] Q. Li, X. Peng, Y. Qiao, and Q. Peng. Learning label correlations for multi-label image recognition with graph networks. Pattern Recognition Letters, 2020. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 635, + 287, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 287, + 667 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 287, + 667 + ], + "type": "text", + "content": "[46] X. Li, F. Zhao, and Y. Guo. Multi-label image classification with a probabilistic label enhancement model. In UAI, 2014. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "type": "text", + "content": "[47] Y. Li and L. Yang. More correlations better performance: Fully associative networks for multi-label image classification. International Conference on Pattern Recognition, 2021. 2" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "text", + "content": "[48] Y. Li, Y. Song, and J. Luo. Improving pairwise ranking for multi-label image classification. IEEE Conference on Computer Vision and Pattern Recognition, 2017. 
2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 107, + 545, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 138 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 138 + ], + "type": "text", + "content": "[49] Z. Li, W. Lu, Z. Sun, and W. Xing. Improving multi-label classification using scene cues. Multimedia Tools and Applications, 2017. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 140, + 545, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 140, + 545, + 161 + ], + "spans": [ + { + "bbox": [ + 308, + 140, + 545, + 161 + ], + "type": "text", + "content": "[50] Dekun Lin. Probability guided loss for long-tailed multi-label image classification. 2023. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 162, + 545, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 162, + 545, + 205 + ], + "spans": [ + { + "bbox": [ + 308, + 162, + 545, + 205 + ], + "type": "text", + "content": "[51] A. Madry, A. Makelov, L. Schmidt, D. Tsipras, and A. Vladu. Towards deep learning models resistant to adversarial attacks. International Conference on Learning Representations, 2018. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 206, + 545, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 206, + 545, + 237 + ], + "spans": [ + { + "bbox": [ + 308, + 206, + 545, + 237 + ], + "type": "text", + "content": "[52] S. Melacci, G. Ciravegna, A. Sotgiu, A. Demontis, B. Biggio, M. Gori, and F. Roli. Domain knowledge alleviates adversarial attacks in multi-label classifiers. 2021. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 239, + 545, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 239, + 545, + 281 + ], + "spans": [ + { + "bbox": [ + 308, + 239, + 545, + 281 + ], + "type": "text", + "content": "[53] J.-H. Metzen, M.-C. 
Kumar, T. Brox, and V. Fischer. Universal adversarial perturbations against semantic image segmentation. International Conference on Computer Vision, 2019. 1" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 283, + 545, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 283, + 545, + 304 + ], + "spans": [ + { + "bbox": [ + 308, + 283, + 545, + 304 + ], + "type": "text", + "content": "[54] G. A. Miller. Wordnet: a lexical database for english. Communications of the ACM, 38(11), 1995. 5" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 305, + 545, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 305, + 545, + 348 + ], + "spans": [ + { + "bbox": [ + 308, + 305, + 545, + 348 + ], + "type": "text", + "content": "[55] S. Moosavi-Dezfooli, A. Fawzi, and P. Frossard. Deepfool: a simple and accurate method to fool deep neural networks. IEEE Conference on Computer Vision and Pattern Recognition, 2016. 6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 350, + 545, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 350, + 545, + 392 + ], + "spans": [ + { + "bbox": [ + 308, + 350, + 545, + 392 + ], + "type": "text", + "content": "[56] J. Nam, E. L. Mencia, H. J. Kim, and J. Furnkranz. Maximizing subset accuracy with recurrent neural networks in multi-label classification. Neural Information Processing Systems, 2017. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 393, + 545, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 393, + 545, + 426 + ], + "spans": [ + { + "bbox": [ + 308, + 393, + 545, + 426 + ], + "type": "text", + "content": "[57] T. Pang, K. Xu, C. Du, N. Chen, and J. Zhu. Improving adversarial robustness via promoting ensemble diversity. International Conference on Machine learning, 2019. 
2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 427, + 545, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 427, + 545, + 458 + ], + "spans": [ + { + "bbox": [ + 308, + 427, + 545, + 458 + ], + "type": "text", + "content": "[58] T. Pang, K. Xu, Y. Dong, C. Du, N. Chen, and J. Zhu. Rethinking softmax cross-entropy loss for adversarial robustness. arXiv, 2020." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 460, + 545, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 460, + 545, + 492 + ], + "spans": [ + { + "bbox": [ + 308, + 460, + 545, + 492 + ], + "type": "text", + "content": "[59] T. Pang, X. Yang, Y. Dong, K. Xu, H. Su, and J. Zhu. Boosting adversarial training with hypersphere embedding. Neural Information Processing Systems, 2020. 2" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 494, + 545, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 494, + 545, + 546 + ], + "spans": [ + { + "bbox": [ + 308, + 494, + 545, + 546 + ], + "type": "text", + "content": "[60] Nicolas Papernot, Patrick Mcdaniel, Somesh Jha, Matt Fredrikson, Z. Berkay Celik, and Ananthram Swami. The limitations of deep learning in adversarial settings. IEEE European Symposium on Security and Privacy (EuroS&P), 2016. 1" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 308, + 548, + 545, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 548, + 545, + 581 + ], + "spans": [ + { + "bbox": [ + 308, + 548, + 545, + 581 + ], + "type": "text", + "content": "[61] Tao Pu, Tianshui Chen, Hefeng Wu, and Liang Lin. Semantic-aware representation blending for multi-label image recognition with partial labels. 2022. 
1" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 308, + 582, + 545, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 582, + 545, + 634 + ], + "spans": [ + { + "bbox": [ + 308, + 582, + 545, + 634 + ], + "type": "text", + "content": "[62] Zeyu Qin, Yanbo Fan, Yi Liu, Li Shen, Yong Zhang, Jue Wang, and Baoyuan Wu. Boosting the transferability of adversarial attacks with reverse adversarial perturbation. Advances in Neural Information Processing Systems, 35:29845-29858, 2022. 2" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 308, + 636, + 545, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 636, + 545, + 669 + ], + "spans": [ + { + "bbox": [ + 308, + 636, + 545, + 669 + ], + "type": "text", + "content": "[63] T. Ridnik, H. Lawen, A. Noy, and I. Friedman. Tresnet: High performancegpu-dedicated architecture. ArXiv preprint arXiv:2003.13630, 2020.5" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 308, + 670, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 670, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 670, + 545, + 712 + ], + "type": "text", + "content": "[64] Tal Ridnik, Emanuel Ben-Baruch, Nadav Zamir, Asaf Noy, Itamar Friedman, Matan Protter, and Lihi Zelnik-Manor. Asymmetric loss for multi-label classification. 2021. 
5, 6, 7" + } + ] + } + ], + "index": 34 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "24260" + } + ] + } + ], + "index": 36 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "text", + "content": "[65] Tal Ridnik, Gilad Sharir, Avi Ben-Cohen, Emanuel Ben-Baruch, and Asaf Noy. Ml-decoder: Scalable and versatile classification head. 2023. 5, 6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 107, + 288, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 107, + 288, + 173 + ], + "spans": [ + { + "bbox": [ + 48, + 107, + 288, + 173 + ], + "type": "text", + "content": "[66] Jérôme Rony, Luiz G Hafemann, Luiz S Oliveira, Ismail Ben Ayed, Robert Sabourin, and Eric Granger. Decoupling direction and norm for efficient gradient-based 12 adversarial attacks and defenses. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4322-4330, 2019. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 175, + 288, + 208 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 175, + 288, + 208 + ], + "spans": [ + { + "bbox": [ + 48, + 175, + 288, + 208 + ], + "type": "text", + "content": "[67] J. Cohen and E. Rosenfeld and Z. Kolter. Certified adversarial robustness via randomized smoothing. International Conference on Machine learning, 2019. 
2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 210, + 287, + 253 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 210, + 287, + 253 + ], + "spans": [ + { + "bbox": [ + 48, + 210, + 287, + 253 + ], + "type": "text", + "content": "[68] A. Shafahi, M. Najibi, A. Ghiasi, Z. Xu, J. Dickerson, C. Studer, L. Davis, G. Taylor, and T. Goldstein. Adversarial training for free! Neural Information Processing Systems, 2019." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 256, + 288, + 311 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 256, + 288, + 311 + ], + "spans": [ + { + "bbox": [ + 48, + 256, + 288, + 311 + ], + "type": "text", + "content": "[69] Nasim Shafiee and Ehsan Elhamifar. Zero-shot attribute attacks on fine-grained recognition models. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part V, pages 262-282. Springer, 2022. 1, 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 312, + 287, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 312, + 287, + 357 + ], + "spans": [ + { + "bbox": [ + 48, + 312, + 287, + 357 + ], + "type": "text", + "content": "[70] Nitish Shukla and Sudipta Banerjee. Generating adversarial attacks in the latent space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 730-739, 2023. 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 358, + 287, + 391 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 358, + 287, + 391 + ], + "spans": [ + { + "bbox": [ + 48, + 358, + 287, + 391 + ], + "type": "text", + "content": "[71] Q. Song, H. Jin, X. Huang, and X. Hu. Multi-label adversarial perturbations. IEEE International Conference on Data Mining, 2018. 
1, 2, 3, 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 393, + 287, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 393, + 287, + 415 + ], + "spans": [ + { + "bbox": [ + 48, + 393, + 287, + 415 + ], + "type": "text", + "content": "[72] R. Speer, J. Chin, and C. Havasi. Conceptnet 5.5: An open multilingual graph of general knowledge. 2017. 5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 417, + 287, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 417, + 287, + 460 + ], + "spans": [ + { + "bbox": [ + 48, + 417, + 287, + 460 + ], + "type": "text", + "content": "[73] C. Szegedy, W. Zaremba, I. Sutskever, J. Bruna, D. Erhan, I. Goodfellow, and R. Fergus. Intriguing properties of neural networks. International Conference on Learning Representations, 2014. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 462, + 287, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 462, + 287, + 485 + ], + "spans": [ + { + "bbox": [ + 48, + 462, + 287, + 485 + ], + "type": "text", + "content": "[74] N. Tursynbek, A. Petiushko, and I. Oseledets. Geometry-inspired top-k adversarial perturbations. arXiv, 2020. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 487, + 287, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 487, + 287, + 529 + ], + "spans": [ + { + "bbox": [ + 48, + 487, + 287, + 529 + ], + "type": "text", + "content": "[75] J. Uesato, J. B. Alayrac, P. Huang, R. Stanforth, A. Fawzi, and P. Kohli. Are labels required for improving adversarial robustness? Neural Information Processing Systems, 2019. 
2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 532, + 287, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 532, + 287, + 575 + ], + "spans": [ + { + "bbox": [ + 48, + 532, + 287, + 575 + ], + "type": "text", + "content": "[76] Thomas Verelst, Paul K Rubenstein, Marcin Eichner, Tinne Tuytelaars, and Maxim Berman. Spatial consistency loss for training multi-label classifiers from single-label annotations. 2023. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 578, + 287, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 578, + 287, + 621 + ], + "spans": [ + { + "bbox": [ + 48, + 578, + 287, + 621 + ], + "type": "text", + "content": "[77] J. Wang, Y. Yang, J. Mao, Z. Huang, C. Huang, and W. Xu. Cnn-rnn: A unified framework for multi-label image classification. 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 624, + 287, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 287, + 666 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 287, + 666 + ], + "type": "text", + "content": "[78] Z. Wang, T. Chen, G. Li, G. Li, and L. Lin. Multi-label image recognition by recurrently discovering attentional regions. IEEE International Conference on Computer Vision, 2017. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "type": "text", + "content": "[79] Y. Wu, H. Liu, S. Feng, Y. Jin, G. Lyu, and Z. Wu. Gm-mlic: Graph matching based multi-label image classification. International Joint Conference on Artificial Intelligence, 2021. 
2" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "text", + "content": "[80] Z. Wu and M. Palmer. Verbs semantics and lexical selection. Annual Meeting on Association for Computational Linguistics, 1994. 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 107, + 545, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 150 + ], + "type": "text", + "content": "[81] C. Xie, Z. Zhang, Y. Zhou, S. Bai, J. Wang, Z. Ren, and A. Yuille. Improving transferability of adversarial examples with input diversity. IEEE Conference on Computer Vision and Pattern Recognition, 2019. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 152, + 545, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 545, + 184 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 545, + 184 + ], + "type": "text", + "content": "[82] Ming-Kun Xie, Jiahao Xiao, and Sheng-Jun Huang. Label-aware global consistency for multi-label learning with single positive labels. 2022. 1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 186, + 545, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 186, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 545, + 217 + ], + "type": "text", + "content": "[83] J. Xu, H. Tian, Z. Wang, Y. Wang, W. Kang, and F. Chen. Joint input and output space learning for multi-label image classification. IEEE Transactions on Multimedia, 2020. 
2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 220, + 545, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 220, + 545, + 252 + ], + "spans": [ + { + "bbox": [ + 307, + 220, + 545, + 252 + ], + "type": "text", + "content": "[84] W. Xu, D. Evans, and Y. Qi. Feature squeezing: Detecting adversarial examples in deep neural networks. Network and Distributed Systems Security Symposium, 2018. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 254, + 545, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 254, + 545, + 297 + ], + "spans": [ + { + "bbox": [ + 307, + 254, + 545, + 297 + ], + "type": "text", + "content": "[85] H. Yang, J. T. Zhou, Y. Zhang, B. Gao, J. Wu, and J. Cai. Exploit bounding box annotations for multi-label object recognition. IEEE Conference on Computer Vision and Pattern Recognition, 2016. 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 298, + 545, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 298, + 545, + 330 + ], + "spans": [ + { + "bbox": [ + 307, + 298, + 545, + 330 + ], + "type": "text", + "content": "[86] Zhuo Yang, Yufei Han, and Xiangliang Zhang. Characterizing the evasion attackability of multi-label classifiers. 2021. 1, 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 332, + 545, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 332, + 545, + 364 + ], + "spans": [ + { + "bbox": [ + 307, + 332, + 545, + 364 + ], + "type": "text", + "content": "[87] Z. Yang, Y. Han, and X. Zhang. Attack transferability characterization for adversarially robust multi-label classification. 2021. 
1, 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 365, + 545, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 365, + 545, + 408 + ], + "spans": [ + { + "bbox": [ + 307, + 365, + 545, + 408 + ], + "type": "text", + "content": "[88] J. Ye, J. He, X. Peng, W. Wu, and Y. Qiao. Attention-driven dynamic graph convolutional network for multi-label image recognition. European Conference on Computer Vision, 2020. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 411, + 545, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 411, + 545, + 453 + ], + "spans": [ + { + "bbox": [ + 307, + 411, + 545, + 453 + ], + "type": "text", + "content": "[89] R. You, Z. Guo, L. Cui, X. Long, S. Y. Bao, and S. Wen. Cross-modality attention with semantic graph embedding for multi-label classification. AAAI Conference on Artificial Intelligence, 2020. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 456, + 545, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 456, + 545, + 488 + ], + "spans": [ + { + "bbox": [ + 307, + 456, + 545, + 488 + ], + "type": "text", + "content": "[90] X. Yuan, P. He, Q. Zhu, and X. Li. Adversarial examples: Attacks and defenses for deep learning. IEEE Transactions on Neural Networks and Learning Systems, 2019. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 490, + 545, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 490, + 545, + 532 + ], + "spans": [ + { + "bbox": [ + 307, + 490, + 545, + 532 + ], + "type": "text", + "content": "[91] ML. Zhang and Z. Zhou. Multilabel neural networks with applications to functional genomics and text categorization. IEEE Transactions on Knowledge and Data Engineering, 2006. 
2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 534, + 545, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 534, + 545, + 567 + ], + "spans": [ + { + "bbox": [ + 307, + 534, + 545, + 567 + ], + "type": "text", + "content": "[92] Shu Zhang, Ran Xu, Caiming Xiong, and Chetan Ramaiah. Use all the labels: A hierarchical multi-label contrastive learning framework. 2022. 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 568, + 545, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 568, + 545, + 600 + ], + "spans": [ + { + "bbox": [ + 307, + 568, + 545, + 600 + ], + "type": "text", + "content": "[93] Z. Zhao, G. Chen, J. Wang, Y. Yang, F. Song, and J. Sun. Attack as defense: Characterizing adversarial examples using robustness. arXiv, 2021. 2" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 602, + 545, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 602, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 307, + 602, + 545, + 635 + ], + "type": "text", + "content": "[94] Donghao Zhou, Pengfei Chen, Qiong Wang, Guangyong Chen, and Pheng-Ann Heng. Acknowledging the unknown for multi-label learning with single positive labels. 2022. 1" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 636, + 545, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 636, + 545, + 668 + ], + "spans": [ + { + "bbox": [ + 307, + 636, + 545, + 668 + ], + "type": "text", + "content": "[95] N. Zhou, W. Luo, X. Lin, P. Xu, and Z.. Zhang. Generating multi-label adversarial examples by linear programming. International Joint Conference on Neural Networks, 2020. 
3" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 670, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 670, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 670, + 545, + 712 + ], + "type": "text", + "content": "[96] N. Zhou, W. Luo, J. Zhang, L. Kong, and H. Zhang. Hiding all labels for multi-label images: An empirical study of adversarial examples. International Joint Conference on Neural Networks, 2021. 2, 7" + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "24261" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 150 + ], + "type": "list", + "angle": 0, + "index": 2, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "text", + "content": "[97] Y. Zhu, J. T. Kwok, and Z. Zhou. Multi-label learning with global and local label correlation. IEEE Transactions on Knowledge and Data Engineering, 2018. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 107, + 287, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 107, + 287, + 150 + ], + "spans": [ + { + "bbox": [ + 48, + 107, + 287, + 150 + ], + "type": "text", + "content": "[98] D. Zügner, A. Akbarnejad, and S. Gümnmann. Adversarial attacks on neural networks for graph data. International Conference on Knowledge Discovery & Data Mining, 2018. 
1" + } + ] + } + ], + "index": 1 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "24262" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/Semantic-aware SAM for Point-Prompted Instance Segmentation/98ef3dac-add6-4e97-bd9e-baeff12acffa_content_list.json b/2024/Semantic-aware SAM for Point-Prompted Instance Segmentation/98ef3dac-add6-4e97-bd9e-baeff12acffa_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..3adad3ed6dd015a4b00e21d4cc6874cd26e5b7a2 --- /dev/null +++ b/2024/Semantic-aware SAM for Point-Prompted Instance Segmentation/98ef3dac-add6-4e97-bd9e-baeff12acffa_content_list.json @@ -0,0 +1,1533 @@ +[ + { + "type": "text", + "text": "Semantic-aware SAM for Point-Prompted Instance Segmentation", + "text_level": 1, + "bbox": [ + 153, + 130, + 815, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhaoyang Wei $^{1*}$ , Pengfei Chen $^{1*}$ , Xuehui Yu $^{1*}$ , Guorong Li $^{1}$ , Jianbin Jiao $^{1}$ , Zhenjun Han $^{1\\dagger}$", + "bbox": [ + 243, + 179, + 723, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1University of Chinese Academy of Sciences(UCAS)", + "bbox": [ + 326, + 217, + 643, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 268, + 313, + 284 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Single-point annotation in visual tasks, with the goal of minimizing labelling costs, is becoming increasingly prominent in research. 
Recently, visual foundation models, such as Segment Anything (SAM), have gained widespread usage due to their robust zero-shot capabilities and exceptional annotation performance. However, SAM's class-agnostic output and high confidence in local segmentation introduce semantic ambiguity, posing a challenge for precise category-specific segmentation. In this paper, we introduce a cost-effective category-specific segmenter using SAM. To tackle this challenge, we have devised a Semantic-Aware Instance Segmentation Network (SAPNet) that integrates Multiple Instance Learning (MIL) with matching capability and SAM with point prompts. SAPNet strategically selects the most representative mask proposals generated by SAM to supervise segmentation, with a specific focus on object category information. Moreover, we introduce the Point Distance Guidance and Box Mining Strategy to mitigate inherent challenges: group and local issues in weakly supervised segmentation. These strategies serve to further enhance the overall segmentation performance. The experimental results on Pascal VOC and COCO demonstrate the promising performance of our proposed SAPNet, emphasizing its semantic matching capabilities and its potential to advance point-prompted instance segmentation. The code is available at https://github.com/zhaoyangwei123/SAPNet.", + "bbox": [ + 76, + 300, + 473, + 694 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 720, + 209, + 736 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Instance segmentation seeks to discern pixel-level labels for both instances of interest and their semantic content in images, a crucial function in domains like autonomous driving, image editing, and human-computer interaction. 
Despite impressive results demonstrated by various studies [5, 11, 16, 40-42], the majority of these high-performing methods are trained in a fully supervised manner and heavily dependent on detailed pixel-level mask annotations,", + "bbox": [ + 75, + 744, + 468, + 867 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/812a1aefd54f5b63c9e2f44b1709b59996855a7d9a975724833467707765d8e9.jpg", + "image_caption": [ + "Figure 1. Three Challenges Brought by SAM and single-MIL. Orange dash box illustrates that semantic ambiguity in SAM-generated masks, where it erroneously assigns higher scores to non-object categories like clothes, despite the person being our desired target. Green dash box depicts a comparison between mask proposals using single-MIL and SAPNet. It illustrates two primary challenges: 'group', where segmentation encounters difficulties in isolating individual targets among adjacent objects of the same category, and 'local', where MIL favors foreground-dominant regions, resulting in overlooked local details." + ], + "image_footnote": [], + "bbox": [ + 501, + 265, + 903, + 420 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "thereby incurring significant labeling costs. To address this challenge, researchers are increasingly focusing on weakly supervised instance segmentation, leveraging cost-effective supervision methods, such as bounding boxes [23, 27, 39], points [14, 28], and image-level labels [21, 45].", + "bbox": [ + 496, + 579, + 890, + 655 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recently, visual foundation models, such as Segment Anything (SAM)[22], have been widely employed by researchers for their exceptional generalization capabilities and impressive annotation performance. Numerous studies based on SAM, such as [20, 44] have emerged, building upon the foundations of SAM to further enhance its generalization capabilities and efficiency. 
However, these efforts have predominantly focused on improving the annotation performance of SAM. One limitation arises from SAM's lack of classification ability, resulting in class-agnostic segmentation results that fail to accurately segment specific categories as desired.", + "bbox": [ + 496, + 656, + 892, + 837 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To tackle the inherent semantic ambiguity in SAM and achieve specific-category segmentation, we propose integrating weak annotations with SAM, employing point annotations as prompts to imbue semantic information into", + "bbox": [ + 498, + 839, + 893, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Equal contribution.", + "bbox": [ + 94, + 875, + 209, + 886 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Corresponding authors. (hanzhj@ucas.ac.cn)", + "bbox": [ + 94, + 887, + 344, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "3585", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "SAM's outputs. A straightforward approach involves leveraging SAM's intrinsic scoring mechanism, selecting the top-scoring mask as the corresponding label for each category. However, when annotating object points are fed into the SAM, its category-agnostic characteristic tends to assign higher scores to parts of the object, resulting in generated mask annotations that fail to encompass the object as a whole. In Fig. 
1 orange dashed box, we aim to obtain the 'person' mask annotation, but SAM predicts the proposals of 'clothes', 'clothes+trousers' and 'person'. Relying solely on the score SAM provides is insufficient, as the highest score corresponds to 'clothes' (col-2), which does not meet our specific needs.", + "bbox": [ + 75, + 90, + 472, + 287 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address this challenge, we have proposed SAPNet, a semantically-aware instance segmentation network designed for high-quality, end-to-end segmentation. In this study, we design a proposal selection module (PSM) using the Multiple Instance Learning (MIL) paradigm to choose proposals that align closely with the specified semantic label. However, the MIL-based method relies on the classification score, often leading to group and local predictions [4, 21, 24]. In Fig. 1 green dashed box, the group issue is evident, where two objects of the same category are often both included when they are in close proximity. It also illustrates the local issue, where the MIL classifier frequently predicts the most discriminative region instead of the entire object. To overcome these limitations, we have introduced Point Distance Guidance (PDG) and Box Mining Strategies (BMS). Specifically, we penalize the selection results by calculating the Euclidean distances between the annotated points of identical categories enclosed within the proposals. Additionally, for more localized proposals, we filter out higher-quality proposals from their corresponding bags and dynamically merge them in scale. By fully exploiting the positional clues to prevent local and group prediction, we aim to select the proposal that most effectively represents the object category in refinement stage. 
The primary contributions of this work can be outlined as follows:", + "bbox": [ + 75, + 289, + 472, + 667 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) We introduce SAPNet, an end-to-end semantic-aware instance segmentation network based on point prompts. SAPNet combines the visual foundation model SAM with semantic information to address its inherent semantic ambiguity, facilitating the generation of semantically-aware proposal masks.", + "2) We incorporate Point Distance Guidance (PDG) and Box Mining Strategies (BMS) to prevent local and group predictions induced by MIL-based classifiers in both the proposal selection and refinement stages.", + "3) SAPNet achieves state-of-the-art performance in Point-Prompted Instance Segmentation (PPIS), significantly bridging the gap between point-prompted and fully-supervised segmentation methods on two challenging benchmarks (COCO and VOC2012)." + ], + "bbox": [ + 75, + 669, + 468, + 898 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 89, + 640, + 106 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Weakly-Supervised Instance Segmentation (WSIS) offers a practical approach for accurate object masks using minimal supervision. It spans a range of annotations, from image labels to bounding boxes. Research has focused on narrowing the performance gap between weakly and fully-supervised methods, primarily through box-level [18, 25, 39] and image-level annotations [1, 21]. Box-based methods have explored structural constraints to guide the segmentation, as seen in BBTP [18], BoxInst [39], and Box2Mask [29], and applied structural constraints to drive segmentation, treating it as a multiple-instance learning task or enforcing color consistency based on CondInst [40]. 
These approaches, while innovative, can complicate training and sometimes neglect the object's overall shape due to their focus on local features and proposal generation, like MCG [2]. Conversely, the proposal-free methods, like IRN [1], rely on class relationships for mask production but can falter in accurately separating instances. To preserve object integrity, recent methods such as Discobox [23] and BESTIE [21] integrate advanced semantic insights into instance segmentation using pairwise losses or saliency cues [30, 39, 42]. However, semantic drift remains an issue, with mislabeling or missed instances resulting in inferior pseudo labels [3] compromising segmentation quality.", + "bbox": [ + 496, + 114, + 890, + 477 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Pointly-Supervised Detection and Segmentation (PSDS) cleverly balances minimal annotation costs with satisfactory localization accuracy. By introducing point annotations, WISE-Net [24], P2BNet [9] and BESTIE [21] improve upon weakly supervised methods that suffer from vague localizations. That only slightly increases the costs (by about $10\\%$ ) and is almost as quick as the image-level annotation, but that is far speedier than more detailed bounding box or mask annotations. Such precision allows for tackling semantic bias, as seen in methods like PointRend [12], which utilize multiple points for improved accuracy, despite requiring additional bounding box supervision. Recent advancements in point-supervised instance segmentation, employed by WISE-Net and Point2Mask [28], show that even single-point annotations can yield precise mask proposals. WISE-Net skillfully localizes objects and selects masks, while BESTIE enhances accuracy using instance cues and self-correction to reduce semantic drift. Attnshift [31] advances this by extending single points to reconstruct entire objects. 
Apart from their complexity, these methods have yet to fully demonstrate their effectiveness, indicating ongoing challenges in harnessing single-point annotations for image segmentation and presenting clear avenues for further research.", + "bbox": [ + 496, + 478, + 890, + 839 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Prompting and Foundation Models. Prompt-based learning enables pretrained foundation models to adapt to various tasks using well-crafted prompts. SAM [22], a prominent example in computer vision, exemplifies robust zero", + "bbox": [ + 496, + 839, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "3586", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "shot generalization and interactive segmentation across multiple applications. Additionally, SAM-based models like Fast-SAM [44] increases speed, HQ-SAM [20] improves segmentation quality, and Semantic-SAM [26] optimizes performance by training on diverse data granularities. Foundational models, pre-trained on large datasets, help improve generalization in downstream tasks, especially in data-scarce scenarios. Basing on SAM, Rsprompter [8] utilizes SAM-derived pseudo labels for improved remote sensing segmentation, meanwhile, adaptations for medical imaging and video tracking are explored in A-SAM [17] and Tracking Anything [43]. Further, [10] and [19] have integrated SAM with Weakly Supervised Semantic Segmentation networks to refine pseudo labels. Our research builds upon these innovations, transforming point annotations into mask proposals in instance segmentation to significantly enhancing performance.", + "bbox": [ + 75, + 90, + 472, + 349 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Methodology", + "text_level": 1, + "bbox": [ + 76, + 359, + 210, + 378 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. 
Overview", + "text_level": 1, + "bbox": [ + 76, + 386, + 187, + 400 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The overview of our method is illustrated in Fig. 2, SAPNet comprises of two branches: one dedicated to the selection and refinement of mask proposals to generate pseudo-labels and the other employing solov2 head [42] for instance segmentation supervised by the generated pseudo labels. The central focus of our approach is the pseudo-label generation branch, exclusively utilized during the training phase, which includes the PSM, PNPG, and PRM modules. Following the initial proposal inputs, the PSM employs multi-instance learning and a point-distance penalty to identify semantically rich proposals. Subsequently, coupled with selected proposals from the PSM stage, the PNPG generates quality positive-negative bags to mitigate background and locality issues, emphasizing the primary regions of interest. Then, the PRM processes these bags, which selects refined proposals from positive bags to improve final box quality. Ultimately, the mask mappings derived from these box proposals are utilized to guide the segmentation branch. This guarantees the acquisition of high-quality category-specified mask proposals to supervise the segmentation branch.", + "bbox": [ + 75, + 410, + 472, + 727 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Proposal Selection Module", + "text_level": 1, + "bbox": [ + 76, + 739, + 318, + 753 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "SAM's limited semantic discernment causes category-agnostic labeling, leading to inconsistent proposal quality for the same objects. Employing these proposals directly for segmentation supervision could introduce noise and impair performance. 
Our goal is to design a category-specific segmenter, which needs to select the most semantically representative proposals for robust supervision.", + "bbox": [ + 75, + 763, + 468, + 868 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Motivated by the insights from WSDDN [4] and P2BNet [9], our proposal selection module employs multi-instance", + "bbox": [ + 76, + 869, + 468, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "learning and leverages labeling information to prioritize high-confidence proposals for segmentation. In the training phase, we leverage SAM[22] solely to generate category-agnostic proposals. To avoid excessive memory use and slow training, we convert them into box proposals using the minimum bounding rectangle, and combine with depth features $F \\in \\mathbb{R}^{H \\times W \\times D}$ from the image $I \\in \\mathbb{R}^{H \\times W}$ , serve as input to the PSM. Utilizing our designed MIL loss, PSM precisely predicts each proposal's class and instance details. It selects the highest-scoring proposal as the semantically richest bounding box for each object, effectively choosing higher quality mask proposals.", + "bbox": [ + 496, + 90, + 890, + 271 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given an image $I$ with $N$ point annotations $Y_{n} = \\{(p_{i},c_{i})\\}_{i = 1}^{N}$ , where $p_i$ is the coordinate of the annotated point and $c_{i}$ is the class index. We transform each class-informative point $p_i$ into $M$ semantic mask proposals, which is further converted to a semantic proposal bag $B_{i}\\in \\mathbb{R}^{M\\times 4}$ . As illustrated in Fig. 2, after passing through a 7x7 RoIAlign layer and two fully-connected layers, features $F_{i}\\in \\mathbb{R}^{M\\times H\\times W\\times D}$ are extracted from proposal bag $B_{i}$ . 
Like in [4] and [37], the features $F$ serve as input for the classification branch and instance branch, using fully-connected layer $f$ and $f^{\\prime}$ to generate $\\mathbf{W}_{cls}\\in \\mathbb{R}^{M\\times K}$ and $\\mathbf{W}_{ins}\\in \\mathbb{R}^{M\\times K}$ . A softmax activation function over $K$ class and $M$ instance dimensions yields the classification scores $\\mathbf{S}_{cls}\\in \\mathbb{R}^{M\\times K}$ and instance scores $\\mathbf{S}_{ins}\\in \\mathbb{R}^{M\\times K}$ .", + "bbox": [ + 498, + 272, + 892, + 484 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbf {W} _ {c l s} = f (\\mathbf {F}); [ \\mathbf {S} _ {c l s} ] _ {m k} = e ^ {[ \\mathbf {W} _ {c l s} ] _ {m k}} / \\sum_ {k = 1} ^ {K} e ^ {[ \\mathbf {W} _ {c l s} ] _ {m k}}. \\\\ \\mathbf {W} _ {i n s} = f ^ {\\prime} (\\mathbf {F}); [ \\mathbf {S} _ {i n s} ] _ {m k} = e ^ {[ \\mathbf {W} _ {i n s} ] _ {m k}} / \\sum_ {m = 1} ^ {M} e ^ {[ \\mathbf {W} _ {i n s} ] _ {m k}}. \\tag {1} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 511, + 489, + 890, + 534 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $[\\cdot]_{mk}$ is the value in row $m$ and column $k$ of matrix.", + "bbox": [ + 500, + 537, + 885, + 551 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Point Distance Guidance. SAM and MIL struggle with distinguishing adjacent objects of the same category, often merging two separate objects into one and giving high score. To combat this, we incorporate instance-level annotated point information and introduce a spatially aware selection with a point-distance penalty mechanism.", + "bbox": [ + 496, + 551, + 890, + 643 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To address the challenge of overlapping objects and thereby enhance model optimization, we propose a strategy specifically aimed at penalizing instances of object overlap. 
For each m-th proposal within the set $B_{i}$ , we define $t_{mj} = 1$ to denote an overlap with any proposal in another identical class bag $B_{j}$ ; otherwise, $t_{mj} = 0$ . The penalty imposed increases in proportion to the distance of the overlapping objects from the proposal in question. This penalty, $W_{dis}$ , is represented using the Euclidean distance between the annotated points of the overlapping proposals. Subsequently, the reciprocal of $W_{dis}$ is then passed through a sigmoid function to compute the distance score $\\mathbf{S}_{dis}$ for the proposal.", + "bbox": [ + 496, + 643, + 892, + 825 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left[ \\mathbf {W} _ {d i s} \\right] _ {i m} = \\sum_ {j = 1, j \\neq i} ^ {N} \\| p _ {i} - p _ {j} \\| * t _ {m j}. \\tag {2} \\\\ [ \\mathbf {S} _ {d i s} ] _ {i m} = (1 / e ^ {- (1 / [ \\mathbf {W} _ {d i s} ] _ {i m})) ^ {d}}. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 583, + 829, + 890, + 883 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3587", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/6d66dc9f49b4155db0bdf1985b5c709d0f637159652c5b601a231cd7ce856475.jpg", + "image_caption": [ + "Figure 2. The framework of SAPNet comprises two components: one for generating mask proposals and another for their utilization in instance segmentation. The process starts with generating category-agnostic mask proposals using point prompts within a visual foundation model. That is followed by an initial proposal selection via MIL combined with PDG. Next, the PRM refines these proposals using positive and negative samples from PNPG, capturing global object semantics. Finally, augmented with the multi-mask proposal supervision, the segmentation branch aims to improve segmentation quality." 
+ ], + "image_footnote": [], + "bbox": [ + 98, + 88, + 883, + 334 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/0cfbabbee76143e3c4f53d38feb7e5afc47bd365548088625de6f90afda6b588.jpg", + "image_caption": [ + "Figure 3. The mechanism of the proposal selection module." + ], + "image_footnote": [], + "bbox": [ + 78, + 416, + 472, + 508 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\left[\\cdot\\right]_{im}$ is the value at the row $i$ and column $m$ in the matrix, and $d$ is the exponential factor.", + "bbox": [ + 76, + 532, + 468, + 561 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "PSM Loss. The final score $\\mathbf{S}$ of each proposal is obtained by computing the Hadamard product of the classification score, the instance score, and the distance score, while the score $\\widehat{\\mathbf{S}}$ for each proposal bag $B_{i}$ is obtained by summing the scores of the proposals in $B_{i}$ . The MILloss of the PSM is constructed using the form of binary crossentropy, and it is defined as follows:", + "bbox": [ + 75, + 561, + 468, + 667 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {S} = \\mathbf {S} _ {c l s} \\odot \\mathbf {S} _ {i n s} \\odot \\mathbf {S} _ {d i s} \\in \\mathbb {R} ^ {M \\times K}; \\widehat {\\mathbf {S}} = \\sum_ {m = 1} ^ {M} [ \\mathbf {S} ] _ {m} \\in \\mathbb {R} ^ {K}.\n$$\n", + "text_format": "latex", + "bbox": [ + 76, + 670, + 410, + 705 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {p s m} = C E (\\widehat {\\mathbf {S}}, \\mathbf {c}) = - \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\sum_ {k = 1} ^ {K} \\mathbf {c} _ {k} \\log (\\widehat {\\mathbf {S}} _ {k}) + (1 - \\mathbf {c} _ {k}) \\log (1 - \\widehat {\\mathbf {S}} _ {k})\n$$\n", + "text_format": "latex", + "bbox": [ + 76, + 708, + 483, + 741 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{c} \\in \\{0,1\\}^K$ is the one-hot category's 
label.", + "bbox": [ + 76, + 744, + 410, + 762 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Utilizing the MILloss, the PSM module skillfully identifies each proposal's category and instance. The module selects the proposal with the highest score, marked as S, for a specific object and identifies a bounding box enriched with semantic information.", + "bbox": [ + 75, + 762, + 468, + 838 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Positive and Negative Proposals Generator", + "text_level": 1, + "bbox": [ + 76, + 847, + 441, + 864 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To further refine the selection of more accurate bounding boxes, we employ PNPG based on $box_{psm}$ selected via", + "bbox": [ + 75, + 869, + 468, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "PSM. That consists of two components: PPG and NPG. The PPG is designed to generate a richer set of positive samples, enhancing bag's quality. Concurrently, the NPG is responsible for generating negative samples, which are crucial for assisting model training. These negative samples, including background samples for all objects and part samples for each, are crucial in resolving part issues and ensuring high-quality bounding box selection. The positive sample set $B^{+}$ produced by PPG and the negative sample set $\\mathcal{U}$ generated by NPG are utilized for training the subsequent PRM.", + "bbox": [ + 496, + 416, + 893, + 568 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Positive Proposals Generator (PPG). Within this phase, to implement adaptive sampling for the identified bounding box, we capitalize on the $box_{psm}$ derived from the PSM stage, coupled with the point distance penalty score $\\mathbf{S}_{dis}$ attributed to each proposal. 
To further elaborate, for each $box_{psm}$ (denoted as $b_x^*, b_y^*, b_w^*, b_h^*$ ) isolated during the PSM phase, its dimensions are meticulously recalibrated leveraging a scale factor $v$ and its associated within-category inclusion score $\\mathbf{S}_{dis}$ to generate an augmented set of positive proposals $(b_x, b_y, b_w, b_h)$ . The formulation is defined as follows:", + "bbox": [ + 496, + 571, + 892, + 734 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} b _ {w} = \\left(1 \\pm v / \\mathbf {S} _ {d i s}\\right) \\cdot b _ {w} ^ {*}, \\quad b _ {h} = \\left(1 \\pm v / \\mathbf {S} _ {d i s}\\right) \\cdot b _ {h} ^ {*}, \\\\ b _ {x} = b _ {x} ^ {*} \\pm \\left(b _ {w} - b _ {w} ^ {*}\\right) / 2, \\quad b _ {y} = b _ {y} ^ {*} \\pm \\left(b _ {h} - b _ {h} ^ {*}\\right) / 2. \\tag {4} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 537, + 736, + 890, + 770 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "These newly cultivated positive proposals are carefully integrated into the existing set $B_{i}$ to enhance the positive instances' pool. Such enhancements are pivotal in optimizing the training of the forthcoming PRM.", + "bbox": [ + 496, + 775, + 892, + 835 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Negative Proposals Generator(NPG). MIL-based selection within a single positive bag may overemphasize the background noise, leading to inadequate focus on the object. To solve this, we create a negative bag from the back-", + "bbox": [ + 496, + 839, + 893, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "3588", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [], + "code_body": "Algorithm 1 Positive and Negative Proposals Generation \nInput: $T_{neg1}, T_{neg2}, box_{psm}$ from PSM stage, image $I$ , positive bags $B^{+}$ . 
\nOutput: Positive proposal bags $B^{+}$ , Negative proposal set $\\mathcal{U}$ . \n1: // Step1: positive proposals sampling \n2: for $i \\in N, N$ is the number of objects in image $I$ do \n3: $B_{i}^{+} \\gets B_{i}, B_{i} \\in B$ ; \n4: $B_{i}^{+} = B_{i}^{+} \\bigcup PPG(\\text{box}_{psm}^{i})$ ; \n5: end for \n6: // Step2: background negative proposals sampling \n7: $\\mathcal{U} \\gets \\{\\}$ ; \n8: proposals $\\gets$ random_sampling(1) for each image $I$ ; \n9: $iou = IOU(proposals, B_{i})$ for each $B_{i} \\in B$ ; \n10: if $iou < T_{neg1}$ then \n11: $\\mathcal{U} = \\mathcal{U} \\bigcup$ proposals; \n12: end if \n13: // Step3: part negative proposals sampling \n14: for $i \\in N, N$ is the number of objects in image $I$ do \n15: proposals $\\gets$ part_neg_sampling( $box_{psm}^{i}$ ); \n16: $iou = IOU(proposals, box_{psm}^{i})$ ; \n17: if $iou < T_{neg2}$ then \n18: $\\mathcal{U} = \\mathcal{U} \\bigcup$ proposals; \n19: end if \n20: end for", + "bbox": [ + 76, + 90, + 468, + 429 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "ground proposals post-positive bag training, which helps MIL maximize the attention towards the object.", + "bbox": [ + 75, + 458, + 468, + 488 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Considering the image dimensions, we randomly sample proposals according to each image's width and height, for negative instance sampling. We assess the Intersection over Union (IoU) between these negatives and the positive sets, filtering out those below a threshold $T_{neg1}$ .", + "bbox": [ + 75, + 489, + 468, + 565 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Additionally, to rectify MIL localization errors, we enforce the sampling of smaller proposals with an IoU under a second threshold, $T_{\\text{neg2}}$ , from inside boxpsm based on its width and height, that is scored highest in PSM, as negative examples. 
These negative instances, partially capturing the object, drive the model to select high-quality bounding boxes that encompass the entire object. The PNPG is systematically elaborated upon in Algorithm1.", + "bbox": [ + 75, + 565, + 468, + 686 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. Proposals Refinement Module", + "text_level": 1, + "bbox": [ + 76, + 695, + 344, + 710 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In the PSM phase, we employ MIL to select high-quality proposals from bag $B^{+}$ . However, as shown in Fig. 2, the box $psm$ outcomes derived solely from a single-stage MIL are suboptimal and localized. Inspired by PCL [38], we consider refining the proposals in a second phase. However, in contrast to most WSOD methods which choose to continue refining using classification information in subsequent stages, we have established high-quality positive and negative bags, and further combined both classification and instance branches to introduce the PRM module to refine the proposals, aiming to obtain a high-quality bounding box.", + "bbox": [ + 75, + 718, + 468, + 883 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The PRM module, extending beyond the scope of PSM,", + "bbox": [ + 96, + 885, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "focuses on both selection and refinement. It combines positive instances from the PPG with the initial set, forming an enriched $B^{+}$ . Simultaneously, it incorporates the negative instance set $\\mathcal{U}$ from NPG, providing a comprehensive foundation for PRM. This integration leads to a restructured MIL loss in PRM, replacing the conventional CELoss with Focal Loss for positive instances. 
The modified positive loss function is as follows:", + "bbox": [ + 496, + 90, + 890, + 210 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {p o s} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left\\langle \\mathbf {c} _ {i} ^ {\\mathrm {T}}, \\widehat {\\mathbf {S}} _ {i} \\right\\rangle \\cdot \\operatorname {F L} \\left(\\widehat {\\mathbf {S}} _ {i} ^ {*}, \\mathbf {c} _ {i}\\right). \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 566, + 212, + 890, + 252 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where FL is the focal loss [32], $\\widehat{\\mathbf{S}}_i^*$ and $\\widehat{\\mathbf{S}}_i$ represent the bag score predicted by PRM and PSM, respectively. $\\left\\langle \\mathbf{c}_i^{\\mathrm{T}},\\widehat{\\mathbf{S}}_i\\right\\rangle$ represents the inner product of the two vectors, meaning the predicted bag score of the ground-truth category.", + "bbox": [ + 496, + 265, + 890, + 333 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Enhancing background suppression, we use negative proposals and introduce a dedicated loss for these instances. Notably, these negative instances pass only through the classification branch for instance score computation, with their scores derived exclusively from classification. 
The specific formulation of this loss function is detailed below:", + "bbox": [ + 496, + 334, + 890, + 422 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\beta = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left\\langle \\mathbf {c} _ {i} ^ {\\mathrm {T}}, \\widehat {\\mathbf {S}} _ {i} \\right\\rangle , \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 617, + 426, + 890, + 467 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {n e g} = - \\frac {1}{| \\mathcal {U} |} \\sum_ {\\mathcal {U}} \\sum_ {k = 1} ^ {K} \\beta \\cdot \\left(\\left[ \\mathbf {S} _ {n e g} ^ {c l s} \\right] _ {k}\\right) ^ {2} \\log \\left(1 - \\left[ \\mathbf {S} _ {n e g} ^ {c l s} \\right] _ {k}\\right). \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 514, + 474, + 890, + 527 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The PRM loss consists of the MIL loss $\\mathcal{L}_{pos}$ for positive bags and negative loss $\\mathcal{L}_{neg}$ for negative samples, i.e.,", + "bbox": [ + 496, + 529, + 890, + 560 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {p r m} = \\alpha \\mathcal {L} _ {p o s} + (1 - \\alpha) \\mathcal {L} _ {n e g}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 586, + 571, + 890, + 588 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\alpha = 0.25$ by default.", + "bbox": [ + 498, + 599, + 683, + 613 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Box Mining Strategy. MIL's preference for segments with more foreground presence and SAM's tendency to capture only parts of an object often bring to final bounding boxes, $box_{prim}$ , the 'local' issue of MIL inadequately covers the instances. 
To improve the bounding box quality, we introduce a box mining strategy that adaptively expands $box_{select}$ from proposal selection in PRM, by merging it with the original proposals filter, aiming to address MIL's localization challenges.", + "bbox": [ + 496, + 613, + 890, + 750 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The Box Mining Strategy (BMS) consists of two primary components: (i) We select the top $k$ proposals from the positive proposal bag $B^{+}$ , to create a set $G$ . We evaluate the proposals in $G$ against box_select based on IoU and size, using a threshold $T_{min1}$ . Proposals larger than box_select and with an IoU above $T_{min1}$ undergo dynamic expansion through IoU consideration, which allows for the adaptive integration with box_select. That mitigates the 'local' issue and maintains the bounding box's consistency to the object's true boundaries. (ii) Frequently, issues related to lo", + "bbox": [ + 496, + 750, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "3589", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "cality can lead to an exceedingly low IoU between proposals and box_select. Nonetheless, the ground truth box can fully encompass the box_part. Therefore, when component (i) conditions are unmet, if a proposal can entirely encapsulate box_select, we reset the threshold $T_{min2}$ . Proposals surpassing this threshold adaptively merge with box_select to generate the final box_prm, used to yield Mask_prm. These two components collectively form our BMS strategy. A detailed procedure of this approach will be delineated in Algorithm2 of the supplementary materials.", + "bbox": [ + 75, + 90, + 472, + 241 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Loss Function. After acquiring the final supervision masks, $Mask_{prm}$ and the filtered $Mask_{sam}$ in Multi-mask Proposals Supervision(MPS) in Sec. 
7 of supplementary, we use them together to guide the dynamic segmentation branch. To comprehensively train SAPNet, we integrate the loss functions from the PSM and PRM, culminating in the formulation of the total loss for our model, denoted as $L_{total}$ . The aggregate loss function, $L_{total}$ can be articulated as:", + "bbox": [ + 75, + 242, + 472, + 375 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {t o t a l}} = \\mathcal {L} _ {\\text {m a s k}} + \\mathcal {L} _ {\\text {c l s}} + \\lambda \\cdot \\mathcal {L} _ {\\text {p s m}} + \\mathcal {L} _ {\\text {p r m}} \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 135, + 378, + 468, + 393 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where, $\\mathcal{L}_{Dice}$ is the Dice Loss [35], $\\mathcal{L}_{cls}$ is the Focal Loss[32], and $\\lambda$ is set as 0.25.", + "bbox": [ + 75, + 393, + 468, + 425 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experiment", + "text_level": 1, + "bbox": [ + 76, + 439, + 202, + 455 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Experimental Settings", + "text_level": 1, + "bbox": [ + 76, + 464, + 284, + 481 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Datasets. We use the publicly available MS COCO[33] and VOC2012SBD [13] datasets for experiments. COCO17 has 118k training and 5k validation images with 80 common object categories. VOC consists of 20 categories and contains 10,582 images for model training and 1,449 validation images for evaluation.", + "bbox": [ + 75, + 487, + 468, + 578 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation Metric. We use mean average precision mAP@[.5,.95] for the MS-COCO. The $\\{AP, AP_{50}, AP_{75}, AP_{Small}, AP_{Middle}, AP_{Large}\\}$ is reported for MS-COCO and for VOC12SBD segmentation, and we report $AP_{25,50,75}$ . 
The $mIoU_{box}$ is the average IoU between predicted pseudo-boxes and GT-boxes in the training set. It measures SAPNet's ability to select mask proposals without using the segmentation branch.", + "bbox": [ + 75, + 579, + 468, + 699 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Implementation Details. In our study, we employed the Stochastic Gradient Descent (SGD) optimizer, as detailed in [6]. Our experiments were conducted using the mmdetection toolbox [7], following standard training protocols for each dataset. We used the ResNet architecture [15], pretrained on ImageNet [36], as the backbone. For COCO, batch size was set at four images per GPU across eight GPUs, and for VOC2012, it was four GPUs. More details of the experiment are in Sec. 8 of the supplementary.", + "bbox": [ + 75, + 700, + 468, + 838 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Experimental Comparisons", + "text_level": 1, + "bbox": [ + 76, + 845, + 326, + 863 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Tab. 1 shows the comparison results between our method and previous SOTA approaches [11, 16, 34, 40, 42] on", + "bbox": [ + 75, + 869, + 468, + 901 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "COCO. In our experiments, we provide SAM with both the labeled points and the annotations generated by the point annotation enhancer [9]. SAM then utilizes these inputs to generate subsequent mask proposals for selection and supervision. For fair comparison, we design two baselines: the top-1 scored mask from SAM and MIL-selected SAM mask proposals are used as SOLOv2 supervision, respectively. Tab. 1 shows our method substantially surpasses these baselines in performance.", + "bbox": [ + 496, + 90, + 890, + 226 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Comparison with point-annotated methods. 
Our approach achieves a 31.2 AP performance with a ResNet-50 backbone, surpassing all previous point-annotated methods, including BESTIE on HRNet-48 and AttnShift on Vit-B. Our model exhibits significant improvements under a 1x training schedule, with a 13.5 AP increase when compared to the previous SOTA method, BESTIE. Furthermore, under a 3x training schedule, SAPNet outperforms AttnShift, which relies on large model training, with 13.4 AP improvements. Importantly, our method is trained end-to-end without needing post-processing, achieving SOTA performance in point-annotated instance segmentation.",
+ "bbox": [
+ 496,
+ 227,
+ 892,
+ 409
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "Comparison with other annotation-based methods. Our SAPNet has significantly elevated point annotation, regardless of point annotation's limitations in annotation time and quality compared to box annotation. Utilizing a ResNet-101 backbone and a 3x training schedule, SAPNet surpasses most box-annotated instance segmentation methods, achieving a 1.4 AP improvement over BoxInst. Moreover, SAPNet's segmentation performance nearly matches the mask-annotated methods, effectively bridging the gap between point-annotated and these techniques.",
+ "bbox": [
+ 496,
+ 410,
+ 890,
+ 560
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "Segmentation performance on VOC2012SBD. Tab. 2 compares segmentation methods under different supervisions on the VOC2012 dataset. SAPNet reports an enhancement of 7.7 $AP$ over the AttnShift approach, evidencing a notable advancement in performance. Thereby, it significantly outstrips image-level supervised segmentation methods. Additionally, SAPNet surpasses box-annotated segmentation methods, such as BoxInst by 3.4 $AP_{50}$ and DiscoBox by 32.6 $AP_{50}$ . Further, our point-prompted method achieves $92.3\%$ of the Mask-R-CNN.",
+ "bbox": [
+ 496,
+ 561,
+ 892,
+ 712
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "4.3. 
Ablation Studies", + "text_level": 1, + "bbox": [ + 500, + 724, + 663, + 739 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "More experiments have been conducted on COCO to further analyze SAPNet's effectiveness and robustness.", + "bbox": [ + 496, + 748, + 890, + 777 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Training Stage in SAPNet. The ablation study of the training stage is given in Tab. 3. We trained solov2 using the top-1 scored mask provided by SAM and compared it to the two training strategies of SAPNet. In the two-stage approach, the segmentation branch and multiple-mask supervision of SAPNet are removed. Instead, we use the selected mask to train a standalone instance segmentation model, as described by [42]. The end-to-end training method corre", + "bbox": [ + 496, + 779, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "3590", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/77de95328c10c81a448c9b6cb5aef62fa56ccddd27290fb275cf47e1e1846cd0.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodAnn.Backbonesched.Arch.mAP\\( \\mathrm{mAP}_{50} \\)\\( \\mathrm{mAP}_{75} \\)\\( \\mathrm{mAP}_{\\mathrm{s}} \\)\\( \\mathrm{mAP}_{\\mathrm{m}} \\)\\( \\mathrm{mAP}_{1} \\)
Fully-supervised instance segmentation models.
Mask R-CNN [16]\\( \\mathcal{M} \\)ResNet-501xMask R-CNN34.656.536.618.337.447.2
YOLACT-700 [5]\\( \\mathcal{M} \\)ResNet-1014.5xYOLACT31.254.032.812.133.347.
PolarMask [16]\\( \\mathcal{M} \\)ResNet-1012xPolarMask32.153.733.114.733.845.3
SOLOv2 [42]\\( \\mathcal{M} \\)ResNet-501xSOLOv234.854.936.913.437.853.7
CondInst [40]\\( \\mathcal{M} \\)ResNet-501xCondInst35.356.437.418.039.450.4
SwinMR [34]\\( \\mathcal{M} \\)Swin-S50eSwinMR43.267.046.124.846.362.1
Mask2Former [11]\\( \\mathcal{M} \\)Swin-S50eMask2Former46.169.452.825.449.768.5
Weakly-supervised instance segmentation models.
IRNet [45]\\( \\mathcal{I} \\)ResNet-501xMask R-CNN6.111.75.5---
BESTIE [21]\\( \\mathcal{I} \\)HRNet-481xMask R-CNN14.328.013.2---
BBTP [18]\\( \\mathcal{B} \\)ResNet-1011xMask R-CNN21.145.517.211.222.029.8
BoxInst [39]\\( \\mathcal{B} \\)ResNet-1013xCondInst33.256.533.616.235.345.1
DiscoBox [23]\\( \\mathcal{B} \\)ResNet-503xSOLOv232.053.632.611.733.748.4
Boxlevelset [27]\\( \\mathcal{B} \\)ResNet-1013xSOLOv233.456.834.115.236.846.8
WISE-Net [24]\\( \\mathcal{P} \\)ResNet-501xMask R-CNN7.818.28.8---
BESTIE†[21]\\( \\mathcal{P} \\)HRNet-481xMask R-CNN17.734.016.4---
AttnShift [31]\\( \\mathcal{P} \\)Vit-B50eMask R-CNN21.243.519.4---
SAM-SOLOv2\\( \\mathcal{P} \\)ResNet-501xSOLOv224.641.925.39.328.638.1
MIL-SOLOv2\\( \\mathcal{P} \\)ResNet-501xSOLOv226.847.726.811.231.540.4
SAPNet(ours)\\( \\mathcal{P} \\)ResNet-501xSOLOv231.251.832.312.635.147.8
SAPNet(ours)*\\( \\mathcal{P} \\)ResNet-1013xSOLOv234.656.036.615.739.552.1
", + "bbox": [ + 81, + 89, + 890, + 458 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/5d2612b00f4dd8cd1a46ab6ac8af5f371b78baf12387c6a891f8a9122dda2381.jpg", + "table_caption": [ + "Table 1. Mask annotation(M), image annotation(I), box annotation(B) and point annotation(P) performance on COCO-17 val. 'Ann.' is the type of the annotation and 'sched.' means schedule. * is the multi-scale augment training for re-training segmentation methods, and other experiments are on single-scale training. SwinMR is Swin-Transformer-Mask R-CNN. SwinMR and Mask2Former use multi-scale data augment strategies for SOTA." + ], + "table_footnote": [ + "Table 2. Instance segmentation performance on the VOC2012 test set. $\\dagger$ indicates applying MRCNN refinement." + ], + "table_body": "
MethodSup.BackboneAP25AP50AP75
Mask R-CNN [16]MR-5078.068.843.3
Mask R-CNN [16]MR-10179.670.245.3
BoxInst [39]BR-101-61.437.0
DiscoBoxBR-10172.862.237.5
BESTIE [21]IHRNet53.541.724.2
IRNet [45]IR-50-46.723.5
BESTIE† [21]IHRNet61.251.026.6
WISE-Net [24]PR-5053.543.025.9
BESTIE [21]PHRNet58.646.726.3
BESTIE† [21]PHRNet66.456.130.2
Attnshift [31]PVit-S68.354.425.4
Attnshift† [31]PVit-S70.357.130.4
SAPNet(ours)PR-10176.564.858.7
", + "bbox": [ + 81, + 523, + 468, + 715 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "sponds to the architecture illustrated in Fig. 2. Our findings indicate that our method is more competitive than directly employing SAM (31.2 AP vs 24.6 AP), and the visualization of Fig. 4 shows us this enhancement. Moreover, the end-to-end training strategy boasts a more elegant model structure and outperforms the two-stage approach in overall efficiency (31.2 AP vs 30.18 AP).", + "bbox": [ + 75, + 746, + 468, + 851 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Effect of Each Component. Given the limited performance of SAM-top1, we opted for the single-MIL as our baseline. With a preliminary selection using MIL1, we", + "bbox": [ + 75, + 854, + 470, + 900 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/7e58933038a2be704e32eebd534d169562eede1cfd41805e23e5e8e4dac96f03.jpg", + "image_caption": [ + "Figure 4. The comparative visualization between SAM-top1 and SAPNet is presented, showcasing SAM's segmentation outcomes in green masks and our results in yellow. The orange and red bounding boxes highlight the respective mask boundaries." + ], + "image_footnote": [], + "bbox": [ + 501, + 523, + 678, + 627 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f25761820fcc09c4e886e4b8e486fab5adb98d8eec758148b0187d4ca6bdbdbb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 679, + 523, + 885, + 627 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/df86f796ed7d155ee948920c25991a164612e2b7ee31122527446c234b839c26.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
train stage on cocosched.APAP50AP75
SAM-top11x24.641.925.3
Two stage1x30.249.831.5
End to end1x31.251.832.3
", + "bbox": [ + 504, + 694, + 893, + 763 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3. The experimental comparisons of segmenters in COCO dataset, SAM-top1 is the highest scoring mask generated by SAM.", + "bbox": [ + 498, + 768, + 893, + 797 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "have achieved a segmentation performance of 26.8 AP. i) Point Distance Guidance. We updated the proposal scores from the existing MIL by integrating the PDG module into the foundational MIL selection. This approach successfully segments adjacent objects of the same category, improving the segmentation performance by 0.7 points (27.5 vs 26.8).", + "bbox": [ + 496, + 809, + 893, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "3591", + "bbox": [ + 482, + 944, + 513, + 957 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/c45def14b9991273165f483f0ae2d132797f34dbbbe2091457eb5b656859c6e5.jpg", + "table_caption": [], + "table_footnote": [ + "Table 4. The effect of each component in SAPNet: proposal selection module(MIL1), point distance guidance(PDG), positive and negative proposals generator(PNPG), proposal selection module(MIL2), box mining strategy(BMS), and Multi-mask Proposals Supervision(MPS) in Sec. 7 of supplementary." + ], + "table_body": "
mil1PDGmil2PNPGBMSMPSmAP
26.8
27.5
27.7
29.7
30.8
31.2
", + "bbox": [ + 83, + 88, + 464, + 200 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ii) MIL2. Building on the previous step, we incorporate a second MIL selection module to refine the initially selected boxes, resulting in a performance increment of 0.2 points. iii) PNPG. For MIL2, we devised the positive-negative sample sets, aiming to enhance the input quality for the PRM module and use the negative samples to suppress background. This adjustment leads to a segmentation performance boost of 2 points (29.7 vs 27.7). iv) BMS. Within the PRM, we refine the selected boxes using BMS, pushing the segmentation performance up by 1.1 points (30.8 vs 29.7). v) MPS. Utilizing MPS for segmentation branch supervision yields a 0.4-point performance improvement.", + "bbox": [ + 75, + 277, + 467, + 460 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Threshold of BMS. For point refinement, there are two constraints (described in Sec. 3.4). $T_{min1}$ and $T_{min2}$ are thresholds of the Box Mining Strategy. In Tab. 5, it shows that the two constraints together to obtain performance gain. After multiple experiments, we have found that there is a significant performance improvement when $T_{min1}$ and $T_{min2}$ are set to 0.6 and 0.3, respectively.", + "bbox": [ + 75, + 460, + 467, + 566 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Components of PNPG. Tab. 6 presents the results of a dissected ablation study on the Positive and Negative Proposals Generator(PNPG), illustrating the respective impacts of the positive and negative examples on the model's performance. It is evident that the construction of negative examples plays a significant role in enhancing model efficacy. Furthermore, the beneficial effects of both positive and negative examples are observed to be cumulative.", + "bbox": [ + 75, + 566, + 467, + 686 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Performance Analysis. As presented in Tab. 
7, we conducted a statistical analysis to validate SAPNet's capability to address 'local' issue and compare the outcomes selected by the single-MIL with those obtained by SAPNet in the absence of segmentation branch integration. Specifically, the part problem generated by the single-MIL, where MIL is inclined to select proposals with a higher proportion of foreground, is exemplified in Fig. 6 of supplementary. On this premise, we initially establish an evaluative criterion $R_{v} = \\frac{area_{mask}}{area_{box}}$ , which is the ratio of the mask area to the bounding box area. Subsequently, we compute $R_{v_i}$ for each proposal within the proposal bag corresponding to every instance across the entire COCO dataset and select the maximum $R_{v_{max}}$ to compute the mean value over the dataset,", + "bbox": [ + 75, + 688, + 467, + 901 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/c659e505dcf0fe439a1de4ce93da2f6215f1b535634a4135eb8ab816a8108c9a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Tmin1Tmin2APAP50AP75APsAPmAPl
0.50.330.951.332.012.234.747.4
0.50.430.751.231.811.934.747.1
0.60.331.251.832.312.635.147.8
0.60.430.851.132.012.134.747.3
0.70.331.051.532.212.634.947.3
0.70.430.751.131.912.034.647.2
", + "bbox": [ + 504, + 89, + 893, + 188 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/0c07e2649a0abdc31fdee7927331295c5118d42be9011e3ea7d21e8a13654c7f.jpg", + "table_caption": [ + "Table 5. Constraints in box mining strategy." + ], + "table_footnote": [], + "table_body": "
PNPGAPAP50AP75
PPGNPG
29.349.730.0
29.850.530.8
30.751.231.7
31.251.832.3
", + "bbox": [ + 555, + 210, + 838, + 308 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/6eb306980562b8b728013e500658b038230b28aa17f054bb406f16334ad9699f.jpg", + "table_caption": [ + "Table 6. Meticulous ablation experiments in PNPG" + ], + "table_footnote": [], + "table_body": "
MethodGapmIoUbox
Single-MIL0.19963.8
SAPNet0.13169.1
", + "bbox": [ + 576, + 332, + 818, + 383 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 7. Experimental analysis with part problem.", + "bbox": [ + 545, + 388, + 843, + 402 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "which is then designated as the threshold $T_{rv}$ . Ultimately, we identify the ground truth $R_{vgt}$ and objects where $R_{vmax}$ exceeds $T_{rv}$ and calculates the discrepancy between $R_{v}$ values selected by single-MIL and SAPNet. The description is as follows:", + "bbox": [ + 498, + 414, + 890, + 489 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nG a p _ {s i n g l e} = R v _ {s i n g l e} - R v _ {g t}, \\quad G a p _ {o u r} = R v _ {o u r} - R v _ {g t}. \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 509, + 498, + 890, + 526 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Tab. 7 shows that the proposed SAPNet mitigates the locality issue faced by the single-MIL. Furthermore, the boxes selected via SAPNet exhibit a substantially higher IoU with GT than those selected by the single-MIL.", + "bbox": [ + 498, + 530, + 890, + 590 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 607, + 617, + 622 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we propose SAPNet, an innovative end-to-end point-prompted instance segmentation framework. SAPNet transforms point annotations into category-agnostic mask proposals and employs dual selection branches to elect the most semantic mask for each object, guiding the segmentation process. To address challenges such as indistinguishable adjacent objects of the same class and MIL's locality bias, we integrate PDG and PNPG, complemented by a Box Mining Strategy for enhanced proposal refinement. SAPNet uniquely merges segmentation and selection branches under multi-mask supervision, significantly enhancing its segmentation performance. 
Extensive experimental comparisons on VOC and COCO datasets validate the SAPNet's effectiveness in point-prompted instance segmentation.", + "bbox": [ + 496, + 630, + 890, + 801 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Acknowledgements", + "text_level": 1, + "bbox": [ + 500, + 820, + 687, + 835 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work was supported in part by the Youth Innovation Promotion Association CAS, the National Natural Science Foundation of China (NSFC) under Grant No. 61836012, 61771447 and 62272438, and the Strategic Priority Research Program of the Chinese Academy of Sciences under Grant No.XDA27000000.", + "bbox": [ + 498, + 839, + 890, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "3592", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Jiwoon Ahn, Sunghyun Cho, and Suha Kwak. Weakly supervised learning of instance segmentation with inter-pixel relations. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2209-2218, 2019. 2", + "[2] Pablo Andres Arbeláez, Jordi Pont-Tuset, and Jonathan T. Barron et al. Multiscale combinatorial grouping. In CVPR, 2014. 2", + "[3] Aditya Arun, CV Jawahar, and M Pawan Kumar. Weakly supervised instance segmentation by learning annotation consistent instances. In European Conference on Computer Vision, pages 254-270. Springer, 2020. 2", + "[4] Hakan Bilen and Andrea Vedaldi. Weakly supervised deep detection networks. In CVPR, 2016. 2, 3", + "[5] Daniel Bolya, Chong Zhou, Fanyi Xiao, and Yong Jae Lee. Yolact: Real-time instance segmentation. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9157-9166, 2019. 1, 7", + "[6] Léon Bottou. 
Stochastic gradient descent tricks. In Neural Networks: Tricks of the Trade: Second Edition, pages 421-436. Springer, 2012. 6", + "[7] Kai Chen, Jiaqi Wang, Jiangmiao Pang, Yuhang Cao, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jiarui Xu, et al. Mmdetection: Open mmlab detection toolbox and benchmark. arXiv preprint arXiv:1906.07155, 2019. https://github.com/open-mmlab/mmdetection.6", + "[8] Keyan Chen, Chenyang Liu, Hao Chen, Haotian Zhang, Wenyuan Li, Zhengxia Zou, and Zhenwei Shi. Rsprompter: Learning to prompt for remote sensing instance segmentation based on visual foundation model. arXiv preprint arXiv:2306.16269, 2023. 3", + "[9] Pengfei Chen, Xuehui Yu, Xumeng Han, Najmul Hassan, Kai Wang, Jiachen Li, Jian Zhao, Humphrey Shi, Zhenjun Han, and Qixiang Ye. Point-to-box network for accurate object detection via single point supervision. In European Conference on Computer Vision, pages 51-67. Springer, 2022. 2, 3, 6", + "[10] Tianle Chen, Zheda Mai, Ruiwen Li, and Wei-lun Chao. Segment anything model (sam) enhanced pseudo labels for weakly supervised semantic segmentation. arXiv preprint arXiv:2305.05803, 2023. 3", + "[11] Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, and Rohit Girdhar. Masked-attention mask transformer for universal image segmentation. In CVPR, 2022. 1, 6, 7", + "[12] Bowen Cheng, Omkar Parkhi, and Alexander Kirillov. Pointly-supervised instance segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2617-2626, 2022. 2", + "[13] Mark Everingham, Luc Van Gool, and Christopher K. I. Williams et al. The Pascal visual object classes (VOC) challenge. IJCV, 2010. http://host.robots.ox.ac.uk/pascal/VOC/.6", + "[14] Junsong Fan, Zhaoxiang Zhang, and Tieniu Tan. Pointly-" + ], + "bbox": [ + 78, + 114, + 470, + 901 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "supervised panoptic segmentation. 
In European Conference on Computer Vision, pages 319-336. Springer, 2022. 1",
+ "[15] Kaiming He, Xiangyu Zhang, and Shaoqing Ren et al. Deep residual learning for image recognition. In CVPR, 2016. 6",
+ "[16] Kaiming He, Georgia Gkioxari, and Piotr Dollár et al. Mask R-CNN. In ICCV, 2017. 1, 6, 7",
+ "[17] Sheng He, Rina Bao, Jingpeng Li, P Ellen Grant, and Yangming Ou. Accuracy of segment-anything model (sam) in medical image segmentation tasks. arXiv preprint arXiv:2304.09324, 2023. 3",
+ "[18] Cheng-Chun Hsu, Kuang-Jui Hsu, Chung-Chi Tsai, Yen-Yu Lin, and Yung-Yu Chuang. Weakly supervised instance segmentation using the bounding box tightness prior. In NeurIPS, 2019. 2, 7",
+ "[19] Peng-Tao Jiang and Yuqi Yang. Segment anything is a good pseudo-label generator for weakly supervised semantic segmentation. arXiv preprint arXiv:2305.01275, 2023. 3",
+ "[20] Lei Ke, Mingqiao Ye, Martin Danelljan, Yifan Liu, Yu-Wing Tai, Chi-Keung Tang, and Fisher Yu. Segment anything in high quality. arXiv preprint arXiv:2306.01567, 2023. 1, 3",
+ "[21] Beomyoung Kim, Youngjoon Yoo, Chaeun Rhee, and Junmo Kim. Beyond semantic to instance segmentation: Weakly-supervised instance segmentation via semantic knowledge transfer and self-refinement. In CVPR, 2022. 1, 2, 7",
+ "[22] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. https://segment-anything.com/.1,2,3",
+ "[23] Shiyi Lan, Zhiding Yu, Christopher Choy, Subhashree Radhakrishnan, Guilin Liu, Yuke Zhu, Larry S Davis, and Anima Anandkumar. Discobox: Weakly supervised instance segmentation and semantic correspondence from box supervision. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3406-3416, 2021. 1, 2, 7",
+ "[24] Issam H. Laradji, Negar Rostamzadeh, Pedro O. Pinheiro, David Vázquez, and Mark Schmidt. 
Proposal-based instance segmentation with point supervision. In ICIP, 2020. 2, 7", + "[25] Jungbeom Lee, Jihun Yi, Chaehun Shin, and Sungroh Yoon. Bbam: Bounding box attribution map for weakly supervised semantic and instance segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2643-2652, 2021. 2", + "[26] Feng Li, Hao Zhang, Peize Sun, Xueyan Zou, Shilong Liu, Jianwei Yang, Chunyuan Li, Lei Zhang, and Jianfeng Gao. Semantic-sam: Segment and recognize anything at any granularity. arXiv preprint arXiv:2307.04767, 2023. 3", + "[27] Wentong Li, Wenyu Liu, Jianke Zhu, Miaomiao Cui, XianSheng Hua, and Lei Zhang. Box-supervised instance segmentation with level set evolution. In European conference on computer vision, pages 1-18. Springer, 2022. 1, 7", + "[28] Wentong Li, Yuqian Yuan, Song Wang, Jianke Zhu, Jianshu Li, Jian Liu, and Lei Zhang. Point2mask: Point-supervised panoptic segmentation via optimal transport. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 572-581, 2023. 1, 2" + ], + "bbox": [ + 503, + 92, + 890, + 901 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "3593", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[29] Wentong Li, Wenyu Liu, Jianke Zhu, Miaomiao Cui, Risheng Yu Xiansheng Hua, and Lei Zhang. Box2mask: Box-supervised instance segmentation via level-set evolution. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 2", + "[30] Wentong Li, Yuqian Yuan, Song Wang, Wenyu Liu, Dongqi Tang, Jianke Zhu, Lei Zhang, et al. Label-efficient segmentation via affinity propagation. Advances in Neural Information Processing Systems, 36, 2024. 2", + "[31] Mingxiang Liao, Zonghao Guo, , and Yuze Wang et al. Attentionshift: Iteratively estimated part-based attention map for pointly supervised instance segmentation. In CVPR, 2023. 
2, 7", + "[32] Tsung-Yi Lin, Priya Goyal, and Ross B. Girshick et al. Focal loss for dense object detection. In ICCV, 2017. 5, 6", + "[33] Tsung-Yi Lin, Michael Maire, and Serge et al. Belongie. Microsoft coco: Common objects in context. In ECCV, 2014. https://cocodataset.org/. 6", + "[34] Ze Liu, Yutong Lin, and Yue Cao et al. Swin transformer: Hierarchical vision transformer using shifted windows. In ICCV, 2021. 6, 7", + "[35] Fausto Milletari, Nassir Navab, and Seyed-Ahmad Ahmadi. V-net: Fully convolutional neural networks for volumetric medical image segmentation. In 2016 fourth international conference on 3D vision (3DV), pages 565-571. IEEE, 2016. 6", + "[36] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 115:211-252, 2015. 6", + "[37] Peng Tang and Xinggang Wang et al. Multiple instance detection network with online instance classifier refinement. In CVPR, 2017. 3", + "[38] Peng Tang, Xinggang Wang, and Song Bai et al. PCL: proposal cluster learning for weakly supervised object detection. IEEE TPAMI, 2020. 5", + "[39] Zhi Tian, Chunhua Shen, Xinlong Wang, and Hao Chen. Boxinst: High-performance instance segmentation with box annotations. In CVPR, 2021. 1, 2, 7", + "[40] Zhi Tian, Bowen Zhang, Hao Chen, and Chunhua Shen. Instance and panoptic segmentation using conditional convolutions. IEEE TPAMI, 2023. 1, 2, 6, 7", + "[41] Xinlong Wang, Tao Kong, Chunhua Shen, Yuning Jiang, and Lei Li. SOLO: segmenting objects by locations. In ECCV, 2020.", + "[42] Xinlong Wang, Rufeng Zhang, Tao Kong, Lei Li, and Chunhua Shen. Solov2: Dynamic and fast instance segmentation. Proc. Advances in Neural Information Processing Systems (NeurIPS), 2020. https://github.com/WXinlong/SOLO.1, 2, 3, 6, 7", + "[43] Jinyu Yang, Mingqi Gao, Zhe Li, Shang Gao, Fangjing Wang, and Feng Zheng. 
Track anything: Segment anything meets videos. arXiv preprint arXiv:2304.11968, 2023. 3", + "[44] Xu Zhao, Wenchao Ding, Yongqi An, Yinglong Du, Tao Yu, Min Li, Ming Tang, and Jinqiao Wang. Fast segment anything. arXiv preprint arXiv:2306.12156, 2023. 1, 3" + ], + "bbox": [ + 78, + 90, + 468, + 900 + ], + "page_idx": 9 + }, + { + "type": "ref_text", + "text": "[45] Yanning Zhou, Hao Chen, Jiaqi Xu, Qi Dou, and Pheng-Ann Heng. Irnet: Instance relation network for overlapping cervical cell segmentation. In MICCAI, 2019. 1, 7", + "bbox": [ + 501, + 90, + 890, + 133 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "3594", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/Semantic-aware SAM for Point-Prompted Instance Segmentation/98ef3dac-add6-4e97-bd9e-baeff12acffa_model.json b/2024/Semantic-aware SAM for Point-Prompted Instance Segmentation/98ef3dac-add6-4e97-bd9e-baeff12acffa_model.json new file mode 100644 index 0000000000000000000000000000000000000000..197c7cd8b96f2d31c82034ebf7f7a438679c18fd --- /dev/null +++ b/2024/Semantic-aware SAM for Point-Prompted Instance Segmentation/98ef3dac-add6-4e97-bd9e-baeff12acffa_model.json @@ -0,0 +1,2057 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.155, + 0.131, + 0.816, + 0.154 + ], + "angle": 0, + "content": "Semantic-aware SAM for Point-Prompted Instance Segmentation" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.18, + 0.724, + 0.217 + ], + "angle": 0, + "content": "Zhaoyang Wei\\(^{1*}\\), Pengfei Chen\\(^{1*}\\), Xuehui Yu\\(^{1*}\\), Guorong Li\\(^{1}\\), Jianbin Jiao\\(^{1}\\), Zhenjun Han\\(^{1\\dagger}\\)" + }, + { + "type": "text", + "bbox": [ + 0.328, + 0.218, + 0.644, + 0.234 + ], + "angle": 0, + "content": "1University of Chinese Academy of Sciences(UCAS)" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.269, + 0.314, + 0.285 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.301, + 0.474, + 0.695 + ], + "angle": 0, + "content": "Single-point annotation in visual tasks, with the goal of minimizing labelling costs, is becoming increasingly prominent in research. Recently, visual foundation models, such as Segment Anything (SAM), have gained widespread usage due to their robust zero-shot capabilities and exceptional annotation performance. However, SAM's class-agnostic output and high confidence in local segmentation introduce semantic ambiguity, posing a challenge for precise category-specific segmentation. In this paper, we introduce a cost-effective category-specific segmenter using SAM. To tackle this challenge, we have devised a Semantic-Aware Instance Segmentation Network (SAPNet) that integrates Multiple Instance Learning (MIL) with matching capability and SAM with point prompts. SAPNet strategically selects the most representative mask proposals generated by SAM to supervise segmentation, with a specific focus on object category information. Moreover, we introduce the Point Distance Guidance and Box Mining Strategy to mitigate inherent challenges: group and local issues in weakly supervised segmentation. These strategies serve to further enhance the overall segmentation performance. 
The experimental results on Pascal VOC and COCO demonstrate the promising performance of our proposed SAPNet, emphasizing its semantic matching capabilities and its potential to advance point-prompted instance segmentation. The code is available at https://github.com/zhaoyangwei123/SAPNet." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.721, + 0.21, + 0.737 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.746, + 0.47, + 0.868 + ], + "angle": 0, + "content": "Instance segmentation seeks to discern pixel-level labels for both instances of interest and their semantic content in images, a crucial function in domains like autonomous driving, image editing, and human-computer interaction. Despite impressive results demonstrated by various studies [5, 11, 16, 40-42], the majority of these high-performing methods are trained in a fully supervised manner and heavily dependent on detailed pixel-level mask annotations," + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.266, + 0.905, + 0.421 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.428, + 0.895, + 0.568 + ], + "angle": 0, + "content": "Figure 1. Three Challenges Brought by SAM and single-MIL. Orange dash box illustrates that semantic ambiguity in SAM-generated masks, where it erroneously assigns higher scores to non-object categories like clothes, despite the person being our desired target. Green dash box depicts a comparison between mask proposals using single-MIL and SAPNet. It illustrates two primary challenges: 'group', where segmentation encounters difficulties in isolating individual targets among adjacent objects of the same category, and 'local', where MIL favors foreground-dominant regions, resulting in overlooked local details." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.58, + 0.892, + 0.656 + ], + "angle": 0, + "content": "thereby incurring significant labeling costs. 
To address this challenge, researchers are increasingly focusing on weakly supervised instance segmentation, leveraging cost-effective supervision methods, such as bounding boxes [23, 27, 39], points [14, 28], and image-level labels [21, 45]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.657, + 0.893, + 0.838 + ], + "angle": 0, + "content": "Recently, visual foundation models, such as Segment Anything (SAM)[22], have been widely employed by researchers for their exceptional generalization capabilities and impressive annotation performance. Numerous studies based on SAM, such as [20, 44] have emerged, building upon the foundations of SAM to further enhance its generalization capabilities and efficiency. However, these efforts have predominantly focused on improving the annotation performance of SAM. One limitation arises from SAM's lack of classification ability, resulting in class-agnostic segmentation results that fail to accurately segment specific categories as desired." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.84, + 0.894, + 0.901 + ], + "angle": 0, + "content": "To tackle the inherent semantic ambiguity in SAM and achieve specific-category segmentation, we propose integrating weak annotations with SAM, employing point annotations as prompts to imbue semantic information into" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.875, + 0.21, + 0.887 + ], + "angle": 0, + "content": "* Equal contribution." + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.888, + 0.345, + 0.9 + ], + "angle": 0, + "content": "† Corresponding authors. (hanzhj@ucas.ac.cn)" + }, + { + "type": "list", + "bbox": [ + 0.096, + 0.875, + 0.345, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "3585" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.288 + ], + "angle": 0, + "content": "SAM's outputs. 
A straightforward approach involves leveraging SAM's intrinsic scoring mechanism, selecting the top-scoring mask as the corresponding label for each category. However, when annotating object points are fed into the SAM, its category-agnostic characteristic tends to assign higher scores to parts of the object, resulting in generated mask annotations that fail to encompass the object as a whole. In Fig. 1 orange dashed box, we aim to obtain the 'person' mask annotation, but SAM predicts the proposals of 'clothes', 'clothes+trousers' and 'person'. Relying solely on the score SAM provides is insufficient, as the highest score corresponds to 'clothes' (col-2), which does not meet our specific needs." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.29, + 0.473, + 0.668 + ], + "angle": 0, + "content": "To address this challenge, we have proposed SAPNet, a semantically-aware instance segmentation network designed for high-quality, end-to-end segmentation. In this study, we design a proposal selection module (PSM) using the Multiple Instance Learning (MIL) paradigm to choose proposals that align closely with the specified semantic label. However, the MIL-based method relies on the classification score, often leading to group and local predictions [4, 21, 24]. In Fig. 1 green dashed box, the group issue is evident, where two objects of the same category are often both included when they are in close proximity. It also illustrates the local issue, where the MIL classifier frequently predicts the most discriminative region instead of the entire object. To overcome these limitations, we have introduced Point Distance Guidance (PDG) and Box Mining Strategies (BMS). Specifically, we penalize the selection results by calculating the Euclidean distances between the annotated points of identical categories enclosed within the proposals. 
Additionally, for more localized proposals, we filter out higher-quality proposals from their corresponding bags and dynamically merge them in scale. By fully exploiting the positional clues to prevent local and group prediction, we aim to select the proposal that most effectively represents the object category in refinement stage. The primary contributions of this work can be outlined as follows:" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.67, + 0.47, + 0.761 + ], + "angle": 0, + "content": "1) We introduce SAPNet, an end-to-end semantic-aware instance segmentation network based on point prompts. SAPNet combines the visual foundation model SAM with semantic information to address its inherent semantic ambiguity, facilitating the generation of semantically-aware proposal masks." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.763, + 0.47, + 0.825 + ], + "angle": 0, + "content": "2) We incorporate Point Distance Guidance (PDG) and Box Mining Strategies (BMS) to prevent local and group predictions induced by MIL-based classifiers in both the proposal selection and refinement stages." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.47, + 0.9 + ], + "angle": 0, + "content": "3) SAPNet achieves state-of-the-art performance in Point-Prompted Instance Segmentation (PPIS), significantly bridging the gap between point-prompted and fully-supervised segmentation methods on two challenging benchmarks (COCO and VOC2012)." + }, + { + "type": "list", + "bbox": [ + 0.076, + 0.67, + 0.47, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.09, + 0.642, + 0.107 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.115, + 0.892, + 0.478 + ], + "angle": 0, + "content": "Weakly-Supervised Instance Segmentation (WSIS) offers a practical approach for accurate object masks using minimal supervision. It spans a range of annotations, from image labels to bounding boxes. 
Research has focused on narrowing the performance gap between weakly and fully-supervised methods, primarily through box-level [18, 25, 39] and image-level annotations [1, 21]. Box-based methods have explored structural constraints to guide the segmentation, as seen in BBTP [18], BoxInst [39], and Box2Mask [29], and applied structural constraints to drive segmentation, treating it as a multiple-instance learning task or enforcing color consistency based on CondInst [40]. These approaches, while innovative, can complicate training and sometimes neglect the object's overall shape due to their focus on local features and proposal generation, like MCG [2]. Conversely, the proposal-free methods, like IRN [1], rely on class relationships for mask production but can falter in accurately separating instances. To preserve object integrity, recent methods such as Discobox [23] and BESTIE [21] integrate advanced semantic insights into instance segmentation using pairwise losses or saliency cues [30, 39, 42]. However, semantic drift remains an issue, with mislabeling or missed instances resulting in inferior pseudo labels [3] compromising segmentation quality." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.479, + 0.892, + 0.84 + ], + "angle": 0, + "content": "Pointly-Supervised Detection and Segmentation (PSDS) cleverly balances minimal annotation costs with satisfactory localization accuracy. By introducing point annotations, WISE-Net [24], P2BNet [9] and BESTIE [21] improve upon weakly supervised methods that suffer from vague localizations. That only slightly increases the costs (by about \\(10\\%\\)) and is almost as quick as the image-level annotation, but that is far speedier than more detailed bounding box or mask annotations. Such precision allows for tackling semantic bias, as seen in methods like PointRend [12], which utilize multiple points for improved accuracy, despite requiring additional bounding box supervision. 
Recent advancements in point-supervised instance segmentation, employed by WISE-Net and Point2Mask [28], show that even single-point annotations can yield precise mask proposals. WISE-Net skillfully localizes objects and selects masks, while BESTIE enhances accuracy using instance cues and self-correction to reduce semantic drift. Attnshift [31] advances this by extending single points to reconstruct entire objects. Apart from their complexity, these methods have yet to fully demonstrate their effectiveness, indicating ongoing challenges in harnessing single-point annotations for image segmentation and presenting clear avenues for further research." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Prompting and Foundation Models. Prompt-based learning enables pretrained foundation models to adapt to various tasks using well-crafted prompts. SAM [22], a prominent example in computer vision, exemplifies robust zero" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "3586" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.35 + ], + "angle": 0, + "content": "shot generalization and interactive segmentation across multiple applications. Additionally, SAM-based models like Fast-SAM [44] increases speed, HQ-SAM [20] improves segmentation quality, and Semantic-SAM [26] optimizes performance by training on diverse data granularities. Foundational models, pre-trained on large datasets, help improve generalization in downstream tasks, especially in data-scarce scenarios. Basing on SAM, Rsprompter [8] utilizes SAM-derived pseudo labels for improved remote sensing segmentation, meanwhile, adaptations for medical imaging and video tracking are explored in A-SAM [17] and Tracking Anything [43]. Further, [10] and [19] have integrated SAM with Weakly Supervised Semantic Segmentation networks to refine pseudo labels. 
Our research builds upon these innovations, transforming point annotations into mask proposals in instance segmentation to significantly enhancing performance." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.361, + 0.212, + 0.379 + ], + "angle": 0, + "content": "3. Methodology" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.387, + 0.188, + 0.401 + ], + "angle": 0, + "content": "3.1. Overview" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.411, + 0.473, + 0.728 + ], + "angle": 0, + "content": "The overview of our method is illustrated in Fig. 2, SAPNet comprises of two branches: one dedicated to the selection and refinement of mask proposals to generate pseudo-labels and the other employing solov2 head [42] for instance segmentation supervised by the generated pseudo labels. The central focus of our approach is the pseudo-label generation branch, exclusively utilized during the training phase, which includes the PSM, PNPG, and PRM modules. Following the initial proposal inputs, the PSM employs multi-instance learning and a point-distance penalty to identify semantically rich proposals. Subsequently, coupled with selected proposals from the PSM stage, the PNPG generates quality positive-negative bags to mitigate background and locality issues, emphasizing the primary regions of interest. Then, the PRM processes these bags, which selects refined proposals from positive bags to improve final box quality. Ultimately, the mask mappings derived from these box proposals are utilized to guide the segmentation branch. This guarantees the acquisition of high-quality category-specified mask proposals to supervise the segmentation branch." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.74, + 0.319, + 0.755 + ], + "angle": 0, + "content": "3.2. 
Proposal Selection Module" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.764, + 0.469, + 0.869 + ], + "angle": 0, + "content": "SAM's limited semantic discernment causes category-agnostic labeling, leading to inconsistent proposal quality for the same objects. Employing these proposals directly for segmentation supervision could introduce noise and impair performance. Our goal is to design a category-specific segmenter, which needs to select the most semantically representative proposals for robust supervision." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Motivated by the insights from WSDDN [4] and P2BNet [9], our proposal selection module employs multi-instance" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.272 + ], + "angle": 0, + "content": "learning and leverages labeling information to prioritize high-confidence proposals for segmentation. In the training phase, we leverage SAM[22] solely to generate category-agnostic proposals. To avoid excessive memory use and slow training, we convert them into box proposals using the minimum bounding rectangle, and combine with depth features \\( F \\in \\mathbb{R}^{H \\times W \\times D} \\) from the image \\( I \\in \\mathbb{R}^{H \\times W} \\), serve as input to the PSM. Utilizing our designed MIL loss, PSM precisely predicts each proposal's class and instance details. It selects the highest-scoring proposal as the semantically richest bounding box for each object, effectively choosing higher quality mask proposals." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.273, + 0.893, + 0.485 + ], + "angle": 0, + "content": "Given an image \\(I\\) with \\(N\\) point annotations \\(Y_{n} = \\{(p_{i},c_{i})\\}_{i = 1}^{N}\\), where \\(p_i\\) is the coordinate of the annotated point and \\(c_{i}\\) is the class index. 
We transform each class-informative point \\(p_i\\) into \\(M\\) semantic mask proposals, which is further converted to a semantic proposal bag \\(B_{i}\\in \\mathbb{R}^{M\\times 4}\\). As illustrated in Fig. 2, after passing through a 7x7 RoIAlign layer and two fully-connected layers, features \\(F_{i}\\in \\mathbb{R}^{M\\times H\\times W\\times D}\\) are extracted from proposal bag \\(B_{i}\\). Like in [4] and [37], the features \\(F\\) serve as input for the classification branch and instance branch, using fully-connected layer \\(f\\) and \\(f^{\\prime}\\) to generate \\(\\mathbf{W}_{cls}\\in \\mathbb{R}^{M\\times K}\\) and \\(\\mathbf{W}_{ins}\\in \\mathbb{R}^{M\\times K}\\). A softmax activation function over \\(K\\) class and \\(M\\) instance dimensions yields the classification scores \\(\\mathbf{S}_{cls}\\in \\mathbb{R}^{M\\times K}\\) and instance scores \\(\\mathbf{S}_{ins}\\in \\mathbb{R}^{M\\times K}\\)." + }, + { + "type": "equation", + "bbox": [ + 0.513, + 0.49, + 0.891, + 0.535 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbf {W} _ {c l s} = f (\\mathbf {F}); [ \\mathbf {S} _ {c l s} ] _ {m k} = e ^ {[ \\mathbf {W} _ {c l s} ] _ {m k}} / \\sum_ {k = 1} ^ {K} e ^ {[ \\mathbf {W} _ {c l s} ] _ {m k}}. \\\\ \\mathbf {W} _ {i n s} = f ^ {\\prime} (\\mathbf {F}); [ \\mathbf {S} _ {i n s} ] _ {m k} = e ^ {[ \\mathbf {W} _ {i n s} ] _ {m k}} / \\sum_ {m = 1} ^ {M} e ^ {[ \\mathbf {W} _ {i n s} ] _ {m k}}. \\tag {1} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.538, + 0.886, + 0.553 + ], + "angle": 0, + "content": "where \\([\\cdot]_{mk}\\) is the value in row \\(m\\) and column \\(k\\) of matrix." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.553, + 0.892, + 0.644 + ], + "angle": 0, + "content": "Point Distance Guidance. SAM and MIL struggle with distinguishing adjacent objects of the same category, often merging two separate objects into one and giving high score. 
To combat this, we incorporate instance-level annotated point information and introduce a spatially aware selection with a point-distance penalty mechanism." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.644, + 0.893, + 0.826 + ], + "angle": 0, + "content": "To address the challenge of overlapping objects and thereby enhance model optimization, we propose a strategy specifically aimed at penalizing instances of object overlap. For each m-th proposal within the set \\( B_{i} \\), we define \\( t_{mj} = 1 \\) to denote an overlap with any proposal in another identical class bag \\( B_{j} \\); otherwise, \\( t_{mj} = 0 \\). The penalty imposed increases in proportion to the distance of the overlapping objects from the proposal in question. This penalty, \\( W_{dis} \\), is represented using the Euclidean distance between the annotated points of the overlapping proposals. Subsequently, the reciprocal of \\( W_{dis} \\) is then passed through a sigmoid function to compute the distance score \\( \\mathbf{S}_{dis} \\) for the proposal." + }, + { + "type": "equation", + "bbox": [ + 0.584, + 0.83, + 0.891, + 0.884 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left[ \\mathbf {W} _ {d i s} \\right] _ {i m} = \\sum_ {j = 1, j \\neq i} ^ {N} \\| p _ {i} - p _ {j} \\| * t _ {m j}. \\tag {2} \\\\ [ \\mathbf {S} _ {d i s} ] _ {i m} = (1 / e ^ {- (1 / [ \\mathbf {W} _ {d i s} ] _ {i m})) ^ {d}}. \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "3587" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.099, + 0.089, + 0.885, + 0.335 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.337, + 0.895, + 0.409 + ], + "angle": 0, + "content": "Figure 2. The framework of SAPNet comprises two components: one for generating mask proposals and another for their utilization in instance segmentation. 
The process starts with generating category-agnostic mask proposals using point prompts within a visual foundation model. That is followed by an initial proposal selection via MIL combined with PDG. Next, the PRM refines these proposals using positive and negative samples from PNPG, capturing global object semantics. Finally, augmented with the multi-mask proposal supervision, the segmentation branch aims to improve segmentation quality." + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.417, + 0.473, + 0.509 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.095, + 0.518, + 0.45, + 0.532 + ], + "angle": 0, + "content": "Figure 3. The mechanism of the proposal selection module." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.533, + 0.47, + 0.563 + ], + "angle": 0, + "content": "where \\(\\left[\\cdot\\right]_{im}\\) is the value at the row \\(i\\) and column \\(m\\) in the matrix, and \\(d\\) is the exponential factor." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.563, + 0.47, + 0.669 + ], + "angle": 0, + "content": "PSM Loss. The final score \\(\\mathbf{S}\\) of each proposal is obtained by computing the Hadamard product of the classification score, the instance score, and the distance score, while the score \\(\\widehat{\\mathbf{S}}\\) for each proposal bag \\(B_{i}\\) is obtained by summing the scores of the proposals in \\(B_{i}\\). 
The MILloss of the PSM is constructed using the form of binary crossentropy, and it is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.077, + 0.671, + 0.411, + 0.706 + ], + "angle": 0, + "content": "\\[\n\\mathbf {S} = \\mathbf {S} _ {c l s} \\odot \\mathbf {S} _ {i n s} \\odot \\mathbf {S} _ {d i s} \\in \\mathbb {R} ^ {M \\times K}; \\widehat {\\mathbf {S}} = \\sum_ {m = 1} ^ {M} [ \\mathbf {S} ] _ {m} \\in \\mathbb {R} ^ {K}.\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.078, + 0.709, + 0.484, + 0.742 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {p s m} = C E (\\widehat {\\mathbf {S}}, \\mathbf {c}) = - \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\sum_ {k = 1} ^ {K} \\mathbf {c} _ {k} \\log (\\widehat {\\mathbf {S}} _ {k}) + (1 - \\mathbf {c} _ {k}) \\log (1 - \\widehat {\\mathbf {S}} _ {k})\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.746, + 0.411, + 0.763 + ], + "angle": 0, + "content": "where \\(\\mathbf{c} \\in \\{0,1\\}^K\\) is the one-hot category's label." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.763, + 0.47, + 0.839 + ], + "angle": 0, + "content": "Utilizing the MILloss, the PSM module skillfully identifies each proposal's category and instance. The module selects the proposal with the highest score, marked as S, for a specific object and identifies a bounding box enriched with semantic information." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.848, + 0.442, + 0.865 + ], + "angle": 0, + "content": "3.3. Positive and Negative Proposals Generator" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.47, + 0.903 + ], + "angle": 0, + "content": "To further refine the selection of more accurate bounding boxes, we employ PNPG based on \\( box_{psm} \\) selected via" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.417, + 0.895, + 0.569 + ], + "angle": 0, + "content": "PSM. That consists of two components: PPG and NPG. 
The PPG is designed to generate a richer set of positive samples, enhancing bag's quality. Concurrently, the NPG is responsible for generating negative samples, which are crucial for assisting model training. These negative samples, including background samples for all objects and part samples for each, are crucial in resolving part issues and ensuring high-quality bounding box selection. The positive sample set \\( B^{+} \\) produced by PPG and the negative sample set \\( \\mathcal{U} \\) generated by NPG are utilized for training the subsequent PRM." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.572, + 0.893, + 0.736 + ], + "angle": 0, + "content": "Positive Proposals Generator (PPG). Within this phase, to implement adaptive sampling for the identified bounding box, we capitalize on the \\(box_{psm}\\) derived from the PSM stage, coupled with the point distance penalty score \\(\\mathbf{S}_{dis}\\) attributed to each proposal. To further elaborate, for each \\(box_{psm}\\) (denoted as \\(b_x^*, b_y^*, b_w^*, b_h^*\\)) isolated during the PSM phase, its dimensions are meticulously recalibrated leveraging a scale factor \\(v\\) and its associated within-category inclusion score \\(\\mathbf{S}_{dis}\\) to generate an augmented set of positive proposals \\((b_x, b_y, b_w, b_h)\\). The formulation is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.538, + 0.737, + 0.891, + 0.771 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} b _ {w} = \\left(1 \\pm v / \\mathbf {S} _ {d i s}\\right) \\cdot b _ {w} ^ {*}, \\quad b _ {h} = \\left(1 \\pm v / \\mathbf {S} _ {d i s}\\right) \\cdot b _ {h} ^ {*}, \\\\ b _ {x} = b _ {x} ^ {*} \\pm \\left(b _ {w} - b _ {w} ^ {*}\\right) / 2, \\quad b _ {y} = b _ {y} ^ {*} \\pm \\left(b _ {h} - b _ {h} ^ {*}\\right) / 2. 
\\tag {4} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.776, + 0.893, + 0.837 + ], + "angle": 0, + "content": "These newly cultivated positive proposals are carefully integrated into the existing set \\( B_{i} \\) to enhance the positive instances' pool. Such enhancements are pivotal in optimizing the training of the forthcoming PRM." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.894, + 0.903 + ], + "angle": 0, + "content": "Negative Proposals Generator(NPG). MIL-based selection within a single positive bag may overemphasize the background noise, leading to inadequate focus on the object. To solve this, we create a negative bag from the back-" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "3588" + } + ], + [ + { + "type": "algorithm", + "bbox": [ + 0.078, + 0.091, + 0.47, + 0.43 + ], + "angle": 0, + "content": "Algorithm 1 Positive and Negative Proposals Generation \nInput: \\( T_{neg1}, T_{neg2}, box_{psm} \\) from PSM stage, image \\( I \\), positive bags \\( B^{+} \\). \nOutput: Positive proposal bags \\( B^{+} \\), Negative proposal set \\( \\mathcal{U} \\). 
\n1: // Step1: positive proposals sampling \n2: for \\( i \\in N, N \\) is the number of objects in image \\( I \\) do \n3: \\( B_{i}^{+} \\gets B_{i}, B_{i} \\in B \\); \n4: \\( B_{i}^{+} = B_{i}^{+} \\bigcup PPG(\\text{box}_{psm}^{i}) \\); \n5: end for \n6: // Step2: background negative proposals sampling \n7: \\( \\mathcal{U} \\gets \\{\\} \\); \n8: proposals \\( \\gets \\) random_sampling(1) for each image \\( I \\); \n9: \\( iou = IOU(proposals, B_{i}) \\) for each \\( B_{i} \\in B \\); \n10: if \\( iou < T_{neg1} \\) then \n11: \\( \\mathcal{U} = \\mathcal{U} \\bigcup \\) proposals; \n12: end if \n13: // Step3: part negative proposals sampling \n14: for \\( i \\in N, N \\) is the number of objects in image \\( I \\) do \n15: proposals \\( \\gets \\) part_neg_sampling(\\( box_{psm}^{i} \\)); \n16: \\( iou = IOU(proposals, box_{psm}^{i}) \\); \n17: if \\( iou < T_{neg2} \\) then \n18: \\( \\mathcal{U} = \\mathcal{U} \\bigcup \\) proposals; \n19: end if \n20: end for" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.459, + 0.47, + 0.489 + ], + "angle": 0, + "content": "ground proposals post-positive bag training, which helps MIL maximize the attention towards the object." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.49, + 0.47, + 0.566 + ], + "angle": 0, + "content": "Considering the image dimensions, we randomly sample proposals according to each image's width and height, for negative instance sampling. We assess the Intersection over Union (IoU) between these negatives and the positive sets, filtering out those below a threshold \\( T_{neg1} \\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.566, + 0.47, + 0.687 + ], + "angle": 0, + "content": "Additionally, to rectify MIL localization errors, we enforce the sampling of smaller proposals with an IoU under a second threshold, \\( T_{\\text{neg2}} \\), from inside boxpsm based on its width and height, that is scored highest in PSM, as negative examples. 
These negative instances, partially capturing the object, drive the model to select high-quality bounding boxes that encompass the entire object. The PNPG is systematically elaborated upon in Algorithm1." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.696, + 0.345, + 0.712 + ], + "angle": 0, + "content": "3.4. Proposals Refinement Module" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.719, + 0.47, + 0.885 + ], + "angle": 0, + "content": "In the PSM phase, we employ MIL to select high-quality proposals from bag \\( B^{+} \\). However, as shown in Fig. 2, the box \\( psm \\) outcomes derived solely from a single-stage MIL are suboptimal and localized. Inspired by PCL [38], we consider refining the proposals in a second phase. However, in contrast to most WSOD methods which choose to continue refining using classification information in subsequent stages, we have established high-quality positive and negative bags, and further combined both classification and instance branches to introduce the PRM module to refine the proposals, aiming to obtain a high-quality bounding box." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.886, + 0.469, + 0.901 + ], + "angle": 0, + "content": "The PRM module, extending beyond the scope of PSM," + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.211 + ], + "angle": 0, + "content": "focuses on both selection and refinement. It combines positive instances from the PPG with the initial set, forming an enriched \\( B^{+} \\). Simultaneously, it incorporates the negative instance set \\( \\mathcal{U} \\) from NPG, providing a comprehensive foundation for PRM. This integration leads to a restructured MIL loss in PRM, replacing the conventional CELoss with Focal Loss for positive instances. 
The modified positive loss function is as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.568, + 0.213, + 0.892, + 0.253 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {p o s} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left\\langle \\mathbf {c} _ {i} ^ {\\mathrm {T}}, \\widehat {\\mathbf {S}} _ {i} \\right\\rangle \\cdot \\operatorname {F L} \\left(\\widehat {\\mathbf {S}} _ {i} ^ {*}, \\mathbf {c} _ {i}\\right). \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.266, + 0.892, + 0.334 + ], + "angle": 0, + "content": "where FL is the focal loss [32], \\(\\widehat{\\mathbf{S}}_i^*\\) and \\(\\widehat{\\mathbf{S}}_i\\) represent the bag score predicted by PRM and PSM, respectively. \\(\\left\\langle \\mathbf{c}_i^{\\mathrm{T}},\\widehat{\\mathbf{S}}_i\\right\\rangle\\) represents the inner product of the two vectors, meaning the predicted bag score of the ground-truth category." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.335, + 0.892, + 0.424 + ], + "angle": 0, + "content": "Enhancing background suppression, we use negative proposals and introduce a dedicated loss for these instances. Notably, these negative instances pass only through the classification branch for instance score computation, with their scores derived exclusively from classification. 
The specific formulation of this loss function is detailed below:" + }, + { + "type": "equation", + "bbox": [ + 0.618, + 0.428, + 0.892, + 0.468 + ], + "angle": 0, + "content": "\\[\n\\beta = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left\\langle \\mathbf {c} _ {i} ^ {\\mathrm {T}}, \\widehat {\\mathbf {S}} _ {i} \\right\\rangle , \\tag {6}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.515, + 0.475, + 0.892, + 0.529 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {n e g} = - \\frac {1}{| \\mathcal {U} |} \\sum_ {\\mathcal {U}} \\sum_ {k = 1} ^ {K} \\beta \\cdot \\left(\\left[ \\mathbf {S} _ {n e g} ^ {c l s} \\right] _ {k}\\right) ^ {2} \\log \\left(1 - \\left[ \\mathbf {S} _ {n e g} ^ {c l s} \\right] _ {k}\\right). \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.53, + 0.892, + 0.561 + ], + "angle": 0, + "content": "The PRM loss consists of the MIL loss \\(\\mathcal{L}_{pos}\\) for positive bags and negative loss \\(\\mathcal{L}_{neg}\\) for negative samples, i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.588, + 0.572, + 0.892, + 0.589 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {p r m} = \\alpha \\mathcal {L} _ {p o s} + (1 - \\alpha) \\mathcal {L} _ {n e g}, \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.6, + 0.684, + 0.614 + ], + "angle": 0, + "content": "where \\(\\alpha = 0.25\\) by default." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.614, + 0.892, + 0.75 + ], + "angle": 0, + "content": "Box Mining Strategy. MIL's preference for segments with more foreground presence and SAM's tendency to capture only parts of an object often bring to final bounding boxes, \\( box_{prim} \\), the 'local' issue of MIL inadequately covers the instances. To improve the bounding box quality, we introduce a box mining strategy that adaptively expands \\( box_{select} \\) from proposal selection in PRM, by merging it with the original proposals filter, aiming to address MIL's localization challenges." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.892, + 0.901 + ], + "angle": 0, + "content": "The Box Mining Strategy (BMS) consists of two primary components: (i) We select the top \\(k\\) proposals from the positive proposal bag \\(B^{+}\\), to create a set \\(G\\). We evaluate the proposals in \\(G\\) against box_select based on IoU and size, using a threshold \\(T_{min1}\\). Proposals larger than box_select and with an IoU above \\(T_{min1}\\) undergo dynamic expansion through IoU consideration, which allows for the adaptive integration with box_select. That mitigates the 'local' issue and maintains the bounding box's consistency to the object's true boundaries. (ii) Frequently, issues related to lo" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "3589" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.242 + ], + "angle": 0, + "content": "cality can lead to an exceedingly low IoU between proposals and box_select. Nonetheless, the ground truth box can fully encompass the box_part. Therefore, when component (i) conditions are unmet, if a proposal can entirely encapsulate box_select, we reset the threshold \\( T_{min2} \\). Proposals surpassing this threshold adaptively merge with box_select to generate the final box_prm, used to yield Mask_prm. These two components collectively form our BMS strategy. A detailed procedure of this approach will be delineated in Algorithm2 of the supplementary materials." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.243, + 0.473, + 0.376 + ], + "angle": 0, + "content": "Loss Function. After acquiring the final supervision masks, \\(Mask_{prm}\\) and the filtered \\(Mask_{sam}\\) in Multi-mask Proposals Supervision(MPS) in Sec. 7 of supplementary, we use them together to guide the dynamic segmentation branch. 
To comprehensively train SAPNet, we integrate the loss functions from the PSM and PRM, culminating in the formulation of the total loss for our model, denoted as \\(L_{total}\\). The aggregate loss function, \\(L_{total}\\) can be articulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.137, + 0.379, + 0.469, + 0.394 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {t o t a l}} = \\mathcal {L} _ {\\text {m a s k}} + \\mathcal {L} _ {\\text {c l s}} + \\lambda \\cdot \\mathcal {L} _ {\\text {p s m}} + \\mathcal {L} _ {\\text {p r m}} \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.395, + 0.47, + 0.426 + ], + "angle": 0, + "content": "where, \\(\\mathcal{L}_{Dice}\\) is the Dice Loss [35], \\(\\mathcal{L}_{cls}\\) is the Focal Loss[32], and \\(\\lambda\\) is set as 0.25." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.44, + 0.203, + 0.457 + ], + "angle": 0, + "content": "4. Experiment" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.465, + 0.285, + 0.482 + ], + "angle": 0, + "content": "4.1. Experimental Settings" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.488, + 0.47, + 0.579 + ], + "angle": 0, + "content": "Datasets. We use the publicly available MS COCO[33] and VOC2012SBD [13] datasets for experiments. COCO17 has 118k training and 5k validation images with 80 common object categories. VOC consists of 20 categories and contains 10,582 images for model training and 1,449 validation images for evaluation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.58, + 0.47, + 0.7 + ], + "angle": 0, + "content": "Evaluation Metric. We use mean average precision mAP@[.5,.95] for the MS-COCO. The \\(\\{AP, AP_{50}, AP_{75}, AP_{Small}, AP_{Middle}, AP_{Large}\\}\\) is reported for MS-COCO and for VOC12SBD segmentation, and we report \\(AP_{25,50,75}\\). The \\(mIoU_{box}\\) is the average IoU between predicted pseudo-boxes and GT-boxes in the training set. 
It measures SAPNet's ability to select mask proposals without using the segmentation branch." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.701, + 0.47, + 0.839 + ], + "angle": 0, + "content": "Implementation Details. In our study, we employed the Stochastic Gradient Descent (SGD) optimizer, as detailed in [6]. Our experiments were conducted using the mmdetection toolbox [7], following standard training protocols for each dataset. We used the ResNet architecture [15], pretrained on ImageNet [36], as the backbone. For COCO, batch size was set at four images per GPU across eight GPUs, and for VOC2012, it was four GPUs. More details of the experiment are in Sec. 8 of the supplementary." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.847, + 0.327, + 0.864 + ], + "angle": 0, + "content": "4.2. Experimental Comparisons" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Tab. 1 shows the comparison results between our method and previous SOTA approaches [11, 16, 34, 40, 42] on" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.227 + ], + "angle": 0, + "content": "COCO. In our experiments, we provide SAM with both the labeled points and the annotations generated by the point annotation enhancer [9]. SAM then utilizes these inputs to generate subsequent mask proposals for selection and supervision. For fair comparison, we design two baselines: the top-1 scored mask from SAM and MIL-selected SAM mask proposals are used as SOLOv2 supervision, respectively. Tab. 1 shows our method substantially surpasses these baselines in performance." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.228, + 0.893, + 0.41 + ], + "angle": 0, + "content": "Comparison with point-annotated methods. Our approach achieves a 31.2 AP performance with a ResNet-50 backbone, surpassing all previous point-annotated methods, including BESTIE on HRNet-48 and AttnShift on Vit-B. 
Our model exhibits significant improvements under a 1x training schedule, with a 13.5 AP increase when compared to the previous SOTA method, BESTIE. Furthermore, under a 3x training schedule, SAPNet outperforms AttnShift, which relies on large model training, with 13.4 AP, improvements. Importantly, our method is trained end-to-end without needing post-processing, achieving SOTA performance in point-annotated instance segmentation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.411, + 0.892, + 0.561 + ], + "angle": 0, + "content": "Comparison with other annotation-based methods. Our SAPNet has significantly elevated point annotation, regardless of point annotation's limitations in annotation time and quality compared to box annotation. Utilizing a ResNet-101 backbone and a 3x training schedule, SAPNet surpasses most box-annotated instance segmentation methods, achieving a 1.4 AP improvement over BoxInst. Moreover, SAPNet's segmentation performance nearly matches the mask-annotated methods, effectively bridging the gap between point-annotated and these techniques." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.562, + 0.893, + 0.713 + ], + "angle": 0, + "content": "Segmentation performance on VOC2012SBD. Tab. 2 compares segmentation methods under different supervisions on the VOC2012 dataset. SAPNet reports an enhancement of 7.7 \\(AP\\) over the AttnShift approach, evidencing a notable advancement in performance. Thereby, it significantly outstrips image-level supervised segmentation methods. Additionally, SAPNet surpasses box-annotated segmentation methods, such as BoxInst by 3.4 \\(AP_{50}\\) and DiscoBox by 32.6 \\(AP_{50}\\). Further, our point-prompted method achieves \\(92.3\\%\\) of the Mask-R-CNN." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.725, + 0.665, + 0.74 + ], + "angle": 0, + "content": "4.3. 
Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.749, + 0.892, + 0.779 + ], + "angle": 0, + "content": "More experiments have been conducted on COCO to further analyze SAPNet's effectiveness and robustness." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Training Stage in SAPNet. The ablation study of the training stage is given in Tab. 3. We trained solov2 using the top-1 scored mask provided by SAM and compared it to the two training strategies of SAPNet. In the two-stage approach, the segmentation branch and multiple-mask supervision of SAPNet are removed. Instead, we use the selected mask to train a standalone instance segmentation model, as described by [42]. The end-to-end training method corre" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "3590" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.082, + 0.09, + 0.892, + 0.459 + ], + "angle": 0, + "content": "
MethodAnn.Backbonesched.Arch.mAP\\( \\mathrm{mAP}_{50} \\)\\( \\mathrm{mAP}_{75} \\)\\( \\mathrm{mAP}_{\\mathrm{s}} \\)\\( \\mathrm{mAP}_{\\mathrm{m}} \\)\\( \\mathrm{mAP}_{1} \\)
Fully-supervised instance segmentation models.
Mask R-CNN [16]\\( \\mathcal{M} \\)ResNet-501xMask R-CNN34.656.536.618.337.447.2
YOLACT-700 [5]\\( \\mathcal{M} \\)ResNet-1014.5xYOLACT31.254.032.812.133.347.
PolarMask [16]\\( \\mathcal{M} \\)ResNet-1012xPolarMask32.153.733.114.733.845.3
SOLOv2 [42]\\( \\mathcal{M} \\)ResNet-501xSOLOv234.854.936.913.437.853.7
CondInst [40]\\( \\mathcal{M} \\)ResNet-501xCondInst35.356.437.418.039.450.4
SwinMR [34]\\( \\mathcal{M} \\)Swin-S50eSwinMR43.267.046.124.846.362.1
Mask2Former [11]\\( \\mathcal{M} \\)Swin-S50eMask2Former46.169.452.825.449.768.5
Weakly-supervised instance segmentation models.
IRNet [45]\\( \\mathcal{I} \\)ResNet-501xMask R-CNN6.111.75.5---
BESTIE [21]\\( \\mathcal{I} \\)HRNet-481xMask R-CNN14.328.013.2---
BBTP [18]\\( \\mathcal{B} \\)ResNet-1011xMask R-CNN21.145.517.211.222.029.8
BoxInst [39]\\( \\mathcal{B} \\)ResNet-1013xCondInst33.256.533.616.235.345.1
DiscoBox [23]\\( \\mathcal{B} \\)ResNet-503xSOLOv232.053.632.611.733.748.4
Boxlevelset [27]\\( \\mathcal{B} \\)ResNet-1013xSOLOv233.456.834.115.236.846.8
WISE-Net [24]\\( \\mathcal{P} \\)ResNet-501xMask R-CNN7.818.28.8---
BESTIE†[21]\\( \\mathcal{P} \\)HRNet-481xMask R-CNN17.734.016.4---
AttnShift [31]\\( \\mathcal{P} \\)Vit-B50eMask R-CNN21.243.519.4---
SAM-SOLOv2\\( \\mathcal{P} \\)ResNet-501xSOLOv224.641.925.39.328.638.1
MIL-SOLOv2\\( \\mathcal{P} \\)ResNet-501xSOLOv226.847.726.811.231.540.4
SAPNet(ours)\\( \\mathcal{P} \\)ResNet-501xSOLOv231.251.832.312.635.147.8
SAPNet(ours)*\\( \\mathcal{P} \\)ResNet-1013xSOLOv234.656.036.615.739.552.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.464, + 0.895, + 0.52 + ], + "angle": 0, + "content": "Table 1. Mask annotation(M), image annotation(I), box annotation(B) and point annotation(P) performance on COCO-17 val. 'Ann.' is the type of the annotation and 'sched.' means schedule. * is the multi-scale augment training for re-training segmentation methods, and other experiments are on single-scale training. SwinMR is Swin-Transformer-Mask R-CNN. SwinMR and Mask2Former use multi-scale data augment strategies for SOTA." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.524, + 0.47, + 0.716 + ], + "angle": 0, + "content": "
MethodSup.BackboneAP25AP50AP75
Mask R-CNN [16]MR-5078.068.843.3
Mask R-CNN [16]MR-10179.670.245.3
BoxInst [39]BR-101-61.437.0
DiscoBoxBR-10172.862.237.5
BESTIE [21]IHRNet53.541.724.2
IRNet [45]IR-50-46.723.5
BESTIE† [21]IHRNet61.251.026.6
WISE-Net [24]PR-5053.543.025.9
BESTIE [21]PHRNet58.646.726.3
BESTIE† [21]PHRNet66.456.130.2
Attnshift [31]PVit-S68.354.425.4
Attnshift† [31]PVit-S70.357.130.4
SAPNet(ours)PR-10176.564.858.7
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.076, + 0.719, + 0.47, + 0.746 + ], + "angle": 0, + "content": "Table 2. Instance segmentation performance on the VOC2012 test set. \\(\\dagger\\) indicates applying MRCNN refinement." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.747, + 0.47, + 0.852 + ], + "angle": 0, + "content": "sponds to the architecture illustrated in Fig. 2. Our findings indicate that our method is more competitive than directly employing SAM (31.2 AP vs 24.6 AP), and the visualization of Fig. 4 shows us this enhancement. Moreover, the end-to-end training strategy boasts a more elegant model structure and outperforms the two-stage approach in overall efficiency (31.2 AP vs 30.18 AP)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Effect of Each Component. Given the limited performance of SAM-top1, we opted for the single-MIL as our baseline. With a preliminary selection using MIL1, we" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.524, + 0.679, + 0.628 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.681, + 0.524, + 0.887, + 0.628 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.632, + 0.894, + 0.687 + ], + "angle": 0, + "content": "Figure 4. The comparative visualization between SAM-top1 and SAPNet is presented, showcasing SAM's segmentation outcomes in green masks and our results in yellow. The orange and red bounding boxes highlight the respective mask boundaries." + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.695, + 0.895, + 0.765 + ], + "angle": 0, + "content": "
train stage on cocosched.APAP50AP75
SAM-top11x24.641.925.3
Two stage1x30.249.831.5
End to end1x31.251.832.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.77, + 0.895, + 0.798 + ], + "angle": 0, + "content": "Table 3. The experimental comparisons of segmenters in COCO dataset, SAM-top1 is the highest scoring mask generated by SAM." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.894, + 0.901 + ], + "angle": 0, + "content": "have achieved a segmentation performance of 26.8 AP. i) Point Distance Guidance. We updated the proposal scores from the existing MIL by integrating the PDG module into the foundational MIL selection. This approach successfully segments adjacent objects of the same category, improving the segmentation performance by 0.7 points (27.5 vs 26.8)." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.958 + ], + "angle": 0, + "content": "3591" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.084, + 0.089, + 0.465, + 0.201 + ], + "angle": 0, + "content": "
mil1PDGmil2PNPGBMSMPSmAP
26.8
27.5
27.7
29.7
30.8
31.2
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.076, + 0.205, + 0.47, + 0.276 + ], + "angle": 0, + "content": "Table 4. The effect of each component in SAPNet: proposal selection module(MIL1), point distance guidance(PDG), positive and negative proposals generator(PNPG), proposal selection module(MIL2), box mining strategy(BMS), and Multi-mask Proposals Supervision(MPS) in Sec. 7 of supplementary." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.279, + 0.468, + 0.461 + ], + "angle": 0, + "content": "ii) MIL2. Building on the previous step, we incorporate a second MIL selection module to refine the initially selected boxes, resulting in a performance increment of 0.2 points. iii) PNPG. For MIL2, we devised the positive-negative sample sets, aiming to enhance the input quality for the PRM module and use the negative samples to suppress background. This adjustment leads to a segmentation performance boost of 2 points (29.7 vs 27.7). iv) BMS. Within the PRM, we refine the selected boxes using BMS, pushing the segmentation performance up by 1.1 points (30.8 vs 29.7). v) MPS. Utilizing MPS for segmentation branch supervision yields a 0.4-point performance improvement." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.462, + 0.468, + 0.567 + ], + "angle": 0, + "content": "Threshold of BMS. For point refinement, there are two constraints (described in Sec. 3.4). \\( T_{min1} \\) and \\( T_{min2} \\) are thresholds of the Box Mining Strategy. In Tab. 5, it shows that the two constraints together to obtain performance gain. After multiple experiments, we have found that there is a significant performance improvement when \\( T_{min1} \\) and \\( T_{min2} \\) are set to 0.6 and 0.3, respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.568, + 0.468, + 0.688 + ], + "angle": 0, + "content": "Components of PNPG. Tab. 
6 presents the results of a dissected ablation study on the Positive and Negative Proposals Generator(PNPG), illustrating the respective impacts of the positive and negative examples on the model's performance. It is evident that the construction of negative examples plays a significant role in enhancing model efficacy. Furthermore, the beneficial effects of both positive and negative examples are observed to be cumulative." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.689, + 0.468, + 0.902 + ], + "angle": 0, + "content": "Performance Analysis. As presented in Tab. 7, we conducted a statistical analysis to validate SAPNet's capability to address 'local' issue and compare the outcomes selected by the single-MIL with those obtained by SAPNet in the absence of segmentation branch integration. Specifically, the part problem generated by the single-MIL, where MIL is inclined to select proposals with a higher proportion of foreground, is exemplified in Fig. 6 of supplementary. On this premise, we initially establish an evaluative criterion \\( R_{v} = \\frac{area_{mask}}{area_{box}} \\), which is the ratio of the mask area to the bounding box area. Subsequently, we compute \\( R_{v_i} \\) for each proposal within the proposal bag corresponding to every instance across the entire COCO dataset and select the maximum \\( R_{v_{max}} \\) to compute the mean value over the dataset," + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.09, + 0.895, + 0.189 + ], + "angle": 0, + "content": "
Tmin1Tmin2APAP50AP75APsAPmAPl
0.50.330.951.332.012.234.747.4
0.50.430.751.231.811.934.747.1
0.60.331.251.832.312.635.147.8
0.60.430.851.132.012.134.747.3
0.70.331.051.532.212.634.947.3
0.70.430.751.131.912.034.647.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.565, + 0.194, + 0.825, + 0.208 + ], + "angle": 0, + "content": "Table 5. Constraints in box mining strategy." + }, + { + "type": "table", + "bbox": [ + 0.556, + 0.212, + 0.839, + 0.309 + ], + "angle": 0, + "content": "
PNPGAPAP50AP75
PPGNPG
29.349.730.0
29.850.530.8
30.751.231.7
31.251.832.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.544, + 0.314, + 0.847, + 0.327 + ], + "angle": 0, + "content": "Table 6. Meticulous ablation experiments in PNPG" + }, + { + "type": "table", + "bbox": [ + 0.577, + 0.333, + 0.819, + 0.385 + ], + "angle": 0, + "content": "
MethodGapmIoUbox
Single-MIL0.19963.8
SAPNet0.13169.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.546, + 0.389, + 0.844, + 0.404 + ], + "angle": 0, + "content": "Table 7. Experimental analysis with part problem." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.415, + 0.892, + 0.49 + ], + "angle": 0, + "content": "which is then designated as the threshold \\( T_{rv} \\). Ultimately, we identify the ground truth \\( R_{vgt} \\) and objects where \\( R_{vmax} \\) exceeds \\( T_{rv} \\) and calculates the discrepancy between \\( R_{v} \\) values selected by single-MIL and SAPNet. The description is as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.51, + 0.499, + 0.891, + 0.527 + ], + "angle": 0, + "content": "\\[\nG a p _ {s i n g l e} = R v _ {s i n g l e} - R v _ {g t}, \\quad G a p _ {o u r} = R v _ {o u r} - R v _ {g t}. \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.531, + 0.892, + 0.592 + ], + "angle": 0, + "content": "Tab. 7 shows that the proposed SAPNet mitigates the locality issue faced by the single-MIL. Furthermore, the boxes selected via SAPNet exhibit a substantially higher IoU with GT than those selected by the single-MIL." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.608, + 0.619, + 0.623 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.631, + 0.892, + 0.802 + ], + "angle": 0, + "content": "In this paper, we propose SAPNet, an innovative end-to-end point-prompted instance segmentation framework. SAPNet transforms point annotations into category-agnostic mask proposals and employs dual selection branches to elect the most semantic mask for each object, guiding the segmentation process. To address challenges such as indistinguishable adjacent objects of the same class and MIL's locality bias, we integrate PDG and PNPG, complemented by a Box Mining Strategy for enhanced proposal refinement. 
SAPNet uniquely merges segmentation and selection branches under multi-mask supervision, significantly enhancing its segmentation performance. Extensive experimental comparisons on VOC and COCO datasets validate the SAPNet's effectiveness in point-prompted instance segmentation." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.821, + 0.688, + 0.837 + ], + "angle": 0, + "content": "6. Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.84, + 0.892, + 0.9 + ], + "angle": 0, + "content": "This work was supported in part by the Youth Innovation Promotion Association CAS, the National Natural Science Foundation of China (NSFC) under Grant No. 61836012, 61771447 and 62272438, and the Strategic Priority Research Program of the Chinese Academy of Sciences under Grant No.XDA27000000." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "3592" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.175, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.184 + ], + "angle": 0, + "content": "[1] Jiwoon Ahn, Sunghyun Cho, and Suha Kwak. Weakly supervised learning of instance segmentation with inter-pixel relations. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2209-2218, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.185, + 0.472, + 0.227 + ], + "angle": 0, + "content": "[2] Pablo Andres Arbeláez, Jordi Pont-Tuset, and Jonathan T. Barron et al. Multiscale combinatorial grouping. In CVPR, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.229, + 0.471, + 0.286 + ], + "angle": 0, + "content": "[3] Aditya Arun, CV Jawahar, and M Pawan Kumar. Weakly supervised instance segmentation by learning annotation consistent instances. In European Conference on Computer Vision, pages 254-270. Springer, 2020. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.286, + 0.471, + 0.315 + ], + "angle": 0, + "content": "[4] Hakan Bilen and Andrea Vedaldi. Weakly supervised deep detection networks. In CVPR, 2016. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.316, + 0.472, + 0.373 + ], + "angle": 0, + "content": "[5] Daniel Bolya, Chong Zhou, Fanyi Xiao, and Yong Jae Lee. Yolact: Real-time instance segmentation. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9157-9166, 2019. 1, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.373, + 0.471, + 0.416 + ], + "angle": 0, + "content": "[6] Léon Bottou. Stochastic gradient descent tricks. In Neural Networks: Tricks of the Trade: Second Edition, pages 421-436. Springer, 2012. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.417, + 0.471, + 0.5 + ], + "angle": 0, + "content": "[7] Kai Chen, Jiaqi Wang, Jiangmiao Pang, Yuhang Cao, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jiarui Xu, et al. Mmdetection: Open mmlab detection toolbox and benchmark. arXiv preprint arXiv:1906.07155, 2019. https://github.com/open-mmlab/mmdetection.6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.501, + 0.471, + 0.572 + ], + "angle": 0, + "content": "[8] Keyan Chen, Chenyang Liu, Hao Chen, Haotian Zhang, Wenyuan Li, Zhengxia Zou, and Zhenwei Shi. Rsprompter: Learning to prompt for remote sensing instance segmentation based on visual foundation model. arXiv preprint arXiv:2306.16269, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.573, + 0.471, + 0.655 + ], + "angle": 0, + "content": "[9] Pengfei Chen, Xuehui Yu, Xumeng Han, Najmul Hassan, Kai Wang, Jiachen Li, Jian Zhao, Humphrey Shi, Zhenjun Han, and Qixiang Ye. Point-to-box network for accurate object detection via single point supervision. In European Conference on Computer Vision, pages 51-67. Springer, 2022. 
2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.657, + 0.47, + 0.713 + ], + "angle": 0, + "content": "[10] Tianle Chen, Zheda Mai, Ruiwen Li, and Wei-lun Chao. Segment anything model (sam) enhanced pseudo labels for weakly supervised semantic segmentation. arXiv preprint arXiv:2305.05803, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.714, + 0.47, + 0.77 + ], + "angle": 0, + "content": "[11] Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, and Rohit Girdhar. Masked-attention mask transformer for universal image segmentation. In CVPR, 2022. 1, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.771, + 0.471, + 0.829 + ], + "angle": 0, + "content": "[12] Bowen Cheng, Omkar Parkhi, and Alexander Kirillov. Pointly-supervised instance segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2617-2626, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.83, + 0.47, + 0.885 + ], + "angle": 0, + "content": "[13] Mark Everingham, Luc Van Gool, and Christopher K. I. Williams et al. The Pascal visual object classes (VOC) challenge. IJCV, 2010. http://host.robots.ox.ac.uk/pascal/VOC/.6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.887, + 0.47, + 0.902 + ], + "angle": 0, + "content": "[14] Junsong Fan, Zhaoxiang Zhang, and Tieniu Tan. Pointly-" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.121 + ], + "angle": 0, + "content": "supervised panoptic segmentation. In European Conference on Computer Vision, pages 319-336. Springer, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.121, + 0.892, + 0.149 + ], + "angle": 0, + "content": "[15] Kaiming He, Xiangyu Zhang, and Shaoqing Ren et al. Deep residual learning for image recognition. In CVPR, 2016. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.149, + 0.892, + 0.176 + ], + "angle": 0, + "content": "[16] Kaiming He, Georgia Gkioxari, and Piotr Dólar et al. Mask R-CNN. In ICCV, 2017. 1, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.177, + 0.892, + 0.232 + ], + "angle": 0, + "content": "[17] Sheng He, Rina Bao, Jingpeng Li, P Ellen Grant, and Yangming Ou. Accuracy of segment-anything model (sam) in medical image segmentation tasks. arXiv preprint arXiv:2304.09324, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.232, + 0.892, + 0.288 + ], + "angle": 0, + "content": "[18] Cheng-Chun Hsu, Kuang-Jui Hsu, Chung-Chi Tsai, Yen-Yu Lin, and Yung-Yu Chuang. Weakly supervised instance segmentation using the bounding box tightness prior. In NeurIPS, 2019. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.288, + 0.892, + 0.33 + ], + "angle": 0, + "content": "[19] Peng-Tao Jiang and Yuqi Yang. Segment anything is a good pseudo-label generator for weakly supervised semantic segmentation. arXiv preprint arXiv:2305.01275, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.33, + 0.892, + 0.372 + ], + "angle": 0, + "content": "[20] Lei Ke, Mingqiao Ye, Martin Danelljan, Yifan Liu, Yu-Wing Tai, Chi-Keung Tang, and Fisher Yu. Segment anything in high quality. arXiv preprint arXiv:2306.01567, 2023. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.372, + 0.892, + 0.44 + ], + "angle": 0, + "content": "[21] Beomyoung Kim, Youngjoon Yoo, Chaeun Rhee, and Junmo Kim. Beyond semantic to instance segmentation: Weakly-supervised instance segmentation via semantic knowledge transfer and self-refinement. In CVPR, 2022. 1, 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.441, + 0.892, + 0.512 + ], + "angle": 0, + "content": "[22] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. 
Segment anything. arXiv preprint arXiv:2304.02643, 2023. https://segment-anything.com/.1,2,3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.512, + 0.892, + 0.607 + ], + "angle": 0, + "content": "[23] Shiyi Lan, Zhiding Yu, Christopher Choy, Subhashree Radhakrishnan, Guilin Liu, Yuke Zhu, Larry S Davis, and Anima Anandkumar. Discobox: Weakly supervised instance segmentation and semantic correspondence from box supervision. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3406-3416, 2021. 1, 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.608, + 0.892, + 0.651 + ], + "angle": 0, + "content": "[24] Issam H. Laradji, Negar Rostamzadeh, Pedro O. Pinheiro, David Vázquez, and Mark Schmidt. Proposal-based instance segmentation with point supervision. In ICIP, 2020. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.651, + 0.892, + 0.72 + ], + "angle": 0, + "content": "[25] Jungbeom Lee, Jihun Yi, Chaehun Shin, and Sungroh Yoon. Bbam: Bounding box attribution map for weakly supervised semantic and instance segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2643-2652, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.72, + 0.892, + 0.776 + ], + "angle": 0, + "content": "[26] Feng Li, Hao Zhang, Peize Sun, Xueyan Zou, Shilong Liu, Jianwei Yang, Chunyuan Li, Lei Zhang, and Jianfeng Gao. Semantic-sam: Segment and recognize anything at any granularity. arXiv preprint arXiv:2307.04767, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.776, + 0.892, + 0.832 + ], + "angle": 0, + "content": "[27] Wentong Li, Wenyu Liu, Jianke Zhu, Miaomiao Cui, XianSheng Hua, and Lei Zhang. Box-supervised instance segmentation with level set evolution. In European conference on computer vision, pages 1-18. Springer, 2022. 
1, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.832, + 0.892, + 0.902 + ], + "angle": 0, + "content": "[28] Wentong Li, Yuqian Yuan, Song Wang, Jianke Zhu, Jianshu Li, Jian Liu, and Lei Zhang. Point2mask: Point-supervised panoptic segmentation via optimal transport. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 572-581, 2023. 1, 2" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "3593" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.161 + ], + "angle": 0, + "content": "[29] Wentong Li, Wenyu Liu, Jianke Zhu, Miaomiao Cui, Risheng Yu Xiansheng Hua, and Lei Zhang. Box2mask: Box-supervised instance segmentation via level-set evolution. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.163, + 0.47, + 0.219 + ], + "angle": 0, + "content": "[30] Wentong Li, Yuqian Yuan, Song Wang, Wenyu Liu, Dongqi Tang, Jianke Zhu, Lei Zhang, et al. Label-efficient segmentation via affinity propagation. Advances in Neural Information Processing Systems, 36, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.22, + 0.47, + 0.275 + ], + "angle": 0, + "content": "[31] Mingxiang Liao, Zonghao Guo, , and Yuze Wang et al. Attentionshift: Iteratively estimated part-based attention map for pointly supervised instance segmentation. In CVPR, 2023. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.277, + 0.469, + 0.304 + ], + "angle": 0, + "content": "[32] Tsung-Yi Lin, Priya Goyal, and Ross B. Girshick et al. Focal loss for dense object detection. In ICCV, 2017. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.305, + 0.469, + 0.348 + ], + "angle": 0, + "content": "[33] Tsung-Yi Lin, Michael Maire, and Serge et al. Belongie. 
Microsoft coco: Common objects in context. In ECCV, 2014. https://cocodataset.org/. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.349, + 0.469, + 0.389 + ], + "angle": 0, + "content": "[34] Ze Liu, Yutong Lin, and Yue Cao et al. Swin transformer: Hierarchical vision transformer using shifted windows. In ICCV, 2021. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.391, + 0.469, + 0.459 + ], + "angle": 0, + "content": "[35] Fausto Milletari, Nassir Navab, and Seyed-Ahmad Ahmadi. V-net: Fully convolutional neural networks for volumetric medical image segmentation. In 2016 fourth international conference on 3D vision (3DV), pages 565-571. IEEE, 2016. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.461, + 0.47, + 0.531 + ], + "angle": 0, + "content": "[36] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 115:211-252, 2015. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.532, + 0.469, + 0.573 + ], + "angle": 0, + "content": "[37] Peng Tang and Xinggang Wang et al. Multiple instance detection network with online instance classifier refinement. In CVPR, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.575, + 0.469, + 0.615 + ], + "angle": 0, + "content": "[38] Peng Tang, Xinggang Wang, and Song Bai et al. PCL: proposal cluster learning for weakly supervised object detection. IEEE TPAMI, 2020. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.617, + 0.469, + 0.657 + ], + "angle": 0, + "content": "[39] Zhi Tian, Chunhua Shen, Xinlong Wang, and Hao Chen. Boxinst: High-performance instance segmentation with box annotations. In CVPR, 2021. 1, 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.66, + 0.469, + 0.702 + ], + "angle": 0, + "content": "[40] Zhi Tian, Bowen Zhang, Hao Chen, and Chunhua Shen. 
Instance and panoptic segmentation using conditional convolutions. IEEE TPAMI, 2023. 1, 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.703, + 0.469, + 0.743 + ], + "angle": 0, + "content": "[41] Xinlong Wang, Tao Kong, Chunhua Shen, Yuning Jiang, and Lei Li. SOLO: segmenting objects by locations. In ECCV, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.746, + 0.469, + 0.815 + ], + "angle": 0, + "content": "[42] Xinlong Wang, Rufeng Zhang, Tao Kong, Lei Li, and Chunhua Shen. Solov2: Dynamic and fast instance segmentation. Proc. Advances in Neural Information Processing Systems (NeurIPS), 2020. https://github.com/WXinlong/SOLO.1, 2, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.817, + 0.469, + 0.858 + ], + "angle": 0, + "content": "[43] Jinyu Yang, Mingqi Gao, Zhe Li, Shang Gao, Fangjing Wang, and Feng Zheng. Track anything: Segment anything meets videos. arXiv preprint arXiv:2304.11968, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.86, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[44] Xu Zhao, Wenchao Ding, Yongqi An, Yinglong Du, Tao Yu, Min Li, Ming Tang, and Jinqiao Wang. Fast segment anything. arXiv preprint arXiv:2306.12156, 2023. 1, 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.092, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[45] Yanning Zhou, Hao Chen, Jiaqi Xu, Qi Dou, and Pheng-Ann Heng. Irnet: Instance relation network for overlapping cervical cell segmentation. In MICCAI, 2019. 
1, 7" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.957 + ], + "angle": 0, + "content": "3594" + } + ] +] \ No newline at end of file diff --git a/2024/Semantic-aware SAM for Point-Prompted Instance Segmentation/98ef3dac-add6-4e97-bd9e-baeff12acffa_origin.pdf b/2024/Semantic-aware SAM for Point-Prompted Instance Segmentation/98ef3dac-add6-4e97-bd9e-baeff12acffa_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..16135744cc6a157433ec59fe5b409486c3163c96 --- /dev/null +++ b/2024/Semantic-aware SAM for Point-Prompted Instance Segmentation/98ef3dac-add6-4e97-bd9e-baeff12acffa_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b48fb2587ba598a2327f7bf673a9c23970ef3377f04c3dd4b1cbeb047804cc2 +size 10025598 diff --git a/2024/Semantic-aware SAM for Point-Prompted Instance Segmentation/full.md b/2024/Semantic-aware SAM for Point-Prompted Instance Segmentation/full.md new file mode 100644 index 0000000000000000000000000000000000000000..dbb66281a7587e9070be62f418f439810b7d6fc7 --- /dev/null +++ b/2024/Semantic-aware SAM for Point-Prompted Instance Segmentation/full.md @@ -0,0 +1,327 @@ +# Semantic-aware SAM for Point-Prompted Instance Segmentation + +Zhaoyang Wei $^{1*}$ , Pengfei Chen $^{1*}$ , Xuehui Yu $^{1*}$ , Guorong Li $^{1}$ , Jianbin Jiao $^{1}$ , Zhenjun Han $^{1\dagger}$ + +1University of Chinese Academy of Sciences(UCAS) + +# Abstract + +Single-point annotation in visual tasks, with the goal of minimizing labelling costs, is becoming increasingly prominent in research. Recently, visual foundation models, such as Segment Anything (SAM), have gained widespread usage due to their robust zero-shot capabilities and exceptional annotation performance. However, SAM's class-agnostic output and high confidence in local segmentation introduce semantic ambiguity, posing a challenge for precise category-specific segmentation. 
In this paper, we introduce a cost-effective category-specific segmenter using SAM. To tackle this challenge, we have devised a Semantic-Aware Instance Segmentation Network (SAPNet) that integrates Multiple Instance Learning (MIL) with matching capability and SAM with point prompts. SAPNet strategically selects the most representative mask proposals generated by SAM to supervise segmentation, with a specific focus on object category information. Moreover, we introduce the Point Distance Guidance and Box Mining Strategy to mitigate inherent challenges: group and local issues in weakly supervised segmentation. These strategies serve to further enhance the overall segmentation performance. The experimental results on Pascal VOC and COCO demonstrate the promising performance of our proposed SAPNet, emphasizing its semantic matching capabilities and its potential to advance point-prompted instance segmentation. The code is available at https://github.com/zhaoyangwei123/SAPNet. + +# 1. Introduction + +Instance segmentation seeks to discern pixel-level labels for both instances of interest and their semantic content in images, a crucial function in domains like autonomous driving, image editing, and human-computer interaction. Despite impressive results demonstrated by various studies [5, 11, 16, 40-42], the majority of these high-performing methods are trained in a fully supervised manner and heavily dependent on detailed pixel-level mask annotations, + +![](images/812a1aefd54f5b63c9e2f44b1709b59996855a7d9a975724833467707765d8e9.jpg) +Figure 1. Three Challenges Brought by SAM and single-MIL. Orange dash box illustrates that semantic ambiguity in SAM-generated masks, where it erroneously assigns higher scores to non-object categories like clothes, despite the person being our desired target. Green dash box depicts a comparison between mask proposals using single-MIL and SAPNet. 
It illustrates two primary challenges: 'group', where segmentation encounters difficulties in isolating individual targets among adjacent objects of the same category, and 'local', where MIL favors foreground-dominant regions, resulting in overlooked local details. + +thereby incurring significant labeling costs. To address this challenge, researchers are increasingly focusing on weakly supervised instance segmentation, leveraging cost-effective supervision methods, such as bounding boxes [23, 27, 39], points [14, 28], and image-level labels [21, 45]. + +Recently, visual foundation models, such as Segment Anything (SAM)[22], have been widely employed by researchers for their exceptional generalization capabilities and impressive annotation performance. Numerous studies based on SAM, such as [20, 44] have emerged, building upon the foundations of SAM to further enhance its generalization capabilities and efficiency. However, these efforts have predominantly focused on improving the annotation performance of SAM. One limitation arises from SAM's lack of classification ability, resulting in class-agnostic segmentation results that fail to accurately segment specific categories as desired. + +To tackle the inherent semantic ambiguity in SAM and achieve specific-category segmentation, we propose integrating weak annotations with SAM, employing point annotations as prompts to imbue semantic information into + +SAM's outputs. A straightforward approach involves leveraging SAM's intrinsic scoring mechanism, selecting the top-scoring mask as the corresponding label for each category. However, when annotating object points are fed into the SAM, its category-agnostic characteristic tends to assign higher scores to parts of the object, resulting in generated mask annotations that fail to encompass the object as a whole. In Fig. 1 orange dashed box, we aim to obtain the 'person' mask annotation, but SAM predicts the proposals of 'clothes', 'clothes+trousers' and 'person'. 
Relying solely on the score SAM provides is insufficient, as the highest score corresponds to 'clothes' (col-2), which does not meet our specific needs. + +To address this challenge, we have proposed SAPNet, a semantically-aware instance segmentation network designed for high-quality, end-to-end segmentation. In this study, we design a proposal selection module (PSM) using the Multiple Instance Learning (MIL) paradigm to choose proposals that align closely with the specified semantic label. However, the MIL-based method relies on the classification score, often leading to group and local predictions [4, 21, 24]. In Fig. 1 green dashed box, the group issue is evident, where two objects of the same category are often both included when they are in close proximity. It also illustrates the local issue, where the MIL classifier frequently predicts the most discriminative region instead of the entire object. To overcome these limitations, we have introduced Point Distance Guidance (PDG) and Box Mining Strategies (BMS). Specifically, we penalize the selection results by calculating the Euclidean distances between the annotated points of identical categories enclosed within the proposals. Additionally, for more localized proposals, we filter out higher-quality proposals from their corresponding bags and dynamically merge them in scale. By fully exploiting the positional clues to prevent local and group prediction, we aim to select the proposal that most effectively represents the object category in refinement stage. The primary contributions of this work can be outlined as follows: + +1) We introduce SAPNet, an end-to-end semantic-aware instance segmentation network based on point prompts. SAPNet combines the visual foundation model SAM with semantic information to address its inherent semantic ambiguity, facilitating the generation of semantically-aware proposal masks. 
+2) We incorporate Point Distance Guidance (PDG) and Box Mining Strategies (BMS) to prevent local and group predictions induced by MIL-based classifiers in both the proposal selection and refinement stages. +3) SAPNet achieves state-of-the-art performance in Point-Prompted Instance Segmentation (PPIS), significantly bridging the gap between point-prompted and fully-supervised segmentation methods on two challenging benchmarks (COCO and VOC2012). + +# 2. Related Work + +Weakly-Supervised Instance Segmentation (WSIS) offers a practical approach for accurate object masks using minimal supervision. It spans a range of annotations, from image labels to bounding boxes. Research has focused on narrowing the performance gap between weakly and fully-supervised methods, primarily through box-level [18, 25, 39] and image-level annotations [1, 21]. Box-based methods have explored structural constraints to guide the segmentation, as seen in BBTP [18], BoxInst [39], and Box2Mask [29], and applied structural constraints to drive segmentation, treating it as a multiple-instance learning task or enforcing color consistency based on CondInst [40]. These approaches, while innovative, can complicate training and sometimes neglect the object's overall shape due to their focus on local features and proposal generation, like MCG [2]. Conversely, the proposal-free methods, like IRN [1], rely on class relationships for mask production but can falter in accurately separating instances. To preserve object integrity, recent methods such as Discobox [23] and BESTIE [21] integrate advanced semantic insights into instance segmentation using pairwise losses or saliency cues [30, 39, 42]. However, semantic drift remains an issue, with mislabeling or missed instances resulting in inferior pseudo labels [3] compromising segmentation quality. + +Pointly-Supervised Detection and Segmentation (PSDS) cleverly balances minimal annotation costs with satisfactory localization accuracy. 
By introducing point annotations, WISE-Net [24], P2BNet [9] and BESTIE [21] improve upon weakly supervised methods that suffer from vague localizations. That only slightly increases the costs (by about $10\%$ ) and is almost as quick as the image-level annotation, but that is far speedier than more detailed bounding box or mask annotations. Such precision allows for tackling semantic bias, as seen in methods like PointRend [12], which utilize multiple points for improved accuracy, despite requiring additional bounding box supervision. Recent advancements in point-supervised instance segmentation, employed by WISE-Net and Point2Mask [28], show that even single-point annotations can yield precise mask proposals. WISE-Net skillfully localizes objects and selects masks, while BESTIE enhances accuracy using instance cues and self-correction to reduce semantic drift. Attnshift [31] advances this by extending single points to reconstruct entire objects. Apart from their complexity, these methods have yet to fully demonstrate their effectiveness, indicating ongoing challenges in harnessing single-point annotations for image segmentation and presenting clear avenues for further research. + +Prompting and Foundation Models. Prompt-based learning enables pretrained foundation models to adapt to various tasks using well-crafted prompts. SAM [22], a prominent example in computer vision, exemplifies robust zero + +shot generalization and interactive segmentation across multiple applications. Additionally, SAM-based models like Fast-SAM [44] increases speed, HQ-SAM [20] improves segmentation quality, and Semantic-SAM [26] optimizes performance by training on diverse data granularities. Foundational models, pre-trained on large datasets, help improve generalization in downstream tasks, especially in data-scarce scenarios. 
Building on SAM, Rsprompter [8] utilizes SAM-derived pseudo labels for improved remote sensing segmentation, meanwhile, adaptations for medical imaging and video tracking are explored in A-SAM [17] and Tracking Anything [43]. Further, [10] and [19] have integrated SAM with Weakly Supervised Semantic Segmentation networks to refine pseudo labels. Our research builds upon these innovations, transforming point annotations into mask proposals in instance segmentation to significantly enhance performance.

# 3. Methodology

# 3.1. Overview

The overview of our method is illustrated in Fig. 2. SAPNet comprises two branches: one dedicated to the selection and refinement of mask proposals to generate pseudo-labels and the other employing solov2 head [42] for instance segmentation supervised by the generated pseudo labels. The central focus of our approach is the pseudo-label generation branch, exclusively utilized during the training phase, which includes the PSM, PNPG, and PRM modules. Following the initial proposal inputs, the PSM employs multi-instance learning and a point-distance penalty to identify semantically rich proposals. Subsequently, coupled with selected proposals from the PSM stage, the PNPG generates quality positive-negative bags to mitigate background and locality issues, emphasizing the primary regions of interest. Then, the PRM processes these bags, which selects refined proposals from positive bags to improve final box quality. Ultimately, the mask mappings derived from these box proposals are utilized to guide the segmentation branch. This guarantees the acquisition of high-quality category-specified mask proposals to supervise the segmentation branch.

# 3.2. Proposal Selection Module

SAM's limited semantic discernment causes category-agnostic labeling, leading to inconsistent proposal quality for the same objects. Employing these proposals directly for segmentation supervision could introduce noise and impair performance. 
Our goal is to design a category-specific segmenter, which needs to select the most semantically representative proposals for robust supervision. + +Motivated by the insights from WSDDN [4] and P2BNet [9], our proposal selection module employs multi-instance + +learning and leverages labeling information to prioritize high-confidence proposals for segmentation. In the training phase, we leverage SAM[22] solely to generate category-agnostic proposals. To avoid excessive memory use and slow training, we convert them into box proposals using the minimum bounding rectangle, and combine with depth features $F \in \mathbb{R}^{H \times W \times D}$ from the image $I \in \mathbb{R}^{H \times W}$ , serve as input to the PSM. Utilizing our designed MIL loss, PSM precisely predicts each proposal's class and instance details. It selects the highest-scoring proposal as the semantically richest bounding box for each object, effectively choosing higher quality mask proposals. + +Given an image $I$ with $N$ point annotations $Y_{n} = \{(p_{i},c_{i})\}_{i = 1}^{N}$ , where $p_i$ is the coordinate of the annotated point and $c_{i}$ is the class index. We transform each class-informative point $p_i$ into $M$ semantic mask proposals, which is further converted to a semantic proposal bag $B_{i}\in \mathbb{R}^{M\times 4}$ . As illustrated in Fig. 2, after passing through a 7x7 RoIAlign layer and two fully-connected layers, features $F_{i}\in \mathbb{R}^{M\times H\times W\times D}$ are extracted from proposal bag $B_{i}$ . Like in [4] and [37], the features $F$ serve as input for the classification branch and instance branch, using fully-connected layer $f$ and $f^{\prime}$ to generate $\mathbf{W}_{cls}\in \mathbb{R}^{M\times K}$ and $\mathbf{W}_{ins}\in \mathbb{R}^{M\times K}$ . 
A softmax activation function over $K$ class and $M$ instance dimensions yields the classification scores $\mathbf{S}_{cls}\in \mathbb{R}^{M\times K}$ and instance scores $\mathbf{S}_{ins}\in \mathbb{R}^{M\times K}$ . + +$$ +\begin{array}{l} \mathbf {W} _ {c l s} = f (\mathbf {F}); [ \mathbf {S} _ {c l s} ] _ {m k} = e ^ {[ \mathbf {W} _ {c l s} ] _ {m k}} / \sum_ {k = 1} ^ {K} e ^ {[ \mathbf {W} _ {c l s} ] _ {m k}}. \\ \mathbf {W} _ {i n s} = f ^ {\prime} (\mathbf {F}); [ \mathbf {S} _ {i n s} ] _ {m k} = e ^ {[ \mathbf {W} _ {i n s} ] _ {m k}} / \sum_ {m = 1} ^ {M} e ^ {[ \mathbf {W} _ {i n s} ] _ {m k}}. \tag {1} \\ \end{array} +$$ + +where $[\cdot]_{mk}$ is the value in row $m$ and column $k$ of matrix. + +Point Distance Guidance. SAM and MIL struggle with distinguishing adjacent objects of the same category, often merging two separate objects into one and giving high score. To combat this, we incorporate instance-level annotated point information and introduce a spatially aware selection with a point-distance penalty mechanism. + +To address the challenge of overlapping objects and thereby enhance model optimization, we propose a strategy specifically aimed at penalizing instances of object overlap. For each m-th proposal within the set $B_{i}$ , we define $t_{mj} = 1$ to denote an overlap with any proposal in another identical class bag $B_{j}$ ; otherwise, $t_{mj} = 0$ . The penalty imposed increases in proportion to the distance of the overlapping objects from the proposal in question. This penalty, $W_{dis}$ , is represented using the Euclidean distance between the annotated points of the overlapping proposals. Subsequently, the reciprocal of $W_{dis}$ is then passed through a sigmoid function to compute the distance score $\mathbf{S}_{dis}$ for the proposal. + +$$ +\begin{array}{l} \left[ \mathbf {W} _ {d i s} \right] _ {i m} = \sum_ {j = 1, j \neq i} ^ {N} \| p _ {i} - p _ {j} \| * t _ {m j}. 
\tag {2} \\ [ \mathbf {S} _ {d i s} ] _ {i m} = \left(1 / \left(1 + e ^ {- 1 / [ \mathbf {W} _ {d i s} ] _ {i m}}\right)\right) ^ {d}. \\ \end{array}
$$

![](images/6d66dc9f49b4155db0bdf1985b5c709d0f637159652c5b601a231cd7ce856475.jpg)
Figure 2. The framework of SAPNet comprises two components: one for generating mask proposals and another for their utilization in instance segmentation. The process starts with generating category-agnostic mask proposals using point prompts within a visual foundation model. That is followed by an initial proposal selection via MIL combined with PDG. Next, the PRM refines these proposals using positive and negative samples from PNPG, capturing global object semantics. Finally, augmented with the multi-mask proposal supervision, the segmentation branch aims to improve segmentation quality.

![](images/0cfbabbee76143e3c4f53d38feb7e5afc47bd365548088625de6f90afda6b588.jpg)
Figure 3. The mechanism of the proposal selection module.

where $\left[\cdot\right]_{im}$ is the value at the row $i$ and column $m$ in the matrix, and $d$ is the exponential factor.

PSM Loss. The final score $\mathbf{S}$ of each proposal is obtained by computing the Hadamard product of the classification score, the instance score, and the distance score, while the score $\widehat{\mathbf{S}}$ for each proposal bag $B_{i}$ is obtained by summing the scores of the proposals in $B_{i}$ . The MIL loss of the PSM is constructed using the form of binary cross-entropy, and it is defined as follows:

$$
\mathbf {S} = \mathbf {S} _ {c l s} \odot \mathbf {S} _ {i n s} \odot \mathbf {S} _ {d i s} \in \mathbb {R} ^ {M \times K}; \widehat {\mathbf {S}} = \sum_ {m = 1} ^ {M} [ \mathbf {S} ] _ {m} \in \mathbb {R} ^ {K}. 
+$$ + +$$ +\mathcal {L} _ {p s m} = C E (\widehat {\mathbf {S}}, \mathbf {c}) = - \frac {1}{N} \sum_ {n = 1} ^ {N} \sum_ {k = 1} ^ {K} \mathbf {c} _ {k} \log (\widehat {\mathbf {S}} _ {k}) + (1 - \mathbf {c} _ {k}) \log (1 - \widehat {\mathbf {S}} _ {k}) +$$ + +where $\mathbf{c} \in \{0,1\}^K$ is the one-hot category's label. + +Utilizing the MILloss, the PSM module skillfully identifies each proposal's category and instance. The module selects the proposal with the highest score, marked as S, for a specific object and identifies a bounding box enriched with semantic information. + +# 3.3. Positive and Negative Proposals Generator + +To further refine the selection of more accurate bounding boxes, we employ PNPG based on $box_{psm}$ selected via + +PSM. That consists of two components: PPG and NPG. The PPG is designed to generate a richer set of positive samples, enhancing bag's quality. Concurrently, the NPG is responsible for generating negative samples, which are crucial for assisting model training. These negative samples, including background samples for all objects and part samples for each, are crucial in resolving part issues and ensuring high-quality bounding box selection. The positive sample set $B^{+}$ produced by PPG and the negative sample set $\mathcal{U}$ generated by NPG are utilized for training the subsequent PRM. + +Positive Proposals Generator (PPG). Within this phase, to implement adaptive sampling for the identified bounding box, we capitalize on the $box_{psm}$ derived from the PSM stage, coupled with the point distance penalty score $\mathbf{S}_{dis}$ attributed to each proposal. To further elaborate, for each $box_{psm}$ (denoted as $b_x^*, b_y^*, b_w^*, b_h^*$ ) isolated during the PSM phase, its dimensions are meticulously recalibrated leveraging a scale factor $v$ and its associated within-category inclusion score $\mathbf{S}_{dis}$ to generate an augmented set of positive proposals $(b_x, b_y, b_w, b_h)$ . 
The formulation is defined as follows: + +$$ +\begin{array}{l} b _ {w} = \left(1 \pm v / \mathbf {S} _ {d i s}\right) \cdot b _ {w} ^ {*}, \quad b _ {h} = \left(1 \pm v / \mathbf {S} _ {d i s}\right) \cdot b _ {h} ^ {*}, \\ b _ {x} = b _ {x} ^ {*} \pm \left(b _ {w} - b _ {w} ^ {*}\right) / 2, \quad b _ {y} = b _ {y} ^ {*} \pm \left(b _ {h} - b _ {h} ^ {*}\right) / 2. \tag {4} \\ \end{array} +$$ + +These newly cultivated positive proposals are carefully integrated into the existing set $B_{i}$ to enhance the positive instances' pool. Such enhancements are pivotal in optimizing the training of the forthcoming PRM. + +Negative Proposals Generator(NPG). MIL-based selection within a single positive bag may overemphasize the background noise, leading to inadequate focus on the object. To solve this, we create a negative bag from the back- + +Algorithm 1 Positive and Negative Proposals Generation +Input: $T_{neg1}, T_{neg2}, box_{psm}$ from PSM stage, image $I$ , positive bags $B^{+}$ . +Output: Positive proposal bags $B^{+}$ , Negative proposal set $\mathcal{U}$ . 
+1: // Step1: positive proposals sampling +2: for $i \in N, N$ is the number of objects in image $I$ do +3: $B_{i}^{+} \gets B_{i}, B_{i} \in B$ ; +4: $B_{i}^{+} = B_{i}^{+} \bigcup PPG(\text{box}_{psm}^{i})$ ; +5: end for +6: // Step2: background negative proposals sampling +7: $\mathcal{U} \gets \{\}$ ; +8: proposals $\gets$ random_sampling(1) for each image $I$ ; +9: $iou = IOU(proposals, B_{i})$ for each $B_{i} \in B$ ; +10: if $iou < T_{neg1}$ then +11: $\mathcal{U} = \mathcal{U} \bigcup$ proposals; +12: end if +13: // Step3: part negative proposals sampling +14: for $i \in N, N$ is the number of objects in image $I$ do +15: proposals $\gets$ part_neg_sampling( $box_{psm}^{i}$ ); +16: $iou = IOU(proposals, box_{psm}^{i})$ ; +17: if $iou < T_{neg2}$ then +18: $\mathcal{U} = \mathcal{U} \bigcup$ proposals; +19: end if +20: end for + +ground proposals post-positive bag training, which helps MIL maximize the attention towards the object. + +Considering the image dimensions, we randomly sample proposals according to each image's width and height, for negative instance sampling. We assess the Intersection over Union (IoU) between these negatives and the positive sets, filtering out those below a threshold $T_{neg1}$ . + +Additionally, to rectify MIL localization errors, we enforce the sampling of smaller proposals with an IoU under a second threshold, $T_{\text{neg2}}$ , from inside boxpsm based on its width and height, that is scored highest in PSM, as negative examples. These negative instances, partially capturing the object, drive the model to select high-quality bounding boxes that encompass the entire object. The PNPG is systematically elaborated upon in Algorithm1. + +# 3.4. Proposals Refinement Module + +In the PSM phase, we employ MIL to select high-quality proposals from bag $B^{+}$ . However, as shown in Fig. 2, the box $psm$ outcomes derived solely from a single-stage MIL are suboptimal and localized. 
Inspired by PCL [38], we consider refining the proposals in a second phase. However, in contrast to most WSOD methods which choose to continue refining using classification information in subsequent stages, we have established high-quality positive and negative bags, and further combined both classification and instance branches to introduce the PRM module to refine the proposals, aiming to obtain a high-quality bounding box. + +The PRM module, extending beyond the scope of PSM, + +focuses on both selection and refinement. It combines positive instances from the PPG with the initial set, forming an enriched $B^{+}$ . Simultaneously, it incorporates the negative instance set $\mathcal{U}$ from NPG, providing a comprehensive foundation for PRM. This integration leads to a restructured MIL loss in PRM, replacing the conventional CELoss with Focal Loss for positive instances. The modified positive loss function is as follows: + +$$ +\mathcal {L} _ {p o s} = \frac {1}{N} \sum_ {i = 1} ^ {N} \left\langle \mathbf {c} _ {i} ^ {\mathrm {T}}, \widehat {\mathbf {S}} _ {i} \right\rangle \cdot \operatorname {F L} \left(\widehat {\mathbf {S}} _ {i} ^ {*}, \mathbf {c} _ {i}\right). \tag {5} +$$ + +where FL is the focal loss [32], $\widehat{\mathbf{S}}_i^*$ and $\widehat{\mathbf{S}}_i$ represent the bag score predicted by PRM and PSM, respectively. $\left\langle \mathbf{c}_i^{\mathrm{T}},\widehat{\mathbf{S}}_i\right\rangle$ represents the inner product of the two vectors, meaning the predicted bag score of the ground-truth category. + +Enhancing background suppression, we use negative proposals and introduce a dedicated loss for these instances. Notably, these negative instances pass only through the classification branch for instance score computation, with their scores derived exclusively from classification. 
The specific formulation of this loss function is detailed below: + +$$ +\beta = \frac {1}{N} \sum_ {i = 1} ^ {N} \left\langle \mathbf {c} _ {i} ^ {\mathrm {T}}, \widehat {\mathbf {S}} _ {i} \right\rangle , \tag {6} +$$ + +$$ +\mathcal {L} _ {n e g} = - \frac {1}{| \mathcal {U} |} \sum_ {\mathcal {U}} \sum_ {k = 1} ^ {K} \beta \cdot \left(\left[ \mathbf {S} _ {n e g} ^ {c l s} \right] _ {k}\right) ^ {2} \log \left(1 - \left[ \mathbf {S} _ {n e g} ^ {c l s} \right] _ {k}\right). \tag {7} +$$ + +The PRM loss consists of the MIL loss $\mathcal{L}_{pos}$ for positive bags and negative loss $\mathcal{L}_{neg}$ for negative samples, i.e., + +$$ +\mathcal {L} _ {p r m} = \alpha \mathcal {L} _ {p o s} + (1 - \alpha) \mathcal {L} _ {n e g}, \tag {8} +$$ + +where $\alpha = 0.25$ by default. + +Box Mining Strategy. MIL's preference for segments with more foreground presence and SAM's tendency to capture only parts of an object often bring to final bounding boxes, $box_{prim}$ , the 'local' issue of MIL inadequately covers the instances. To improve the bounding box quality, we introduce a box mining strategy that adaptively expands $box_{select}$ from proposal selection in PRM, by merging it with the original proposals filter, aiming to address MIL's localization challenges. + +The Box Mining Strategy (BMS) consists of two primary components: (i) We select the top $k$ proposals from the positive proposal bag $B^{+}$ , to create a set $G$ . We evaluate the proposals in $G$ against box_select based on IoU and size, using a threshold $T_{min1}$ . Proposals larger than box_select and with an IoU above $T_{min1}$ undergo dynamic expansion through IoU consideration, which allows for the adaptive integration with box_select. That mitigates the 'local' issue and maintains the bounding box's consistency to the object's true boundaries. (ii) Frequently, issues related to lo + +cality can lead to an exceedingly low IoU between proposals and box_select. 
Nonetheless, the ground truth box can fully encompass the box_part. Therefore, when component (i) conditions are unmet, if a proposal can entirely encapsulate box_select, we reset the threshold $T_{min2}$ . Proposals surpassing this threshold adaptively merge with box_select to generate the final box_prm, used to yield Mask_prm. These two components collectively form our BMS strategy. A detailed procedure of this approach is delineated in Algorithm 2 of the supplementary materials.
+
+Loss Function. After acquiring the final supervision masks, $Mask_{prm}$ and the filtered $Mask_{sam}$ from Multi-mask Proposals Supervision (MPS) in Sec. 7 of the supplementary, we use them together to guide the dynamic segmentation branch. To comprehensively train SAPNet, we integrate the loss functions from the PSM and PRM, culminating in the formulation of the total loss for our model, denoted as $L_{total}$ . The aggregate loss function $L_{total}$ can be articulated as:
+
+$$
+\mathcal{L}_{total} = \mathcal{L}_{mask} + \mathcal{L}_{cls} + \lambda \cdot \mathcal{L}_{psm} + \mathcal{L}_{prm}, \tag{9}
+$$
+
+where $\mathcal{L}_{mask}$ is the Dice Loss [35], $\mathcal{L}_{cls}$ is the Focal Loss [32], and $\lambda$ is set as 0.25.
+
+# 4. Experiment
+
+# 4.1. Experimental Settings
+
+Datasets. We use the publicly available MS COCO [33] and VOC2012SBD [13] datasets for experiments. COCO17 has 118k training and 5k validation images with 80 common object categories. VOC consists of 20 categories and contains 10,582 images for model training and 1,449 validation images for evaluation.
+
+Evaluation Metric. We use mean average precision mAP@[.5,.95] for MS-COCO. The $\{AP, AP_{50}, AP_{75}, AP_{Small}, AP_{Middle}, AP_{Large}\}$ is reported for MS-COCO, and for VOC12SBD segmentation we report $AP_{25,50,75}$ . The $mIoU_{box}$ is the average IoU between predicted pseudo-boxes and GT-boxes in the training set.
It measures SAPNet's ability to select mask proposals without using the segmentation branch.
+
+Implementation Details. In our study, we employed the Stochastic Gradient Descent (SGD) optimizer, as detailed in [6]. Our experiments were conducted using the mmdetection toolbox [7], following standard training protocols for each dataset. We used the ResNet architecture [15], pretrained on ImageNet [36], as the backbone. For COCO, the batch size was set at four images per GPU across eight GPUs; for VOC2012, four GPUs were used. More details of the experiment are in Sec. 8 of the supplementary.
+
+# 4.2. Experimental Comparisons
+
+Tab. 1 shows the comparison results between our method and previous SOTA approaches [11, 16, 34, 40, 42] on COCO. In our experiments, we provide SAM with both the labeled points and the annotations generated by the point annotation enhancer [9]. SAM then utilizes these inputs to generate subsequent mask proposals for selection and supervision. For a fair comparison, we design two baselines: the top-1 scored mask from SAM and the MIL-selected SAM mask proposals are used as SOLOv2 supervision, respectively. Tab. 1 shows our method substantially surpasses these baselines in performance.
+
+Comparison with point-annotated methods. Our approach achieves 31.2 AP with a ResNet-50 backbone, surpassing all previous point-annotated methods, including BESTIE on HRNet-48 and AttnShift on Vit-B. Our model exhibits significant improvements under a 1x training schedule, with a 13.5 AP increase when compared to the previous SOTA method, BESTIE. Furthermore, under a 3x training schedule, SAPNet outperforms AttnShift, which relies on large model training, with a 13.4 AP improvement. Importantly, our method is trained end-to-end without needing post-processing, achieving SOTA performance in point-annotated instance segmentation.
+
+Comparison with other annotation-based methods.
Our SAPNet significantly elevates point annotation, notwithstanding point annotation's limitations in annotation time and quality compared to box annotation. Utilizing a ResNet-101 backbone and a 3x training schedule, SAPNet surpasses most box-annotated instance segmentation methods, achieving a 1.4 AP improvement over BoxInst. Moreover, SAPNet's segmentation performance nearly matches that of the mask-annotated methods, effectively bridging the gap between point-annotated approaches and these techniques.
+
+Segmentation performance on VOC2012SBD. Tab. 2 compares segmentation methods under different supervisions on the VOC2012 dataset. SAPNet reports an enhancement of 7.7 $AP$ over the AttnShift approach, evidencing a notable advancement in performance. It thereby significantly outstrips image-level supervised segmentation methods. Additionally, SAPNet surpasses box-annotated segmentation methods, such as BoxInst by 3.4 $AP_{50}$ and DiscoBox by 32.6 $AP_{50}$ . Further, our point-prompted method achieves $92.3\%$ of the performance of Mask R-CNN.
+
+# 4.3. Ablation Studies
+
+More experiments have been conducted on COCO to further analyze SAPNet's effectiveness and robustness.
+
+Training Stage in SAPNet. The ablation study of the training stage is given in Tab. 3. We trained SOLOv2 using the top-1 scored mask provided by SAM and compared it to the two training strategies of SAPNet. In the two-stage approach, the segmentation branch and multiple-mask supervision of SAPNet are removed. Instead, we use the selected mask to train a standalone instance segmentation model, as described by [42]. The end-to-end training method corre
MethodAnn.Backbonesched.Arch.mAP\( \mathrm{mAP}_{50} \)\( \mathrm{mAP}_{75} \)\( \mathrm{mAP}_{\mathrm{s}} \)\( \mathrm{mAP}_{\mathrm{m}} \)\( \mathrm{mAP}_{1} \)
Fully-supervised instance segmentation models.
Mask R-CNN [16]\( \mathcal{M} \)ResNet-501xMask R-CNN34.656.536.618.337.447.2
YOLACT-700 [5]\( \mathcal{M} \)ResNet-1014.5xYOLACT31.254.032.812.133.347.
PolarMask [16]\( \mathcal{M} \)ResNet-1012xPolarMask32.153.733.114.733.845.3
SOLOv2 [42]\( \mathcal{M} \)ResNet-501xSOLOv234.854.936.913.437.853.7
CondInst [40]\( \mathcal{M} \)ResNet-501xCondInst35.356.437.418.039.450.4
SwinMR [34]\( \mathcal{M} \)Swin-S50eSwinMR43.267.046.124.846.362.1
Mask2Former [11]\( \mathcal{M} \)Swin-S50eMask2Former46.169.452.825.449.768.5
Weakly-supervised instance segmentation models.
IRNet [45]\( \mathcal{I} \)ResNet-501xMask R-CNN6.111.75.5---
BESTIE [21]\( \mathcal{I} \)HRNet-481xMask R-CNN14.328.013.2---
BBTP [18]\( \mathcal{B} \)ResNet-1011xMask R-CNN21.145.517.211.222.029.8
BoxInst [39]\( \mathcal{B} \)ResNet-1013xCondInst33.256.533.616.235.345.1
DiscoBox [23]\( \mathcal{B} \)ResNet-503xSOLOv232.053.632.611.733.748.4
Boxlevelset [27]\( \mathcal{B} \)ResNet-1013xSOLOv233.456.834.115.236.846.8
WISE-Net [24]\( \mathcal{P} \)ResNet-501xMask R-CNN7.818.28.8---
BESTIE†[21]\( \mathcal{P} \)HRNet-481xMask R-CNN17.734.016.4---
AttnShift [31]\( \mathcal{P} \)Vit-B50eMask R-CNN21.243.519.4---
SAM-SOLOv2\( \mathcal{P} \)ResNet-501xSOLOv224.641.925.39.328.638.1
MIL-SOLOv2\( \mathcal{P} \)ResNet-501xSOLOv226.847.726.811.231.540.4
SAPNet(ours)\( \mathcal{P} \)ResNet-501xSOLOv231.251.832.312.635.147.8
SAPNet(ours)*\( \mathcal{P} \)ResNet-1013xSOLOv234.656.036.615.739.552.1
+ +Table 1. Mask annotation(M), image annotation(I), box annotation(B) and point annotation(P) performance on COCO-17 val. 'Ann.' is the type of the annotation and 'sched.' means schedule. * is the multi-scale augment training for re-training segmentation methods, and other experiments are on single-scale training. SwinMR is Swin-Transformer-Mask R-CNN. SwinMR and Mask2Former use multi-scale data augment strategies for SOTA. + +
MethodSup.BackboneAP25AP50AP75
Mask R-CNN [16]MR-5078.068.843.3
Mask R-CNN [16]MR-10179.670.245.3
BoxInst [39]BR-101-61.437.0
DiscoBoxBR-10172.862.237.5
BESTIE [21]IHRNet53.541.724.2
IRNet [45]IR-50-46.723.5
BESTIE† [21]IHRNet61.251.026.6
WISE-Net [24]PR-5053.543.025.9
BESTIE [21]PHRNet58.646.726.3
BESTIE† [21]PHRNet66.456.130.2
Attnshift [31]PVit-S68.354.425.4
Attnshift† [31]PVit-S70.357.130.4
SAPNet(ours)PR-10176.564.858.7
+ +Table 2. Instance segmentation performance on the VOC2012 test set. $\dagger$ indicates applying MRCNN refinement. + +sponds to the architecture illustrated in Fig. 2. Our findings indicate that our method is more competitive than directly employing SAM (31.2 AP vs 24.6 AP), and the visualization of Fig. 4 shows us this enhancement. Moreover, the end-to-end training strategy boasts a more elegant model structure and outperforms the two-stage approach in overall efficiency (31.2 AP vs 30.18 AP). + +Effect of Each Component. Given the limited performance of SAM-top1, we opted for the single-MIL as our baseline. With a preliminary selection using MIL1, we + +![](images/7e58933038a2be704e32eebd534d169562eede1cfd41805e23e5e8e4dac96f03.jpg) +Figure 4. The comparative visualization between SAM-top1 and SAPNet is presented, showcasing SAM's segmentation outcomes in green masks and our results in yellow. The orange and red bounding boxes highlight the respective mask boundaries. + +![](images/f25761820fcc09c4e886e4b8e486fab5adb98d8eec758148b0187d4ca6bdbdbb.jpg) + +
train stage on cocosched.APAP50AP75
SAM-top11x24.641.925.3
Two stage1x30.249.831.5
End to end1x31.251.832.3
+ +Table 3. The experimental comparisons of segmenters in COCO dataset, SAM-top1 is the highest scoring mask generated by SAM. + +have achieved a segmentation performance of 26.8 AP. i) Point Distance Guidance. We updated the proposal scores from the existing MIL by integrating the PDG module into the foundational MIL selection. This approach successfully segments adjacent objects of the same category, improving the segmentation performance by 0.7 points (27.5 vs 26.8). + +
mil1PDGmil2PNPGBMSMPSmAP
26.8
27.5
27.7
29.7
30.8
31.2
+ +Table 4. The effect of each component in SAPNet: proposal selection module(MIL1), point distance guidance(PDG), positive and negative proposals generator(PNPG), proposal selection module(MIL2), box mining strategy(BMS), and Multi-mask Proposals Supervision(MPS) in Sec. 7 of supplementary. + +ii) MIL2. Building on the previous step, we incorporate a second MIL selection module to refine the initially selected boxes, resulting in a performance increment of 0.2 points. iii) PNPG. For MIL2, we devised the positive-negative sample sets, aiming to enhance the input quality for the PRM module and use the negative samples to suppress background. This adjustment leads to a segmentation performance boost of 2 points (29.7 vs 27.7). iv) BMS. Within the PRM, we refine the selected boxes using BMS, pushing the segmentation performance up by 1.1 points (30.8 vs 29.7). v) MPS. Utilizing MPS for segmentation branch supervision yields a 0.4-point performance improvement. + +Threshold of BMS. For point refinement, there are two constraints (described in Sec. 3.4). $T_{min1}$ and $T_{min2}$ are thresholds of the Box Mining Strategy. In Tab. 5, it shows that the two constraints together to obtain performance gain. After multiple experiments, we have found that there is a significant performance improvement when $T_{min1}$ and $T_{min2}$ are set to 0.6 and 0.3, respectively. + +Components of PNPG. Tab. 6 presents the results of a dissected ablation study on the Positive and Negative Proposals Generator(PNPG), illustrating the respective impacts of the positive and negative examples on the model's performance. It is evident that the construction of negative examples plays a significant role in enhancing model efficacy. Furthermore, the beneficial effects of both positive and negative examples are observed to be cumulative. + +Performance Analysis. As presented in Tab. 
7, we conducted a statistical analysis to validate SAPNet's capability to address 'local' issue and compare the outcomes selected by the single-MIL with those obtained by SAPNet in the absence of segmentation branch integration. Specifically, the part problem generated by the single-MIL, where MIL is inclined to select proposals with a higher proportion of foreground, is exemplified in Fig. 6 of supplementary. On this premise, we initially establish an evaluative criterion $R_{v} = \frac{area_{mask}}{area_{box}}$ , which is the ratio of the mask area to the bounding box area. Subsequently, we compute $R_{v_i}$ for each proposal within the proposal bag corresponding to every instance across the entire COCO dataset and select the maximum $R_{v_{max}}$ to compute the mean value over the dataset, + +
Tmin1Tmin2APAP50AP75APsAPmAPl
0.50.330.951.332.012.234.747.4
0.50.430.751.231.811.934.747.1
0.60.331.251.832.312.635.147.8
0.60.430.851.132.012.134.747.3
0.70.331.051.532.212.634.947.3
0.70.430.751.131.912.034.647.2
+ +Table 5. Constraints in box mining strategy. + +
PNPGAPAP50AP75
PPGNPG
29.349.730.0
29.850.530.8
30.751.231.7
31.251.832.3
+ +Table 6. Meticulous ablation experiments in PNPG + +
MethodGapmIoUbox
Single-MIL0.19963.8
SAPNet0.13169.1
+ +Table 7. Experimental analysis with part problem. + +which is then designated as the threshold $T_{rv}$ . Ultimately, we identify the ground truth $R_{vgt}$ and objects where $R_{vmax}$ exceeds $T_{rv}$ and calculates the discrepancy between $R_{v}$ values selected by single-MIL and SAPNet. The description is as follows: + +$$ +G a p _ {s i n g l e} = R v _ {s i n g l e} - R v _ {g t}, \quad G a p _ {o u r} = R v _ {o u r} - R v _ {g t}. \tag {10} +$$ + +Tab. 7 shows that the proposed SAPNet mitigates the locality issue faced by the single-MIL. Furthermore, the boxes selected via SAPNet exhibit a substantially higher IoU with GT than those selected by the single-MIL. + +# 5. Conclusion + +In this paper, we propose SAPNet, an innovative end-to-end point-prompted instance segmentation framework. SAPNet transforms point annotations into category-agnostic mask proposals and employs dual selection branches to elect the most semantic mask for each object, guiding the segmentation process. To address challenges such as indistinguishable adjacent objects of the same class and MIL's locality bias, we integrate PDG and PNPG, complemented by a Box Mining Strategy for enhanced proposal refinement. SAPNet uniquely merges segmentation and selection branches under multi-mask supervision, significantly enhancing its segmentation performance. Extensive experimental comparisons on VOC and COCO datasets validate the SAPNet's effectiveness in point-prompted instance segmentation. + +# 6. Acknowledgements + +This work was supported in part by the Youth Innovation Promotion Association CAS, the National Natural Science Foundation of China (NSFC) under Grant No. 61836012, 61771447 and 62272438, and the Strategic Priority Research Program of the Chinese Academy of Sciences under Grant No.XDA27000000. + +# References + +[1] Jiwoon Ahn, Sunghyun Cho, and Suha Kwak. Weakly supervised learning of instance segmentation with inter-pixel relations. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2209-2218, 2019. 2 +[2] Pablo Andres Arbeláez, Jordi Pont-Tuset, and Jonathan T. Barron et al. Multiscale combinatorial grouping. In CVPR, 2014. 2 +[3] Aditya Arun, CV Jawahar, and M Pawan Kumar. Weakly supervised instance segmentation by learning annotation consistent instances. In European Conference on Computer Vision, pages 254-270. Springer, 2020. 2 +[4] Hakan Bilen and Andrea Vedaldi. Weakly supervised deep detection networks. In CVPR, 2016. 2, 3 +[5] Daniel Bolya, Chong Zhou, Fanyi Xiao, and Yong Jae Lee. Yolact: Real-time instance segmentation. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9157-9166, 2019. 1, 7 +[6] Léon Bottou. Stochastic gradient descent tricks. In Neural Networks: Tricks of the Trade: Second Edition, pages 421-436. Springer, 2012. 6 +[7] Kai Chen, Jiaqi Wang, Jiangmiao Pang, Yuhang Cao, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jiarui Xu, et al. Mmdetection: Open mmlab detection toolbox and benchmark. arXiv preprint arXiv:1906.07155, 2019. https://github.com/open-mmlab/mmdetection.6 +[8] Keyan Chen, Chenyang Liu, Hao Chen, Haotian Zhang, Wenyuan Li, Zhengxia Zou, and Zhenwei Shi. Rsprompter: Learning to prompt for remote sensing instance segmentation based on visual foundation model. arXiv preprint arXiv:2306.16269, 2023. 3 +[9] Pengfei Chen, Xuehui Yu, Xumeng Han, Najmul Hassan, Kai Wang, Jiachen Li, Jian Zhao, Humphrey Shi, Zhenjun Han, and Qixiang Ye. Point-to-box network for accurate object detection via single point supervision. In European Conference on Computer Vision, pages 51-67. Springer, 2022. 2, 3, 6 +[10] Tianle Chen, Zheda Mai, Ruiwen Li, and Wei-lun Chao. Segment anything model (sam) enhanced pseudo labels for weakly supervised semantic segmentation. arXiv preprint arXiv:2305.05803, 2023. 3 +[11] Bowen Cheng, Ishan Misra, Alexander G. 
Schwing, Alexander Kirillov, and Rohit Girdhar. Masked-attention mask transformer for universal image segmentation. In CVPR, 2022. 1, 6, 7 +[12] Bowen Cheng, Omkar Parkhi, and Alexander Kirillov. Pointly-supervised instance segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2617-2626, 2022. 2 +[13] Mark Everingham, Luc Van Gool, and Christopher K. I. Williams et al. The Pascal visual object classes (VOC) challenge. IJCV, 2010. http://host.robots.ox.ac.uk/pascal/VOC/.6 +[14] Junsong Fan, Zhaoxiang Zhang, and Tieniu Tan. Pointly- + +supervised panoptic segmentation. In European Conference on Computer Vision, pages 319-336. Springer, 2022. 1 +[15] Kaiming He, Xiangyu Zhang, and Shaoqing Ren et al. Deep residual learning for image recognition. In CVPR, 2016. 6 +[16] Kaiming He, Georgia Gkioxari, and Piotr Dólar et al. Mask R-CNN. In ICCV, 2017. 1, 6, 7 +[17] Sheng He, Rina Bao, Jingpeng Li, P Ellen Grant, and Yangming Ou. Accuracy of segment-anything model (sam) in medical image segmentation tasks. arXiv preprint arXiv:2304.09324, 2023. 3 +[18] Cheng-Chun Hsu, Kuang-Jui Hsu, Chung-Chi Tsai, Yen-Yu Lin, and Yung-Yu Chuang. Weakly supervised instance segmentation using the bounding box tightness prior. In NeurIPS, 2019. 2, 7 +[19] Peng-Tao Jiang and Yuqi Yang. Segment anything is a good pseudo-label generator for weakly supervised semantic segmentation. arXiv preprint arXiv:2305.01275, 2023. 3 +[20] Lei Ke, Mingqiao Ye, Martin Danelljan, Yifan Liu, Yu-Wing Tai, Chi-Keung Tang, and Fisher Yu. Segment anything in high quality. arXiv preprint arXiv:2306.01567, 2023. 1, 3 +[21] Beomyoung Kim, Youngjoon Yoo, Chaeun Rhee, and Junmo Kim. Beyond semantic to instance segmentation: Weakly-supervised instance segmentation via semantic knowledge transfer and self-refinement. In CVPR, 2022. 
1, 2, 7 +[22] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. https://segment-anything.com/.1,2,3 +[23] Shiyi Lan, Zhiding Yu, Christopher Choy, Subhashree Radhakrishnan, Guilin Liu, Yuke Zhu, Larry S Davis, and Anima Anandkumar. Discobox: Weakly supervised instance segmentation and semantic correspondence from box supervision. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3406-3416, 2021. 1, 2, 7 +[24] Issam H. Laradji, Negar Rostamzadeh, Pedro O. Pinheiro, David Vázquez, and Mark Schmidt. Proposal-based instance segmentation with point supervision. In ICIP, 2020. 2, 7 +[25] Jungbeom Lee, Jihun Yi, Chaehun Shin, and Sungroh Yoon. Bbam: Bounding box attribution map for weakly supervised semantic and instance segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2643-2652, 2021. 2 +[26] Feng Li, Hao Zhang, Peize Sun, Xueyan Zou, Shilong Liu, Jianwei Yang, Chunyuan Li, Lei Zhang, and Jianfeng Gao. Semantic-sam: Segment and recognize anything at any granularity. arXiv preprint arXiv:2307.04767, 2023. 3 +[27] Wentong Li, Wenyu Liu, Jianke Zhu, Miaomiao Cui, XianSheng Hua, and Lei Zhang. Box-supervised instance segmentation with level set evolution. In European conference on computer vision, pages 1-18. Springer, 2022. 1, 7 +[28] Wentong Li, Yuqian Yuan, Song Wang, Jianke Zhu, Jianshu Li, Jian Liu, and Lei Zhang. Point2mask: Point-supervised panoptic segmentation via optimal transport. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 572-581, 2023. 1, 2 + +[29] Wentong Li, Wenyu Liu, Jianke Zhu, Miaomiao Cui, Risheng Yu Xiansheng Hua, and Lei Zhang. Box2mask: Box-supervised instance segmentation via level-set evolution. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 
2 +[30] Wentong Li, Yuqian Yuan, Song Wang, Wenyu Liu, Dongqi Tang, Jianke Zhu, Lei Zhang, et al. Label-efficient segmentation via affinity propagation. Advances in Neural Information Processing Systems, 36, 2024. 2 +[31] Mingxiang Liao, Zonghao Guo, , and Yuze Wang et al. Attentionshift: Iteratively estimated part-based attention map for pointly supervised instance segmentation. In CVPR, 2023. 2, 7 +[32] Tsung-Yi Lin, Priya Goyal, and Ross B. Girshick et al. Focal loss for dense object detection. In ICCV, 2017. 5, 6 +[33] Tsung-Yi Lin, Michael Maire, and Serge et al. Belongie. Microsoft coco: Common objects in context. In ECCV, 2014. https://cocodataset.org/. 6 +[34] Ze Liu, Yutong Lin, and Yue Cao et al. Swin transformer: Hierarchical vision transformer using shifted windows. In ICCV, 2021. 6, 7 +[35] Fausto Milletari, Nassir Navab, and Seyed-Ahmad Ahmadi. V-net: Fully convolutional neural networks for volumetric medical image segmentation. In 2016 fourth international conference on 3D vision (3DV), pages 565-571. IEEE, 2016. 6 +[36] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 115:211-252, 2015. 6 +[37] Peng Tang and Xinggang Wang et al. Multiple instance detection network with online instance classifier refinement. In CVPR, 2017. 3 +[38] Peng Tang, Xinggang Wang, and Song Bai et al. PCL: proposal cluster learning for weakly supervised object detection. IEEE TPAMI, 2020. 5 +[39] Zhi Tian, Chunhua Shen, Xinlong Wang, and Hao Chen. Boxinst: High-performance instance segmentation with box annotations. In CVPR, 2021. 1, 2, 7 +[40] Zhi Tian, Bowen Zhang, Hao Chen, and Chunhua Shen. Instance and panoptic segmentation using conditional convolutions. IEEE TPAMI, 2023. 1, 2, 6, 7 +[41] Xinlong Wang, Tao Kong, Chunhua Shen, Yuning Jiang, and Lei Li. 
SOLO: segmenting objects by locations. In ECCV, 2020. +[42] Xinlong Wang, Rufeng Zhang, Tao Kong, Lei Li, and Chunhua Shen. Solov2: Dynamic and fast instance segmentation. Proc. Advances in Neural Information Processing Systems (NeurIPS), 2020. https://github.com/WXinlong/SOLO.1, 2, 3, 6, 7 +[43] Jinyu Yang, Mingqi Gao, Zhe Li, Shang Gao, Fangjing Wang, and Feng Zheng. Track anything: Segment anything meets videos. arXiv preprint arXiv:2304.11968, 2023. 3 +[44] Xu Zhao, Wenchao Ding, Yongqi An, Yinglong Du, Tao Yu, Min Li, Ming Tang, and Jinqiao Wang. Fast segment anything. arXiv preprint arXiv:2306.12156, 2023. 1, 3 + +[45] Yanning Zhou, Hao Chen, Jiaqi Xu, Qi Dou, and Pheng-Ann Heng. Irnet: Instance relation network for overlapping cervical cell segmentation. In MICCAI, 2019. 1, 7 \ No newline at end of file diff --git a/2024/Semantic-aware SAM for Point-Prompted Instance Segmentation/images.zip b/2024/Semantic-aware SAM for Point-Prompted Instance Segmentation/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..ee8274fe535675f0030588fd5758849d7ec5db5c --- /dev/null +++ b/2024/Semantic-aware SAM for Point-Prompted Instance Segmentation/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73d2fb139f3e991398ac2c673f934fb51cef920fc337928331dbf13cd4336ac2 +size 599192 diff --git a/2024/Semantic-aware SAM for Point-Prompted Instance Segmentation/layout.json b/2024/Semantic-aware SAM for Point-Prompted Instance Segmentation/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..adb197dac28e33c5238547dfa2eddf4f46014785 --- /dev/null +++ b/2024/Semantic-aware SAM for Point-Prompted Instance Segmentation/layout.json @@ -0,0 +1,8857 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 94, + 103, + 499, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 103, + 499, + 121 + ], + "spans": [ + { + "bbox": [ + 94, + 103, + 499, + 121 + ], + "type": "text", 
+ "content": "Semantic-aware SAM for Point-Prompted Instance Segmentation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 149, + 142, + 443, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 142, + 443, + 171 + ], + "spans": [ + { + "bbox": [ + 149, + 142, + 443, + 171 + ], + "type": "text", + "content": "Zhaoyang Wei" + }, + { + "bbox": [ + 149, + 142, + 443, + 171 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 149, + 142, + 443, + 171 + ], + "type": "text", + "content": ", Pengfei Chen" + }, + { + "bbox": [ + 149, + 142, + 443, + 171 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 149, + 142, + 443, + 171 + ], + "type": "text", + "content": ", Xuehui Yu" + }, + { + "bbox": [ + 149, + 142, + 443, + 171 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 149, + 142, + 443, + 171 + ], + "type": "text", + "content": ", Guorong Li" + }, + { + "bbox": [ + 149, + 142, + 443, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 149, + 142, + 443, + 171 + ], + "type": "text", + "content": ", Jianbin Jiao" + }, + { + "bbox": [ + 149, + 142, + 443, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 149, + 142, + 443, + 171 + ], + "type": "text", + "content": ", Zhenjun Han" + }, + { + "bbox": [ + 149, + 142, + 443, + 171 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 200, + 172, + 394, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 172, + 394, + 185 + ], + "spans": [ + { + "bbox": [ + 200, + 172, + 394, + 185 + ], + "type": "text", + "content": "1University of Chinese Academy of Sciences(UCAS)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "spans": [ + { + 
"bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 238, + 290, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 238, + 290, + 550 + ], + "spans": [ + { + "bbox": [ + 47, + 238, + 290, + 550 + ], + "type": "text", + "content": "Single-point annotation in visual tasks, with the goal of minimizing labelling costs, is becoming increasingly prominent in research. Recently, visual foundation models, such as Segment Anything (SAM), have gained widespread usage due to their robust zero-shot capabilities and exceptional annotation performance. However, SAM's class-agnostic output and high confidence in local segmentation introduce semantic ambiguity, posing a challenge for precise category-specific segmentation. In this paper, we introduce a cost-effective category-specific segmenter using SAM. To tackle this challenge, we have devised a Semantic-Aware Instance Segmentation Network (SAPNet) that integrates Multiple Instance Learning (MIL) with matching capability and SAM with point prompts. SAPNet strategically selects the most representative mask proposals generated by SAM to supervise segmentation, with a specific focus on object category information. Moreover, we introduce the Point Distance Guidance and Box Mining Strategy to mitigate inherent challenges: group and local issues in weakly supervised segmentation. These strategies serve to further enhance the overall segmentation performance. The experimental results on Pascal VOC and COCO demonstrate the promising performance of our proposed SAPNet, emphasizing its semantic matching capabilities and its potential to advance point-prompted instance segmentation. The code is available at https://github.com/zhaoyangwei123/SAPNet." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 571, + 128, + 583 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 571, + 128, + 583 + ], + "spans": [ + { + "bbox": [ + 47, + 571, + 128, + 583 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 590, + 287, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 590, + 287, + 687 + ], + "spans": [ + { + "bbox": [ + 46, + 590, + 287, + 687 + ], + "type": "text", + "content": "Instance segmentation seeks to discern pixel-level labels for both instances of interest and their semantic content in images, a crucial function in domains like autonomous driving, image editing, and human-computer interaction. Despite impressive results demonstrated by various studies [5, 11, 16, 40-42], the majority of these high-performing methods are trained in a fully supervised manner and heavily dependent on detailed pixel-level mask annotations," + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 307, + 210, + 553, + 333 + ], + "blocks": [ + { + "bbox": [ + 307, + 210, + 553, + 333 + ], + "lines": [ + { + "bbox": [ + 307, + 210, + 553, + 333 + ], + "spans": [ + { + "bbox": [ + 307, + 210, + 553, + 333 + ], + "type": "image", + "image_path": "812a1aefd54f5b63c9e2f44b1709b59996855a7d9a975724833467707765d8e9.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 338, + 547, + 449 + ], + "lines": [ + { + "bbox": [ + 305, + 338, + 547, + 449 + ], + "spans": [ + { + "bbox": [ + 305, + 338, + 547, + 449 + ], + "type": "text", + "content": "Figure 1. Three Challenges Brought by SAM and single-MIL. Orange dash box illustrates that semantic ambiguity in SAM-generated masks, where it erroneously assigns higher scores to non-object categories like clothes, despite the person being our desired target. 
Green dash box depicts a comparison between mask proposals using single-MIL and SAPNet. It illustrates two primary challenges: 'group', where segmentation encounters difficulties in isolating individual targets among adjacent objects of the same category, and 'local', where MIL favors foreground-dominant regions, resulting in overlooked local details." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 459, + 545, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 459, + 545, + 519 + ], + "spans": [ + { + "bbox": [ + 304, + 459, + 545, + 519 + ], + "type": "text", + "content": "thereby incurring significant labeling costs. To address this challenge, researchers are increasingly focusing on weakly supervised instance segmentation, leveraging cost-effective supervision methods, such as bounding boxes [23, 27, 39], points [14, 28], and image-level labels [21, 45]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 520, + 546, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 520, + 546, + 663 + ], + "spans": [ + { + "bbox": [ + 304, + 520, + 546, + 663 + ], + "type": "text", + "content": "Recently, visual foundation models, such as Segment Anything (SAM)[22], have been widely employed by researchers for their exceptional generalization capabilities and impressive annotation performance. Numerous studies based on SAM, such as [20, 44] have emerged, building upon the foundations of SAM to further enhance its generalization capabilities and efficiency. However, these efforts have predominantly focused on improving the annotation performance of SAM. One limitation arises from SAM's lack of classification ability, resulting in class-agnostic segmentation results that fail to accurately segment specific categories as desired." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 665, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 665, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 665, + 547, + 713 + ], + "type": "text", + "content": "To tackle the inherent semantic ambiguity in SAM and achieve specific-category segmentation, we propose integrating weak annotations with SAM, employing point annotations as prompts to imbue semantic information into" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 693, + 128, + 702 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 693, + 128, + 702 + ], + "spans": [ + { + "bbox": [ + 58, + 693, + 128, + 702 + ], + "type": "text", + "content": "* Equal contribution." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 58, + 703, + 211, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 703, + 211, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 703, + 211, + 712 + ], + "type": "text", + "content": "† Corresponding authors. 
(hanzhj@ucas.ac.cn)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "3585" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "content": "SAM's outputs. A straightforward approach involves leveraging SAM's intrinsic scoring mechanism, selecting the top-scoring mask as the corresponding label for each category. However, when annotating object points are fed into the SAM, its category-agnostic characteristic tends to assign higher scores to parts of the object, resulting in generated mask annotations that fail to encompass the object as a whole. In Fig. 1 orange dashed box, we aim to obtain the 'person' mask annotation, but SAM predicts the proposals of 'clothes', 'clothes+trousers' and 'person'. Relying solely on the score SAM provides is insufficient, as the highest score corresponds to 'clothes' (col-2), which does not meet our specific needs." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 229, + 289, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 229, + 289, + 529 + ], + "spans": [ + { + "bbox": [ + 46, + 229, + 289, + 529 + ], + "type": "text", + "content": "To address this challenge, we have proposed SAPNet, a semantically-aware instance segmentation network designed for high-quality, end-to-end segmentation. In this study, we design a proposal selection module (PSM) using the Multiple Instance Learning (MIL) paradigm to choose proposals that align closely with the specified semantic label. 
However, the MIL-based method relies on the classification score, often leading to group and local predictions [4, 21, 24]. In Fig. 1 green dashed box, the group issue is evident, where two objects of the same category are often both included when they are in close proximity. It also illustrates the local issue, where the MIL classifier frequently predicts the most discriminative region instead of the entire object. To overcome these limitations, we have introduced Point Distance Guidance (PDG) and Box Mining Strategies (BMS). Specifically, we penalize the selection results by calculating the Euclidean distances between the annotated points of identical categories enclosed within the proposals. Additionally, for more localized proposals, we filter out higher-quality proposals from their corresponding bags and dynamically merge them in scale. By fully exploiting the positional clues to prevent local and group prediction, we aim to select the proposal that most effectively represents the object category in refinement stage. The primary contributions of this work can be outlined as follows:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 530, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 46, + 530, + 287, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 530, + 287, + 602 + ], + "spans": [ + { + "bbox": [ + 46, + 530, + 287, + 602 + ], + "type": "text", + "content": "1) We introduce SAPNet, an end-to-end semantic-aware instance segmentation network based on point prompts. SAPNet combines the visual foundation model SAM with semantic information to address its inherent semantic ambiguity, facilitating the generation of semantically-aware proposal masks." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 604, + 287, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 604, + 287, + 653 + ], + "spans": [ + { + "bbox": [ + 46, + 604, + 287, + 653 + ], + "type": "text", + "content": "2) We incorporate Point Distance Guidance (PDG) and Box Mining Strategies (BMS) to prevent local and group predictions induced by MIL-based classifiers in both the proposal selection and refinement stages." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 653, + 287, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 287, + 712 + ], + "type": "text", + "content": "3) SAPNet achieves state-of-the-art performance in Point-Prompted Instance Segmentation (PPIS), significantly bridging the gap between point-prompted and fully-supervised segmentation methods on two challenging benchmarks (COCO and VOC2012)." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 71, + 392, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 71, + 392, + 84 + ], + "spans": [ + { + "bbox": [ + 306, + 71, + 392, + 84 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 91, + 545, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 91, + 545, + 378 + ], + "spans": [ + { + "bbox": [ + 304, + 91, + 545, + 378 + ], + "type": "text", + "content": "Weakly-Supervised Instance Segmentation (WSIS) offers a practical approach for accurate object masks using minimal supervision. It spans a range of annotations, from image labels to bounding boxes. Research has focused on narrowing the performance gap between weakly and fully-supervised methods, primarily through box-level [18, 25, 39] and image-level annotations [1, 21]. 
Box-based methods have explored structural constraints to guide the segmentation, as seen in BBTP [18], BoxInst [39], and Box2Mask [29], and applied structural constraints to drive segmentation, treating it as a multiple-instance learning task or enforcing color consistency based on CondInst [40]. These approaches, while innovative, can complicate training and sometimes neglect the object's overall shape due to their focus on local features and proposal generation, like MCG [2]. Conversely, the proposal-free methods, like IRN [1], rely on class relationships for mask production but can falter in accurately separating instances. To preserve object integrity, recent methods such as Discobox [23] and BESTIE [21] integrate advanced semantic insights into instance segmentation using pairwise losses or saliency cues [30, 39, 42]. However, semantic drift remains an issue, with mislabeling or missed instances resulting in inferior pseudo labels [3] compromising segmentation quality." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 379, + 545, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 379, + 545, + 665 + ], + "spans": [ + { + "bbox": [ + 304, + 379, + 545, + 665 + ], + "type": "text", + "content": "Pointly-Supervised Detection and Segmentation (PSDS) cleverly balances minimal annotation costs with satisfactory localization accuracy. By introducing point annotations, WISE-Net [24], P2BNet [9] and BESTIE [21] improve upon weakly supervised methods that suffer from vague localizations. That only slightly increases the costs (by about " + }, + { + "bbox": [ + 304, + 379, + 545, + 665 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 304, + 379, + 545, + 665 + ], + "type": "text", + "content": ") and is almost as quick as the image-level annotation, but that is far speedier than more detailed bounding box or mask annotations. 
Such precision allows for tackling semantic bias, as seen in methods like PointRend [12], which utilize multiple points for improved accuracy, despite requiring additional bounding box supervision. Recent advancements in point-supervised instance segmentation, employed by WISE-Net and Point2Mask [28], show that even single-point annotations can yield precise mask proposals. WISE-Net skillfully localizes objects and selects masks, while BESTIE enhances accuracy using instance cues and self-correction to reduce semantic drift. Attnshift [31] advances this by extending single points to reconstruct entire objects. Apart from their complexity, these methods have yet to fully demonstrate their effectiveness, indicating ongoing challenges in harnessing single-point annotations for image segmentation and presenting clear avenues for further research." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "Prompting and Foundation Models. Prompt-based learning enables pretrained foundation models to adapt to various tasks using well-crafted prompts. 
SAM [22], a prominent example in computer vision, exemplifies robust zero" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "3586" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 277 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 277 + ], + "type": "text", + "content": "shot generalization and interactive segmentation across multiple applications. Additionally, SAM-based models like Fast-SAM [44] increases speed, HQ-SAM [20] improves segmentation quality, and Semantic-SAM [26] optimizes performance by training on diverse data granularities. Foundational models, pre-trained on large datasets, help improve generalization in downstream tasks, especially in data-scarce scenarios. Basing on SAM, Rsprompter [8] utilizes SAM-derived pseudo labels for improved remote sensing segmentation, meanwhile, adaptations for medical imaging and video tracking are explored in A-SAM [17] and Tracking Anything [43]. Further, [10] and [19] have integrated SAM with Weakly Supervised Semantic Segmentation networks to refine pseudo labels. Our research builds upon these innovations, transforming point annotations into mask proposals in instance segmentation to significantly enhancing performance." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 285, + 129, + 300 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 285, + 129, + 300 + ], + "spans": [ + { + "bbox": [ + 47, + 285, + 129, + 300 + ], + "type": "text", + "content": "3. 
Methodology" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 306, + 115, + 317 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 306, + 115, + 317 + ], + "spans": [ + { + "bbox": [ + 47, + 306, + 115, + 317 + ], + "type": "text", + "content": "3.1. Overview" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 325, + 289, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 325, + 289, + 576 + ], + "spans": [ + { + "bbox": [ + 46, + 325, + 289, + 576 + ], + "type": "text", + "content": "The overview of our method is illustrated in Fig. 2, SAPNet comprises of two branches: one dedicated to the selection and refinement of mask proposals to generate pseudo-labels and the other employing solov2 head [42] for instance segmentation supervised by the generated pseudo labels. The central focus of our approach is the pseudo-label generation branch, exclusively utilized during the training phase, which includes the PSM, PNPG, and PRM modules. Following the initial proposal inputs, the PSM employs multi-instance learning and a point-distance penalty to identify semantically rich proposals. Subsequently, coupled with selected proposals from the PSM stage, the PNPG generates quality positive-negative bags to mitigate background and locality issues, emphasizing the primary regions of interest. Then, the PRM processes these bags, which selects refined proposals from positive bags to improve final box quality. Ultimately, the mask mappings derived from these box proposals are utilized to guide the segmentation branch. This guarantees the acquisition of high-quality category-specified mask proposals to supervise the segmentation branch." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 586, + 195, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 586, + 195, + 597 + ], + "spans": [ + { + "bbox": [ + 47, + 586, + 195, + 597 + ], + "type": "text", + "content": "3.2. 
Proposal Selection Module" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 605, + 287, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 688 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 688 + ], + "type": "text", + "content": "SAM's limited semantic discernment causes category-agnostic labeling, leading to inconsistent proposal quality for the same objects. Employing these proposals directly for segmentation supervision could introduce noise and impair performance. Our goal is to design a category-specific segmenter, which needs to select the most semantically representative proposals for robust supervision." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "content": "Motivated by the insights from WSDDN [4] and P2BNet [9], our proposal selection module employs multi-instance" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 72, + 545, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 215 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 215 + ], + "type": "text", + "content": "learning and leverages labeling information to prioritize high-confidence proposals for segmentation. In the training phase, we leverage SAM[22] solely to generate category-agnostic proposals. 
To avoid excessive memory use and slow training, we convert them into box proposals using the minimum bounding rectangle, and combine with depth features " + }, + { + "bbox": [ + 304, + 72, + 545, + 215 + ], + "type": "inline_equation", + "content": "F \\in \\mathbb{R}^{H \\times W \\times D}" + }, + { + "bbox": [ + 304, + 72, + 545, + 215 + ], + "type": "text", + "content": " from the image " + }, + { + "bbox": [ + 304, + 72, + 545, + 215 + ], + "type": "inline_equation", + "content": "I \\in \\mathbb{R}^{H \\times W}" + }, + { + "bbox": [ + 304, + 72, + 545, + 215 + ], + "type": "text", + "content": ", serve as input to the PSM. Utilizing our designed MIL loss, PSM precisely predicts each proposal's class and instance details. It selects the highest-scoring proposal as the semantically richest bounding box for each object, effectively choosing higher quality mask proposals." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "spans": [ + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "text", + "content": "Given an image " + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "text", + "content": " point annotations " + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "inline_equation", + "content": "Y_{n} = \\{(p_{i},c_{i})\\}_{i = 1}^{N}" + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "inline_equation", + "content": "p_i" + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "text", + "content": " is the 
coordinate of the annotated point and " + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "inline_equation", + "content": "c_{i}" + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "text", + "content": " is the class index. We transform each class-informative point " + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "inline_equation", + "content": "p_i" + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "text", + "content": " into " + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "text", + "content": " semantic mask proposals, which is further converted to a semantic proposal bag " + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "inline_equation", + "content": "B_{i}\\in \\mathbb{R}^{M\\times 4}" + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "text", + "content": ". As illustrated in Fig. 2, after passing through a 7x7 RoIAlign layer and two fully-connected layers, features " + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "inline_equation", + "content": "F_{i}\\in \\mathbb{R}^{M\\times H\\times W\\times D}" + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "text", + "content": " are extracted from proposal bag " + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "inline_equation", + "content": "B_{i}" + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "text", + "content": ". 
Like in [4] and [37], the features " + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "text", + "content": " serve as input for the classification branch and instance branch, using fully-connected layer " + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "inline_equation", + "content": "f^{\\prime}" + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "text", + "content": " to generate " + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_{cls}\\in \\mathbb{R}^{M\\times K}" + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_{ins}\\in \\mathbb{R}^{M\\times K}" + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "text", + "content": ". 
A softmax activation function over " + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "text", + "content": " class and " + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "text", + "content": " instance dimensions yields the classification scores " + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "inline_equation", + "content": "\\mathbf{S}_{cls}\\in \\mathbb{R}^{M\\times K}" + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "text", + "content": " and instance scores " + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "inline_equation", + "content": "\\mathbf{S}_{ins}\\in \\mathbb{R}^{M\\times K}" + }, + { + "bbox": [ + 305, + 216, + 546, + 384 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 388, + 545, + 423 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 388, + 545, + 423 + ], + "spans": [ + { + "bbox": [ + 313, + 388, + 545, + 423 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbf {W} _ {c l s} = f (\\mathbf {F}); [ \\mathbf {S} _ {c l s} ] _ {m k} = e ^ {[ \\mathbf {W} _ {c l s} ] _ {m k}} / \\sum_ {k = 1} ^ {K} e ^ {[ \\mathbf {W} _ {c l s} ] _ {m k}}. \\\\ \\mathbf {W} _ {i n s} = f ^ {\\prime} (\\mathbf {F}); [ \\mathbf {S} _ {i n s} ] _ {m k} = e ^ {[ \\mathbf {W} _ {i n s} ] _ {m k}} / \\sum_ {m = 1} ^ {M} e ^ {[ \\mathbf {W} _ {i n s} ] _ {m k}}. 
\\tag {1} \\\\ \\end{array}", + "image_path": "d95ca14f8c84bd376681dadd00d96e8362efdf329063f16ee4f3b4bf129e0ee9.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 426, + 542, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 426, + 542, + 437 + ], + "spans": [ + { + "bbox": [ + 306, + 426, + 542, + 437 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 306, + 426, + 542, + 437 + ], + "type": "inline_equation", + "content": "[\\cdot]_{mk}" + }, + { + "bbox": [ + 306, + 426, + 542, + 437 + ], + "type": "text", + "content": " is the value in row " + }, + { + "bbox": [ + 306, + 426, + 542, + 437 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 306, + 426, + 542, + 437 + ], + "type": "text", + "content": " and column " + }, + { + "bbox": [ + 306, + 426, + 542, + 437 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 306, + 426, + 542, + 437 + ], + "type": "text", + "content": " of matrix." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 437, + 545, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 437, + 545, + 510 + ], + "spans": [ + { + "bbox": [ + 304, + 437, + 545, + 510 + ], + "type": "text", + "content": "Point Distance Guidance. SAM and MIL struggle with distinguishing adjacent objects of the same category, often merging two separate objects into one and giving high score. To combat this, we incorporate instance-level annotated point information and introduce a spatially aware selection with a point-distance penalty mechanism." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 510, + 546, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 546, + 654 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 546, + 654 + ], + "type": "text", + "content": "To address the challenge of overlapping objects and thereby enhance model optimization, we propose a strategy specifically aimed at penalizing instances of object overlap. For each m-th proposal within the set " + }, + { + "bbox": [ + 304, + 510, + 546, + 654 + ], + "type": "inline_equation", + "content": "B_{i}" + }, + { + "bbox": [ + 304, + 510, + 546, + 654 + ], + "type": "text", + "content": ", we define " + }, + { + "bbox": [ + 304, + 510, + 546, + 654 + ], + "type": "inline_equation", + "content": "t_{mj} = 1" + }, + { + "bbox": [ + 304, + 510, + 546, + 654 + ], + "type": "text", + "content": " to denote an overlap with any proposal in another identical class bag " + }, + { + "bbox": [ + 304, + 510, + 546, + 654 + ], + "type": "inline_equation", + "content": "B_{j}" + }, + { + "bbox": [ + 304, + 510, + 546, + 654 + ], + "type": "text", + "content": "; otherwise, " + }, + { + "bbox": [ + 304, + 510, + 546, + 654 + ], + "type": "inline_equation", + "content": "t_{mj} = 0" + }, + { + "bbox": [ + 304, + 510, + 546, + 654 + ], + "type": "text", + "content": ". The penalty imposed increases in proportion to the distance of the overlapping objects from the proposal in question. This penalty, " + }, + { + "bbox": [ + 304, + 510, + 546, + 654 + ], + "type": "inline_equation", + "content": "W_{dis}" + }, + { + "bbox": [ + 304, + 510, + 546, + 654 + ], + "type": "text", + "content": ", is represented using the Euclidean distance between the annotated points of the overlapping proposals. 
Subsequently, the reciprocal of "
                            },
                            {
                                "bbox": [
                                    304,
                                    510,
                                    546,
                                    654
                                ],
                                "type": "inline_equation",
                                "content": "W_{dis}"
                            },
                            {
                                "bbox": [
                                    304,
                                    510,
                                    546,
                                    654
                                ],
                                "type": "text",
                                "content": " is then passed through a sigmoid function to compute the distance score "
                            },
                            {
                                "bbox": [
                                    304,
                                    510,
                                    546,
                                    654
                                ],
                                "type": "inline_equation",
                                "content": "\mathbf{S}_{dis}"
                            },
                            {
                                "bbox": [
                                    304,
                                    510,
                                    546,
                                    654
                                ],
                                "type": "text",
                                "content": " for the proposal."
                            }
                        ]
                    }
                ],
                "index": 12
            },
            {
                "bbox": [
                    357,
                    657,
                    545,
                    700
                ],
                "type": "interline_equation",
                "angle": 0,
                "lines": [
                    {
                        "bbox": [
                            357,
                            657,
                            545,
                            700
                        ],
                        "spans": [
                            {
                                "bbox": [
                                    357,
                                    657,
                                    545,
                                    700
                                ],
                                "type": "interline_equation",
                                "content": "\begin{array}{l} \left[ \mathbf {W} _ {d i s} \right] _ {i m} = \sum_ {j = 1, j \neq i} ^ {N} \| p _ {i} - p _ {j} \| * t _ {m j}. \tag {2} \\ [ \mathbf {S} _ {d i s} ] _ {i m} = (1 / (1 + e ^ {- (1 / [ \mathbf {W} _ {d i s} ] _ {i m})})) ^ {d}. 
\\\\ \\end{array}", + "image_path": "d86226bd17ea98e7b199b4f419a839f59c498994f2c9c82a0bdf4ab4f4461d31.jpg" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "3587" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 60, + 70, + 541, + 265 + ], + "blocks": [ + { + "bbox": [ + 60, + 70, + 541, + 265 + ], + "lines": [ + { + "bbox": [ + 60, + 70, + 541, + 265 + ], + "spans": [ + { + "bbox": [ + 60, + 70, + 541, + 265 + ], + "type": "image", + "image_path": "6d66dc9f49b4155db0bdf1985b5c709d0f637159652c5b601a231cd7ce856475.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 266, + 547, + 323 + ], + "lines": [ + { + "bbox": [ + 46, + 266, + 547, + 323 + ], + "spans": [ + { + "bbox": [ + 46, + 266, + 547, + 323 + ], + "type": "text", + "content": "Figure 2. The framework of SAPNet comprises two components: one for generating mask proposals and another for their utilization in instance segmentation. The process starts with generating category-agnostic mask proposals using point prompts within a visual foundation model. That is followed by an initial proposal selection via MIL combined with PDG. Next, the PRM refines these proposals using positive and negative samples from PNPG, capturing global object semantics. Finally, augmented with the multi-mask proposal supervision, the segmentation branch aims to improve segmentation quality." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 48, + 330, + 289, + 403 + ], + "blocks": [ + { + "bbox": [ + 48, + 330, + 289, + 403 + ], + "lines": [ + { + "bbox": [ + 48, + 330, + 289, + 403 + ], + "spans": [ + { + "bbox": [ + 48, + 330, + 289, + 403 + ], + "type": "image", + "image_path": "0cfbabbee76143e3c4f53d38feb7e5afc47bd365548088625de6f90afda6b588.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 58, + 410, + 275, + 421 + ], + "lines": [ + { + "bbox": [ + 58, + 410, + 275, + 421 + ], + "spans": [ + { + "bbox": [ + 58, + 410, + 275, + 421 + ], + "type": "text", + "content": "Figure 3. The mechanism of the proposal selection module." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 422, + 287, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 422, + 287, + 445 + ], + "spans": [ + { + "bbox": [ + 47, + 422, + 287, + 445 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 422, + 287, + 445 + ], + "type": "inline_equation", + "content": "\\left[\\cdot\\right]_{im}" + }, + { + "bbox": [ + 47, + 422, + 287, + 445 + ], + "type": "text", + "content": " is the value at the row " + }, + { + "bbox": [ + 47, + 422, + 287, + 445 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 47, + 422, + 287, + 445 + ], + "type": "text", + "content": " and column " + }, + { + "bbox": [ + 47, + 422, + 287, + 445 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 47, + 422, + 287, + 445 + ], + "type": "text", + "content": " in the matrix, and " + }, + { + "bbox": [ + 47, + 422, + 287, + 445 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 47, + 422, + 287, + 445 + ], + "type": "text", + "content": " is the exponential factor." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 445, + 287, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 445, + 287, + 529 + ], + "spans": [ + { + "bbox": [ + 46, + 445, + 287, + 529 + ], + "type": "text", + "content": "PSM Loss. The final score " + }, + { + "bbox": [ + 46, + 445, + 287, + 529 + ], + "type": "inline_equation", + "content": "\\mathbf{S}" + }, + { + "bbox": [ + 46, + 445, + 287, + 529 + ], + "type": "text", + "content": " of each proposal is obtained by computing the Hadamard product of the classification score, the instance score, and the distance score, while the score " + }, + { + "bbox": [ + 46, + 445, + 287, + 529 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{S}}" + }, + { + "bbox": [ + 46, + 445, + 287, + 529 + ], + "type": "text", + "content": " for each proposal bag " + }, + { + "bbox": [ + 46, + 445, + 287, + 529 + ], + "type": "inline_equation", + "content": "B_{i}" + }, + { + "bbox": [ + 46, + 445, + 287, + 529 + ], + "type": "text", + "content": " is obtained by summing the scores of the proposals in " + }, + { + "bbox": [ + 46, + 445, + 287, + 529 + ], + "type": "inline_equation", + "content": "B_{i}" + }, + { + "bbox": [ + 46, + 445, + 287, + 529 + ], + "type": "text", + "content": ". 
The MILloss of the PSM is constructed using the form of binary crossentropy, and it is defined as follows:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 531, + 251, + 559 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 531, + 251, + 559 + ], + "spans": [ + { + "bbox": [ + 47, + 531, + 251, + 559 + ], + "type": "interline_equation", + "content": "\\mathbf {S} = \\mathbf {S} _ {c l s} \\odot \\mathbf {S} _ {i n s} \\odot \\mathbf {S} _ {d i s} \\in \\mathbb {R} ^ {M \\times K}; \\widehat {\\mathbf {S}} = \\sum_ {m = 1} ^ {M} [ \\mathbf {S} ] _ {m} \\in \\mathbb {R} ^ {K}.", + "image_path": "831c6c4fa7690ea6e9d142bb15e7a484ef12d49dd5f1fb209c6e7b94f118731b.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 561, + 296, + 587 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 561, + 296, + 587 + ], + "spans": [ + { + "bbox": [ + 47, + 561, + 296, + 587 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {p s m} = C E (\\widehat {\\mathbf {S}}, \\mathbf {c}) = - \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\sum_ {k = 1} ^ {K} \\mathbf {c} _ {k} \\log (\\widehat {\\mathbf {S}} _ {k}) + (1 - \\mathbf {c} _ {k}) \\log (1 - \\widehat {\\mathbf {S}} _ {k})", + "image_path": "e5414c8cbb3731ae708c173f5d193a2cc146ab748cefce0aee2b9add870b0c31.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 590, + 251, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 590, + 251, + 604 + ], + "spans": [ + { + "bbox": [ + 47, + 590, + 251, + 604 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 590, + 251, + 604 + ], + "type": "inline_equation", + "content": "\\mathbf{c} \\in \\{0,1\\}^K" + }, + { + "bbox": [ + 47, + 590, + 251, + 604 + ], + "type": "text", + "content": " is the one-hot category's label." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 604, + 287, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 604, + 287, + 664 + ], + "spans": [ + { + "bbox": [ + 46, + 604, + 287, + 664 + ], + "type": "text", + "content": "Utilizing the MILloss, the PSM module skillfully identifies each proposal's category and instance. The module selects the proposal with the highest score, marked as S, for a specific object and identifies a bounding box enriched with semantic information." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 671, + 270, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 671, + 270, + 685 + ], + "spans": [ + { + "bbox": [ + 47, + 671, + 270, + 685 + ], + "type": "text", + "content": "3.3. Positive and Negative Proposals Generator" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 689, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 287, + 715 + ], + "type": "text", + "content": "To further refine the selection of more accurate bounding boxes, we employ PNPG based on " + }, + { + "bbox": [ + 46, + 689, + 287, + 715 + ], + "type": "inline_equation", + "content": "box_{psm}" + }, + { + "bbox": [ + 46, + 689, + 287, + 715 + ], + "type": "text", + "content": " selected via" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 330, + 547, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 330, + 547, + 450 + ], + "spans": [ + { + "bbox": [ + 304, + 330, + 547, + 450 + ], + "type": "text", + "content": "PSM. That consists of two components: PPG and NPG. The PPG is designed to generate a richer set of positive samples, enhancing bag's quality. Concurrently, the NPG is responsible for generating negative samples, which are crucial for assisting model training. 
These negative samples, including background samples for all objects and part samples for each, are crucial in resolving part issues and ensuring high-quality bounding box selection. The positive sample set " + }, + { + "bbox": [ + 304, + 330, + 547, + 450 + ], + "type": "inline_equation", + "content": "B^{+}" + }, + { + "bbox": [ + 304, + 330, + 547, + 450 + ], + "type": "text", + "content": " produced by PPG and the negative sample set " + }, + { + "bbox": [ + 304, + 330, + 547, + 450 + ], + "type": "inline_equation", + "content": "\\mathcal{U}" + }, + { + "bbox": [ + 304, + 330, + 547, + 450 + ], + "type": "text", + "content": " generated by NPG are utilized for training the subsequent PRM." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 453, + 546, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 453, + 546, + 582 + ], + "spans": [ + { + "bbox": [ + 304, + 453, + 546, + 582 + ], + "type": "text", + "content": "Positive Proposals Generator (PPG). Within this phase, to implement adaptive sampling for the identified bounding box, we capitalize on the " + }, + { + "bbox": [ + 304, + 453, + 546, + 582 + ], + "type": "inline_equation", + "content": "box_{psm}" + }, + { + "bbox": [ + 304, + 453, + 546, + 582 + ], + "type": "text", + "content": " derived from the PSM stage, coupled with the point distance penalty score " + }, + { + "bbox": [ + 304, + 453, + 546, + 582 + ], + "type": "inline_equation", + "content": "\\mathbf{S}_{dis}" + }, + { + "bbox": [ + 304, + 453, + 546, + 582 + ], + "type": "text", + "content": " attributed to each proposal. 
To further elaborate, for each " + }, + { + "bbox": [ + 304, + 453, + 546, + 582 + ], + "type": "inline_equation", + "content": "box_{psm}" + }, + { + "bbox": [ + 304, + 453, + 546, + 582 + ], + "type": "text", + "content": " (denoted as " + }, + { + "bbox": [ + 304, + 453, + 546, + 582 + ], + "type": "inline_equation", + "content": "b_x^*, b_y^*, b_w^*, b_h^*" + }, + { + "bbox": [ + 304, + 453, + 546, + 582 + ], + "type": "text", + "content": ") isolated during the PSM phase, its dimensions are meticulously recalibrated leveraging a scale factor " + }, + { + "bbox": [ + 304, + 453, + 546, + 582 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 304, + 453, + 546, + 582 + ], + "type": "text", + "content": " and its associated within-category inclusion score " + }, + { + "bbox": [ + 304, + 453, + 546, + 582 + ], + "type": "inline_equation", + "content": "\\mathbf{S}_{dis}" + }, + { + "bbox": [ + 304, + 453, + 546, + 582 + ], + "type": "text", + "content": " to generate an augmented set of positive proposals " + }, + { + "bbox": [ + 304, + 453, + 546, + 582 + ], + "type": "inline_equation", + "content": "(b_x, b_y, b_w, b_h)" + }, + { + "bbox": [ + 304, + 453, + 546, + 582 + ], + "type": "text", + "content": ". The formulation is defined as follows:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 329, + 583, + 545, + 610 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 329, + 583, + 545, + 610 + ], + "spans": [ + { + "bbox": [ + 329, + 583, + 545, + 610 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} b _ {w} = \\left(1 \\pm v / \\mathbf {S} _ {d i s}\\right) \\cdot b _ {w} ^ {*}, \\quad b _ {h} = \\left(1 \\pm v / \\mathbf {S} _ {d i s}\\right) \\cdot b _ {h} ^ {*}, \\\\ b _ {x} = b _ {x} ^ {*} \\pm \\left(b _ {w} - b _ {w} ^ {*}\\right) / 2, \\quad b _ {y} = b _ {y} ^ {*} \\pm \\left(b _ {h} - b _ {h} ^ {*}\\right) / 2. 
\\tag {4} \\\\ \\end{array}", + "image_path": "208b667ab630a775d5f15ab0e970cc7c231b7c8826b899ec2157c2d351dcda9a.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 614, + 546, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 614, + 546, + 662 + ], + "spans": [ + { + "bbox": [ + 304, + 614, + 546, + 662 + ], + "type": "text", + "content": "These newly cultivated positive proposals are carefully integrated into the existing set " + }, + { + "bbox": [ + 304, + 614, + 546, + 662 + ], + "type": "inline_equation", + "content": "B_{i}" + }, + { + "bbox": [ + 304, + 614, + 546, + 662 + ], + "type": "text", + "content": " to enhance the positive instances' pool. Such enhancements are pivotal in optimizing the training of the forthcoming PRM." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "content": "Negative Proposals Generator(NPG). MIL-based selection within a single positive bag may overemphasize the background noise, leading to inadequate focus on the object. 
To solve this, we create a negative bag from the back-" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "3588" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 47, + 72, + 287, + 340 + ], + "blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "text", + "content": "Algorithm 1 Positive and Negative Proposals Generation \nInput: " + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "inline_equation", + "content": "T_{neg1}, T_{neg2}, box_{psm}" + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "text", + "content": " from PSM stage, image " + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "text", + "content": ", positive bags " + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "inline_equation", + "content": "B^{+}" + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "text", + "content": ". \nOutput: Positive proposal bags " + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "inline_equation", + "content": "B^{+}" + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "text", + "content": ", Negative proposal set " + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "inline_equation", + "content": "\\mathcal{U}" + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "text", + "content": ". 
\n1: // Step1: positive proposals sampling \n2: for " + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "inline_equation", + "content": "i \\in N, N" + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "text", + "content": " is the number of objects in image " + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "text", + "content": " do \n3: " + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "inline_equation", + "content": "B_{i}^{+} \\gets B_{i}, B_{i} \\in B" + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "text", + "content": "; \n4: " + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "inline_equation", + "content": "B_{i}^{+} = B_{i}^{+} \\bigcup PPG(\\text{box}_{psm}^{i})" + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "text", + "content": "; \n5: end for \n6: // Step2: background negative proposals sampling \n7: " + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "inline_equation", + "content": "\\mathcal{U} \\gets \\{\\}" + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "text", + "content": "; \n8: proposals " + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "inline_equation", + "content": "\\gets" + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "text", + "content": " random_sampling(1) for each image " + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "text", + "content": "; \n9: " + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "inline_equation", + "content": "iou = IOU(proposals, B_{i})" + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "text", + "content": " for each " + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "inline_equation", + "content": "B_{i} \\in B" + }, + { + "bbox": [ + 47, + 
72, + 287, + 340 + ], + "type": "text", + "content": "; \n10: if " + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "inline_equation", + "content": "iou < T_{neg1}" + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "text", + "content": " then \n11: " + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "inline_equation", + "content": "\\mathcal{U} = \\mathcal{U} \\bigcup" + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "text", + "content": " proposals; \n12: end if \n13: // Step3: part negative proposals sampling \n14: for " + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "inline_equation", + "content": "i \\in N, N" + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "text", + "content": " is the number of objects in image " + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "text", + "content": " do \n15: proposals " + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "inline_equation", + "content": "\\gets" + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "text", + "content": " part_neg_sampling(" + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "inline_equation", + "content": "box_{psm}^{i}" + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "text", + "content": "); \n16: " + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "inline_equation", + "content": "iou = IOU(proposals, box_{psm}^{i})" + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "text", + "content": "; \n17: if " + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "inline_equation", + "content": "iou < T_{neg2}" + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "text", + "content": " then \n18: " + }, + { + "bbox": [ + 47, + 72, + 287, + 340 + ], + "type": "inline_equation", + "content": "\\mathcal{U} = \\mathcal{U} \\bigcup" + }, + { + "bbox": [ 
+ 47, + 72, + 287, + 340 + ], + "type": "text", + "content": " proposals; \n19: end if \n20: end for" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_body" + } + ], + "index": 0, + "sub_type": "algorithm" + }, + { + "bbox": [ + 46, + 363, + 287, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 363, + 287, + 387 + ], + "spans": [ + { + "bbox": [ + 46, + 363, + 287, + 387 + ], + "type": "text", + "content": "ground proposals post-positive bag training, which helps MIL maximize the attention towards the object." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 388, + 287, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 388, + 287, + 448 + ], + "spans": [ + { + "bbox": [ + 46, + 388, + 287, + 448 + ], + "type": "text", + "content": "Considering the image dimensions, we randomly sample proposals according to each image's width and height, for negative instance sampling. We assess the Intersection over Union (IoU) between these negatives and the positive sets, filtering out those below a threshold " + }, + { + "bbox": [ + 46, + 388, + 287, + 448 + ], + "type": "inline_equation", + "content": "T_{neg1}" + }, + { + "bbox": [ + 46, + 388, + 287, + 448 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 448, + 287, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 448, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 46, + 448, + 287, + 544 + ], + "type": "text", + "content": "Additionally, to rectify MIL localization errors, we enforce the sampling of smaller proposals with an IoU under a second threshold, " + }, + { + "bbox": [ + 46, + 448, + 287, + 544 + ], + "type": "inline_equation", + "content": "T_{\\text{neg2}}" + }, + { + "bbox": [ + 46, + 448, + 287, + 544 + ], + "type": "text", + "content": ", from inside boxpsm based on its width and height, that is scored highest in PSM, as negative examples. 
These negative instances, partially capturing the object, drive the model to select high-quality bounding boxes that encompass the entire object. The PNPG is systematically elaborated upon in Algorithm1." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 551, + 211, + 563 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 551, + 211, + 563 + ], + "spans": [ + { + "bbox": [ + 47, + 551, + 211, + 563 + ], + "type": "text", + "content": "3.4. Proposals Refinement Module" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 569, + 287, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 569, + 287, + 700 + ], + "spans": [ + { + "bbox": [ + 46, + 569, + 287, + 700 + ], + "type": "text", + "content": "In the PSM phase, we employ MIL to select high-quality proposals from bag " + }, + { + "bbox": [ + 46, + 569, + 287, + 700 + ], + "type": "inline_equation", + "content": "B^{+}" + }, + { + "bbox": [ + 46, + 569, + 287, + 700 + ], + "type": "text", + "content": ". However, as shown in Fig. 2, the box " + }, + { + "bbox": [ + 46, + 569, + 287, + 700 + ], + "type": "inline_equation", + "content": "psm" + }, + { + "bbox": [ + 46, + 569, + 287, + 700 + ], + "type": "text", + "content": " outcomes derived solely from a single-stage MIL are suboptimal and localized. Inspired by PCL [38], we consider refining the proposals in a second phase. However, in contrast to most WSOD methods which choose to continue refining using classification information in subsequent stages, we have established high-quality positive and negative bags, and further combined both classification and instance branches to introduce the PRM module to refine the proposals, aiming to obtain a high-quality bounding box." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 59, + 701, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 701, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 59, + 701, + 287, + 713 + ], + "type": "text", + "content": "The PRM module, extending beyond the scope of PSM," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "type": "text", + "content": "focuses on both selection and refinement. It combines positive instances from the PPG with the initial set, forming an enriched " + }, + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "type": "inline_equation", + "content": "B^{+}" + }, + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "type": "text", + "content": ". Simultaneously, it incorporates the negative instance set " + }, + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "type": "inline_equation", + "content": "\\mathcal{U}" + }, + { + "bbox": [ + 304, + 72, + 545, + 167 + ], + "type": "text", + "content": " from NPG, providing a comprehensive foundation for PRM. This integration leads to a restructured MIL loss in PRM, replacing the conventional CELoss with Focal Loss for positive instances. The modified positive loss function is as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 347, + 168, + 545, + 200 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 168, + 545, + 200 + ], + "spans": [ + { + "bbox": [ + 347, + 168, + 545, + 200 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {p o s} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left\\langle \\mathbf {c} _ {i} ^ {\\mathrm {T}}, \\widehat {\\mathbf {S}} _ {i} \\right\\rangle \\cdot \\operatorname {F L} \\left(\\widehat {\\mathbf {S}} _ {i} ^ {*}, \\mathbf {c} _ {i}\\right). 
\\tag {5}", + "image_path": "c8cd4c463f79d13b356da4ca338fa137fbee50bedc366702a5f048d66afc97e6.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 210, + 545, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 210, + 545, + 264 + ], + "spans": [ + { + "bbox": [ + 304, + 210, + 545, + 264 + ], + "type": "text", + "content": "where FL is the focal loss [32], " + }, + { + "bbox": [ + 304, + 210, + 545, + 264 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{S}}_i^*" + }, + { + "bbox": [ + 304, + 210, + 545, + 264 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 210, + 545, + 264 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{S}}_i" + }, + { + "bbox": [ + 304, + 210, + 545, + 264 + ], + "type": "text", + "content": " represent the bag score predicted by PRM and PSM, respectively. " + }, + { + "bbox": [ + 304, + 210, + 545, + 264 + ], + "type": "inline_equation", + "content": "\\left\\langle \\mathbf{c}_i^{\\mathrm{T}},\\widehat{\\mathbf{S}}_i\\right\\rangle" + }, + { + "bbox": [ + 304, + 210, + 545, + 264 + ], + "type": "text", + "content": " represents the inner product of the two vectors, meaning the predicted bag score of the ground-truth category." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 265, + 545, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 265, + 545, + 335 + ], + "spans": [ + { + "bbox": [ + 304, + 265, + 545, + 335 + ], + "type": "text", + "content": "Enhancing background suppression, we use negative proposals and introduce a dedicated loss for these instances. Notably, these negative instances pass only through the classification branch for instance score computation, with their scores derived exclusively from classification. 
The specific formulation of this loss function is detailed below:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 378, + 338, + 545, + 370 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 378, + 338, + 545, + 370 + ], + "spans": [ + { + "bbox": [ + 378, + 338, + 545, + 370 + ], + "type": "interline_equation", + "content": "\\beta = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left\\langle \\mathbf {c} _ {i} ^ {\\mathrm {T}}, \\widehat {\\mathbf {S}} _ {i} \\right\\rangle , \\tag {6}", + "image_path": "5b2809ea8b23d79e20c91febda2803ed8c0ffd5c9aae353fea9fea5b598f2f90.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 315, + 376, + 545, + 418 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 376, + 545, + 418 + ], + "spans": [ + { + "bbox": [ + 315, + 376, + 545, + 418 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {n e g} = - \\frac {1}{| \\mathcal {U} |} \\sum_ {\\mathcal {U}} \\sum_ {k = 1} ^ {K} \\beta \\cdot \\left(\\left[ \\mathbf {S} _ {n e g} ^ {c l s} \\right] _ {k}\\right) ^ {2} \\log \\left(1 - \\left[ \\mathbf {S} _ {n e g} ^ {c l s} \\right] _ {k}\\right). 
\\tag {7}", + "image_path": "6bdfeb8181b794ec85438bc4f5c8fb236f2aa03400cc8a6f916e873b9e0d2920.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 419, + 545, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 419, + 545, + 444 + ], + "spans": [ + { + "bbox": [ + 304, + 419, + 545, + 444 + ], + "type": "text", + "content": "The PRM loss consists of the MIL loss " + }, + { + "bbox": [ + 304, + 419, + 545, + 444 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{pos}" + }, + { + "bbox": [ + 304, + 419, + 545, + 444 + ], + "type": "text", + "content": " for positive bags and negative loss " + }, + { + "bbox": [ + 304, + 419, + 545, + 444 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{neg}" + }, + { + "bbox": [ + 304, + 419, + 545, + 444 + ], + "type": "text", + "content": " for negative samples, i.e.," + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 359, + 453, + 545, + 466 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 359, + 453, + 545, + 466 + ], + "spans": [ + { + "bbox": [ + 359, + 453, + 545, + 466 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {p r m} = \\alpha \\mathcal {L} _ {p o s} + (1 - \\alpha) \\mathcal {L} _ {n e g}, \\tag {8}", + "image_path": "423faf51ac7e6ca939d8db1d1d7caed62627c73860b340a66d8c57b67e0b159d.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 475, + 418, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 475, + 418, + 486 + ], + "spans": [ + { + "bbox": [ + 305, + 475, + 418, + 486 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 475, + 418, + 486 + ], + "type": "inline_equation", + "content": "\\alpha = 0.25" + }, + { + "bbox": [ + 305, + 475, + 418, + 486 + ], + "type": "text", + "content": " by default." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 486, + 545, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 486, + 545, + 594 + ], + "spans": [ + { + "bbox": [ + 304, + 486, + 545, + 594 + ], + "type": "text", + "content": "Box Mining Strategy. MIL's preference for segments with more foreground presence and SAM's tendency to capture only parts of an object often bring to final bounding boxes, " + }, + { + "bbox": [ + 304, + 486, + 545, + 594 + ], + "type": "inline_equation", + "content": "box_{prim}" + }, + { + "bbox": [ + 304, + 486, + 545, + 594 + ], + "type": "text", + "content": ", the 'local' issue of MIL inadequately covers the instances. To improve the bounding box quality, we introduce a box mining strategy that adaptively expands " + }, + { + "bbox": [ + 304, + 486, + 545, + 594 + ], + "type": "inline_equation", + "content": "box_{select}" + }, + { + "bbox": [ + 304, + 486, + 545, + 594 + ], + "type": "text", + "content": " from proposal selection in PRM, by merging it with the original proposals filter, aiming to address MIL's localization challenges." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": "The Box Mining Strategy (BMS) consists of two primary components: (i) We select the top " + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": " proposals from the positive proposal bag " + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "inline_equation", + "content": "B^{+}" + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": ", to create a set " + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": ". We evaluate the proposals in " + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": " against box_select based on IoU and size, using a threshold " + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "inline_equation", + "content": "T_{min1}" + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": ". Proposals larger than box_select and with an IoU above " + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "inline_equation", + "content": "T_{min1}" + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": " undergo dynamic expansion through IoU consideration, which allows for the adaptive integration with box_select. That mitigates the 'local' issue and maintains the bounding box's consistency to the object's true boundaries. 
(ii) Frequently, issues related to lo" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "3589" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 191 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 191 + ], + "type": "text", + "content": "cality can lead to an exceedingly low IoU between proposals and box_select. Nonetheless, the ground truth box can fully encompass the box_part. Therefore, when component (i) conditions are unmet, if a proposal can entirely encapsulate box_select, we reset the threshold " + }, + { + "bbox": [ + 46, + 72, + 289, + 191 + ], + "type": "inline_equation", + "content": "T_{min2}" + }, + { + "bbox": [ + 46, + 72, + 289, + 191 + ], + "type": "text", + "content": ". Proposals surpassing this threshold adaptively merge with box_select to generate the final box_prm, used to yield Mask_prm. These two components collectively form our BMS strategy. A detailed procedure of this approach will be delineated in Algorithm2 of the supplementary materials." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 192, + 289, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 192, + 289, + 297 + ], + "spans": [ + { + "bbox": [ + 46, + 192, + 289, + 297 + ], + "type": "text", + "content": "Loss Function. 
After acquiring the final supervision masks, " + }, + { + "bbox": [ + 46, + 192, + 289, + 297 + ], + "type": "inline_equation", + "content": "Mask_{prm}" + }, + { + "bbox": [ + 46, + 192, + 289, + 297 + ], + "type": "text", + "content": " and the filtered " + }, + { + "bbox": [ + 46, + 192, + 289, + 297 + ], + "type": "inline_equation", + "content": "Mask_{sam}" + }, + { + "bbox": [ + 46, + 192, + 289, + 297 + ], + "type": "text", + "content": " in Multi-mask Proposals Supervision(MPS) in Sec. 7 of supplementary, we use them together to guide the dynamic segmentation branch. To comprehensively train SAPNet, we integrate the loss functions from the PSM and PRM, culminating in the formulation of the total loss for our model, denoted as " + }, + { + "bbox": [ + 46, + 192, + 289, + 297 + ], + "type": "inline_equation", + "content": "L_{total}" + }, + { + "bbox": [ + 46, + 192, + 289, + 297 + ], + "type": "text", + "content": ". The aggregate loss function, " + }, + { + "bbox": [ + 46, + 192, + 289, + 297 + ], + "type": "inline_equation", + "content": "L_{total}" + }, + { + "bbox": [ + 46, + 192, + 289, + 297 + ], + "type": "text", + "content": " can be articulated as:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 83, + 300, + 287, + 312 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 300, + 287, + 312 + ], + "spans": [ + { + "bbox": [ + 83, + 300, + 287, + 312 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {t o t a l}} = \\mathcal {L} _ {\\text {m a s k}} + \\mathcal {L} _ {\\text {c l s}} + \\lambda \\cdot \\mathcal {L} _ {\\text {p s m}} + \\mathcal {L} _ {\\text {p r m}} \\tag {9}", + "image_path": "628dad0b75cf3111addcbed6bc0bf40ddac8a7e58de4c3012bffb1c31e4b5d0d.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 312, + 287, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 312, + 287, + 337 + ], + "spans": [ + { + "bbox": [ + 46, + 312, + 287, + 
337 + ], + "type": "text", + "content": "where, " + }, + { + "bbox": [ + 46, + 312, + 287, + 337 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{Dice}" + }, + { + "bbox": [ + 46, + 312, + 287, + 337 + ], + "type": "text", + "content": " is the Dice Loss [35], " + }, + { + "bbox": [ + 46, + 312, + 287, + 337 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{cls}" + }, + { + "bbox": [ + 46, + 312, + 287, + 337 + ], + "type": "text", + "content": " is the Focal Loss[32], and " + }, + { + "bbox": [ + 46, + 312, + 287, + 337 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 46, + 312, + 287, + 337 + ], + "type": "text", + "content": " is set as 0.25." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 348, + 124, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 348, + 124, + 361 + ], + "spans": [ + { + "bbox": [ + 47, + 348, + 124, + 361 + ], + "type": "text", + "content": "4. Experiment" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 368, + 174, + 381 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 368, + 174, + 381 + ], + "spans": [ + { + "bbox": [ + 47, + 368, + 174, + 381 + ], + "type": "text", + "content": "4.1. Experimental Settings" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 386, + 287, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 386, + 287, + 458 + ], + "spans": [ + { + "bbox": [ + 46, + 386, + 287, + 458 + ], + "type": "text", + "content": "Datasets. We use the publicly available MS COCO[33] and VOC2012SBD [13] datasets for experiments. COCO17 has 118k training and 5k validation images with 80 common object categories. VOC consists of 20 categories and contains 10,582 images for model training and 1,449 validation images for evaluation." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 459, + 287, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 459, + 287, + 554 + ], + "spans": [ + { + "bbox": [ + 46, + 459, + 287, + 554 + ], + "type": "text", + "content": "Evaluation Metric. We use mean average precision mAP@[.5,.95] for the MS-COCO. The " + }, + { + "bbox": [ + 46, + 459, + 287, + 554 + ], + "type": "inline_equation", + "content": "\\{AP, AP_{50}, AP_{75}, AP_{Small}, AP_{Middle}, AP_{Large}\\}" + }, + { + "bbox": [ + 46, + 459, + 287, + 554 + ], + "type": "text", + "content": " is reported for MS-COCO and for VOC12SBD segmentation, and we report " + }, + { + "bbox": [ + 46, + 459, + 287, + 554 + ], + "type": "inline_equation", + "content": "AP_{25,50,75}" + }, + { + "bbox": [ + 46, + 459, + 287, + 554 + ], + "type": "text", + "content": ". The " + }, + { + "bbox": [ + 46, + 459, + 287, + 554 + ], + "type": "inline_equation", + "content": "mIoU_{box}" + }, + { + "bbox": [ + 46, + 459, + 287, + 554 + ], + "type": "text", + "content": " is the average IoU between predicted pseudo-boxes and GT-boxes in the training set. It measures SAPNet's ability to select mask proposals without using the segmentation branch." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 555, + 287, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 555, + 287, + 664 + ], + "spans": [ + { + "bbox": [ + 46, + 555, + 287, + 664 + ], + "type": "text", + "content": "Implementation Details. In our study, we employed the Stochastic Gradient Descent (SGD) optimizer, as detailed in [6]. Our experiments were conducted using the mmdetection toolbox [7], following standard training protocols for each dataset. We used the ResNet architecture [15], pretrained on ImageNet [36], as the backbone. For COCO, batch size was set at four images per GPU across eight GPUs, and for VOC2012, it was four GPUs. More details of the experiment are in Sec. 
8 of the supplementary." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 670, + 200, + 684 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 670, + 200, + 684 + ], + "spans": [ + { + "bbox": [ + 47, + 670, + 200, + 684 + ], + "type": "text", + "content": "4.2. Experimental Comparisons" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "type": "text", + "content": "Tab. 1 shows the comparison results between our method and previous SOTA approaches [11, 16, 34, 40, 42] on" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 179 + ], + "type": "text", + "content": "COCO. In our experiments, we provide SAM with both the labeled points and the annotations generated by the point annotation enhancer [9]. SAM then utilizes these inputs to generate subsequent mask proposals for selection and supervision. For fair comparison, we design two baselines: the top-1 scored mask from SAM and MIL-selected SAM mask proposals are used as SOLOv2 supervision, respectively. Tab. 1 shows our method substantially surpasses these baselines in performance." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 180, + 546, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 180, + 546, + 324 + ], + "spans": [ + { + "bbox": [ + 304, + 180, + 546, + 324 + ], + "type": "text", + "content": "Comparison with point-annotated methods. Our approach achieves a 31.2 AP performance with a ResNet-50 backbone, surpassing all previous point-annotated methods, including BESTIE on HRNet-48 and AttnShift on Vit-B. 
Our model exhibits significant improvements under a 1x training schedule, with a 13.5 AP increase when compared to the previous SOTA method, BESTIE. Furthermore, under a 3x training schedule, SAPNet outperforms AttnShift, which relies on large model training, with 13.4 AP, improvements. Importantly, our method is trained end-to-end without needing post-processing, achieving SOTA performance in point-annotated instance segmentation." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 325, + 545, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 325, + 545, + 444 + ], + "spans": [ + { + "bbox": [ + 304, + 325, + 545, + 444 + ], + "type": "text", + "content": "Comparison with other annotation-based methods. Our SAPNet has significantly elevated point annotation, regardless of point annotation's limitations in annotation time and quality compared to box annotation. Utilizing a ResNet-101 backbone and a 3x training schedule, SAPNet surpasses most box-annotated instance segmentation methods, achieving a 1.4 AP improvement over BoxInst. Moreover, SAPNet's segmentation performance nearly matches the mask-annotated methods, effectively bridging the gap between point-annotated and these techniques." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 445, + 546, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 445, + 546, + 564 + ], + "spans": [ + { + "bbox": [ + 304, + 445, + 546, + 564 + ], + "type": "text", + "content": "Segmentation performance on VOC2012SBD. Tab. 2 compares segmentation methods under different supervisions on the VOC2012 dataset. SAPNet reports an enhancement of 7.7 " + }, + { + "bbox": [ + 304, + 445, + 546, + 564 + ], + "type": "inline_equation", + "content": "AP" + }, + { + "bbox": [ + 304, + 445, + 546, + 564 + ], + "type": "text", + "content": " over the AttnShift approach, evidencing a notable advancement in performance. 
Thereby, it significantly outstrips image-level supervised segmentation methods. Additionally, SAPNet surpasses box-annotated segmentation methods, such as BoxInst by 3.4 " + }, + { + "bbox": [ + 304, + 445, + 546, + 564 + ], + "type": "inline_equation", + "content": "AP_{50}" + }, + { + "bbox": [ + 304, + 445, + 546, + 564 + ], + "type": "text", + "content": " and DiscoBox by 32.6 " + }, + { + "bbox": [ + 304, + 445, + 546, + 564 + ], + "type": "inline_equation", + "content": "AP_{50}" + }, + { + "bbox": [ + 304, + 445, + 546, + 564 + ], + "type": "text", + "content": ". Further, our point-prompted method achieves " + }, + { + "bbox": [ + 304, + 445, + 546, + 564 + ], + "type": "inline_equation", + "content": "92.3\\%" + }, + { + "bbox": [ + 304, + 445, + 546, + 564 + ], + "type": "text", + "content": " of the Mask-R-CNN." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 574, + 406, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 574, + 406, + 586 + ], + "spans": [ + { + "bbox": [ + 306, + 574, + 406, + 586 + ], + "type": "text", + "content": "4.3. Ablation Studies" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 593, + 545, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 593, + 545, + 616 + ], + "spans": [ + { + "bbox": [ + 304, + 593, + 545, + 616 + ], + "type": "text", + "content": "More experiments have been conducted on COCO to further analyze SAPNet's effectiveness and robustness." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": "Training Stage in SAPNet. The ablation study of the training stage is given in Tab. 3. We trained solov2 using the top-1 scored mask provided by SAM and compared it to the two training strategies of SAPNet. 
In the two-stage approach, the segmentation branch and multiple-mask supervision of SAPNet are removed. Instead, we use the selected mask to train a standalone instance segmentation model, as described by [42]. The end-to-end training method corre" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "3590" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 71, + 545, + 363 + ], + "blocks": [ + { + "bbox": [ + 50, + 71, + 545, + 363 + ], + "lines": [ + { + "bbox": [ + 50, + 71, + 545, + 363 + ], + "spans": [ + { + "bbox": [ + 50, + 71, + 545, + 363 + ], + "type": "table", + "html": "
MethodAnn.Backbonesched.Arch.mAP\\( \\mathrm{mAP}_{50} \\)\\( \\mathrm{mAP}_{75} \\)\\( \\mathrm{mAP}_{\\mathrm{s}} \\)\\( \\mathrm{mAP}_{\\mathrm{m}} \\)\\( \\mathrm{mAP}_{1} \\)
Fully-supervised instance segmentation models.
Mask R-CNN [16]\\( \\mathcal{M} \\)ResNet-501xMask R-CNN34.656.536.618.337.447.2
YOLACT-700 [5]\\( \\mathcal{M} \\)ResNet-1014.5xYOLACT31.254.032.812.133.347.
PolarMask [16]\\( \\mathcal{M} \\)ResNet-1012xPolarMask32.153.733.114.733.845.3
SOLOv2 [42]\\( \\mathcal{M} \\)ResNet-501xSOLOv234.854.936.913.437.853.7
CondInst [40]\\( \\mathcal{M} \\)ResNet-501xCondInst35.356.437.418.039.450.4
SwinMR [34]\\( \\mathcal{M} \\)Swin-S50eSwinMR43.267.046.124.846.362.1
Mask2Former [11]\\( \\mathcal{M} \\)Swin-S50eMask2Former46.169.452.825.449.768.5
Weakly-supervised instance segmentation models.
IRNet [45]\\( \\mathcal{I} \\)ResNet-501xMask R-CNN6.111.75.5---
BESTIE [21]\\( \\mathcal{I} \\)HRNet-481xMask R-CNN14.328.013.2---
BBTP [18]\\( \\mathcal{B} \\)ResNet-1011xMask R-CNN21.145.517.211.222.029.8
BoxInst [39]\\( \\mathcal{B} \\)ResNet-1013xCondInst33.256.533.616.235.345.1
DiscoBox [23]\\( \\mathcal{B} \\)ResNet-503xSOLOv232.053.632.611.733.748.4
Boxlevelset [27]\\( \\mathcal{B} \\)ResNet-1013xSOLOv233.456.834.115.236.846.8
WISE-Net [24]\\( \\mathcal{P} \\)ResNet-501xMask R-CNN7.818.28.8---
BESTIE†[21]\\( \\mathcal{P} \\)HRNet-481xMask R-CNN17.734.016.4---
AttnShift [31]\\( \\mathcal{P} \\)Vit-B50eMask R-CNN21.243.519.4---
SAM-SOLOv2\\( \\mathcal{P} \\)ResNet-501xSOLOv224.641.925.39.328.638.1
MIL-SOLOv2\\( \\mathcal{P} \\)ResNet-501xSOLOv226.847.726.811.231.540.4
SAPNet(ours)\\( \\mathcal{P} \\)ResNet-501xSOLOv231.251.832.312.635.147.8
SAPNet(ours)*\\( \\mathcal{P} \\)ResNet-1013xSOLOv234.656.036.615.739.552.1
", + "image_path": "77de95328c10c81a448c9b6cb5aef62fa56ccddd27290fb275cf47e1e1846cd0.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 50, + 415, + 287, + 567 + ], + "blocks": [ + { + "bbox": [ + 46, + 367, + 547, + 411 + ], + "lines": [ + { + "bbox": [ + 46, + 367, + 547, + 411 + ], + "spans": [ + { + "bbox": [ + 46, + 367, + 547, + 411 + ], + "type": "text", + "content": "Table 1. Mask annotation(M), image annotation(I), box annotation(B) and point annotation(P) performance on COCO-17 val. 'Ann.' is the type of the annotation and 'sched.' means schedule. * is the multi-scale augment training for re-training segmentation methods, and other experiments are on single-scale training. SwinMR is Swin-Transformer-Mask R-CNN. SwinMR and Mask2Former use multi-scale data augment strategies for SOTA." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 415, + 287, + 567 + ], + "lines": [ + { + "bbox": [ + 50, + 415, + 287, + 567 + ], + "spans": [ + { + "bbox": [ + 50, + 415, + 287, + 567 + ], + "type": "table", + "html": "
MethodSup.BackboneAP25AP50AP75
Mask R-CNN [16]MR-5078.068.843.3
Mask R-CNN [16]MR-10179.670.245.3
BoxInst [39]BR-101-61.437.0
DiscoBoxBR-10172.862.237.5
BESTIE [21]IHRNet53.541.724.2
IRNet [45]IR-50-46.723.5
BESTIE† [21]IHRNet61.251.026.6
WISE-Net [24]PR-5053.543.025.9
BESTIE [21]PHRNet58.646.726.3
BESTIE† [21]PHRNet66.456.130.2
Attnshift [31]PVit-S68.354.425.4
Attnshift† [31]PVit-S70.357.130.4
SAPNet(ours)PR-10176.564.858.7
", + "image_path": "5d2612b00f4dd8cd1a46ab6ac8af5f371b78baf12387c6a891f8a9122dda2381.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 46, + 569, + 287, + 590 + ], + "lines": [ + { + "bbox": [ + 46, + 569, + 287, + 590 + ], + "spans": [ + { + "bbox": [ + 46, + 569, + 287, + 590 + ], + "type": "text", + "content": "Table 2. Instance segmentation performance on the VOC2012 test set. " + }, + { + "bbox": [ + 46, + 569, + 287, + 590 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 46, + 569, + 287, + 590 + ], + "type": "text", + "content": " indicates applying MRCNN refinement." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 591, + 287, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 591, + 287, + 674 + ], + "spans": [ + { + "bbox": [ + 46, + 591, + 287, + 674 + ], + "type": "text", + "content": "sponds to the architecture illustrated in Fig. 2. Our findings indicate that our method is more competitive than directly employing SAM (31.2 AP vs 24.6 AP), and the visualization of Fig. 4 shows us this enhancement. Moreover, the end-to-end training strategy boasts a more elegant model structure and outperforms the two-stage approach in overall efficiency (31.2 AP vs 30.18 AP)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "text", + "content": "Effect of Each Component. Given the limited performance of SAM-top1, we opted for the single-MIL as our baseline. 
With a preliminary selection using MIL1, we" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 307, + 415, + 415, + 497 + ], + "blocks": [ + { + "bbox": [ + 307, + 415, + 415, + 497 + ], + "lines": [ + { + "bbox": [ + 307, + 415, + 415, + 497 + ], + "spans": [ + { + "bbox": [ + 307, + 415, + 415, + 497 + ], + "type": "image", + "image_path": "7e58933038a2be704e32eebd534d169562eede1cfd41805e23e5e8e4dac96f03.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 500, + 547, + 544 + ], + "lines": [ + { + "bbox": [ + 305, + 500, + 547, + 544 + ], + "spans": [ + { + "bbox": [ + 305, + 500, + 547, + 544 + ], + "type": "text", + "content": "Figure 4. The comparative visualization between SAM-top1 and SAPNet is presented, showcasing SAM's segmentation outcomes in green masks and our results in yellow. The orange and red bounding boxes highlight the respective mask boundaries." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 416, + 415, + 542, + 497 + ], + "blocks": [ + { + "bbox": [ + 416, + 415, + 542, + 497 + ], + "lines": [ + { + "bbox": [ + 416, + 415, + 542, + 497 + ], + "spans": [ + { + "bbox": [ + 416, + 415, + 542, + 497 + ], + "type": "image", + "image_path": "f25761820fcc09c4e886e4b8e486fab5adb98d8eec758148b0187d4ca6bdbdbb.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 309, + 550, + 547, + 605 + ], + "blocks": [ + { + "bbox": [ + 309, + 550, + 547, + 605 + ], + "lines": [ + { + "bbox": [ + 309, + 550, + 547, + 605 + ], + "spans": [ + { + "bbox": [ + 309, + 550, + 547, + 605 + ], + "type": "table", + "html": "
train stage on cocosched.APAP50AP75
SAM-top11x24.641.925.3
Two stage1x30.249.831.5
End to end1x31.251.832.3
", + "image_path": "df86f796ed7d155ee948920c25991a164612e2b7ee31122527446c234b839c26.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 609, + 547, + 632 + ], + "lines": [ + { + "bbox": [ + 305, + 609, + 547, + 632 + ], + "spans": [ + { + "bbox": [ + 305, + 609, + 547, + 632 + ], + "type": "text", + "content": "Table 3. The experimental comparisons of segmenters in COCO dataset, SAM-top1 is the highest scoring mask generated by SAM." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 641, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 547, + 713 + ], + "type": "text", + "content": "have achieved a segmentation performance of 26.8 AP. i) Point Distance Guidance. We updated the proposal scores from the existing MIL by integrating the PDG module into the foundational MIL selection. This approach successfully segments adjacent objects of the same category, improving the segmentation performance by 0.7 points (27.5 vs 26.8)." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 314, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 758 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 758 + ], + "type": "text", + "content": "3591" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 51, + 70, + 284, + 159 + ], + "blocks": [ + { + "bbox": [ + 51, + 70, + 284, + 159 + ], + "lines": [ + { + "bbox": [ + 51, + 70, + 284, + 159 + ], + "spans": [ + { + "bbox": [ + 51, + 70, + 284, + 159 + ], + "type": "table", + "html": "
mil1PDGmil2PNPGBMSMPSmAP
26.8
27.5
27.7
29.7
30.8
31.2
", + "image_path": "c45def14b9991273165f483f0ae2d132797f34dbbbe2091457eb5b656859c6e5.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 46, + 162, + 287, + 218 + ], + "lines": [ + { + "bbox": [ + 46, + 162, + 287, + 218 + ], + "spans": [ + { + "bbox": [ + 46, + 162, + 287, + 218 + ], + "type": "text", + "content": "Table 4. The effect of each component in SAPNet: proposal selection module(MIL1), point distance guidance(PDG), positive and negative proposals generator(PNPG), proposal selection module(MIL2), box mining strategy(BMS), and Multi-mask Proposals Supervision(MPS) in Sec. 7 of supplementary." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 220, + 286, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 220, + 286, + 365 + ], + "spans": [ + { + "bbox": [ + 46, + 220, + 286, + 365 + ], + "type": "text", + "content": "ii) MIL2. Building on the previous step, we incorporate a second MIL selection module to refine the initially selected boxes, resulting in a performance increment of 0.2 points. iii) PNPG. For MIL2, we devised the positive-negative sample sets, aiming to enhance the input quality for the PRM module and use the negative samples to suppress background. This adjustment leads to a segmentation performance boost of 2 points (29.7 vs 27.7). iv) BMS. Within the PRM, we refine the selected boxes using BMS, pushing the segmentation performance up by 1.1 points (30.8 vs 29.7). v) MPS. Utilizing MPS for segmentation branch supervision yields a 0.4-point performance improvement." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 365, + 286, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 365, + 286, + 449 + ], + "spans": [ + { + "bbox": [ + 46, + 365, + 286, + 449 + ], + "type": "text", + "content": "Threshold of BMS. 
For point refinement, there are two constraints (described in Sec. 3.4). " + }, + { + "bbox": [ + 46, + 365, + 286, + 449 + ], + "type": "inline_equation", + "content": "T_{min1}" + }, + { + "bbox": [ + 46, + 365, + 286, + 449 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 365, + 286, + 449 + ], + "type": "inline_equation", + "content": "T_{min2}" + }, + { + "bbox": [ + 46, + 365, + 286, + 449 + ], + "type": "text", + "content": " are thresholds of the Box Mining Strategy. In Tab. 5, it shows that the two constraints together to obtain performance gain. After multiple experiments, we have found that there is a significant performance improvement when " + }, + { + "bbox": [ + 46, + 365, + 286, + 449 + ], + "type": "inline_equation", + "content": "T_{min1}" + }, + { + "bbox": [ + 46, + 365, + 286, + 449 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 365, + 286, + 449 + ], + "type": "inline_equation", + "content": "T_{min2}" + }, + { + "bbox": [ + 46, + 365, + 286, + 449 + ], + "type": "text", + "content": " are set to 0.6 and 0.3, respectively." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 449, + 286, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 449, + 286, + 544 + ], + "spans": [ + { + "bbox": [ + 46, + 449, + 286, + 544 + ], + "type": "text", + "content": "Components of PNPG. Tab. 6 presents the results of a dissected ablation study on the Positive and Negative Proposals Generator(PNPG), illustrating the respective impacts of the positive and negative examples on the model's performance. It is evident that the construction of negative examples plays a significant role in enhancing model efficacy. Furthermore, the beneficial effects of both positive and negative examples are observed to be cumulative." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 545, + 286, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 545, + 286, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 545, + 286, + 714 + ], + "type": "text", + "content": "Performance Analysis. As presented in Tab. 7, we conducted a statistical analysis to validate SAPNet's capability to address 'local' issue and compare the outcomes selected by the single-MIL with those obtained by SAPNet in the absence of segmentation branch integration. Specifically, the part problem generated by the single-MIL, where MIL is inclined to select proposals with a higher proportion of foreground, is exemplified in Fig. 6 of supplementary. On this premise, we initially establish an evaluative criterion " + }, + { + "bbox": [ + 46, + 545, + 286, + 714 + ], + "type": "inline_equation", + "content": "R_{v} = \\frac{area_{mask}}{area_{box}}" + }, + { + "bbox": [ + 46, + 545, + 286, + 714 + ], + "type": "text", + "content": ", which is the ratio of the mask area to the bounding box area. Subsequently, we compute " + }, + { + "bbox": [ + 46, + 545, + 286, + 714 + ], + "type": "inline_equation", + "content": "R_{v_i}" + }, + { + "bbox": [ + 46, + 545, + 286, + 714 + ], + "type": "text", + "content": " for each proposal within the proposal bag corresponding to every instance across the entire COCO dataset and select the maximum " + }, + { + "bbox": [ + 46, + 545, + 286, + 714 + ], + "type": "inline_equation", + "content": "R_{v_{max}}" + }, + { + "bbox": [ + 46, + 545, + 286, + 714 + ], + "type": "text", + "content": " to compute the mean value over the dataset," + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 309, + 71, + 547, + 149 + ], + "blocks": [ + { + "bbox": [ + 309, + 71, + 547, + 149 + ], + "lines": [ + { + "bbox": [ + 309, + 71, + 547, + 149 + ], + "spans": [ + { + "bbox": [ + 309, + 71, + 547, + 149 + ], + "type": "table", + "html": "
Tmin1Tmin2APAP50AP75APsAPmAPl
0.50.330.951.332.012.234.747.4
0.50.430.751.231.811.934.747.1
0.60.331.251.832.312.635.147.8
0.60.430.851.132.012.134.747.3
0.70.331.051.532.212.634.947.3
0.70.430.751.131.912.034.647.2
", + "image_path": "c659e505dcf0fe439a1de4ce93da2f6215f1b535634a4135eb8ab816a8108c9a.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 340, + 167, + 513, + 244 + ], + "blocks": [ + { + "bbox": [ + 345, + 153, + 504, + 164 + ], + "lines": [ + { + "bbox": [ + 345, + 153, + 504, + 164 + ], + "spans": [ + { + "bbox": [ + 345, + 153, + 504, + 164 + ], + "type": "text", + "content": "Table 5. Constraints in box mining strategy." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 340, + 167, + 513, + 244 + ], + "lines": [ + { + "bbox": [ + 340, + 167, + 513, + 244 + ], + "spans": [ + { + "bbox": [ + 340, + 167, + 513, + 244 + ], + "type": "table", + "html": "
PNPGAPAP50AP75
PPGNPG
29.349.730.0
29.850.530.8
30.751.231.7
31.251.832.3
", + "image_path": "0c07e2649a0abdc31fdee7927331295c5118d42be9011e3ea7d21e8a13654c7f.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 353, + 263, + 501, + 304 + ], + "blocks": [ + { + "bbox": [ + 332, + 248, + 518, + 258 + ], + "lines": [ + { + "bbox": [ + 332, + 248, + 518, + 258 + ], + "spans": [ + { + "bbox": [ + 332, + 248, + 518, + 258 + ], + "type": "text", + "content": "Table 6. Meticulous ablation experiments in PNPG" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 353, + 263, + 501, + 304 + ], + "lines": [ + { + "bbox": [ + 353, + 263, + 501, + 304 + ], + "spans": [ + { + "bbox": [ + 353, + 263, + 501, + 304 + ], + "type": "table", + "html": "
MethodGapmIoUbox
Single-MIL0.19963.8
SAPNet0.13169.1
", + "image_path": "6eb306980562b8b728013e500658b038230b28aa17f054bb406f16334ad9699f.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 334, + 308, + 516, + 319 + ], + "lines": [ + { + "bbox": [ + 334, + 308, + 516, + 319 + ], + "spans": [ + { + "bbox": [ + 334, + 308, + 516, + 319 + ], + "type": "text", + "content": "Table 7. Experimental analysis with part problem." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 305, + 328, + 545, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 328, + 545, + 388 + ], + "spans": [ + { + "bbox": [ + 305, + 328, + 545, + 388 + ], + "type": "text", + "content": "which is then designated as the threshold " + }, + { + "bbox": [ + 305, + 328, + 545, + 388 + ], + "type": "inline_equation", + "content": "T_{rv}" + }, + { + "bbox": [ + 305, + 328, + 545, + 388 + ], + "type": "text", + "content": ". Ultimately, we identify the ground truth " + }, + { + "bbox": [ + 305, + 328, + 545, + 388 + ], + "type": "inline_equation", + "content": "R_{vgt}" + }, + { + "bbox": [ + 305, + 328, + 545, + 388 + ], + "type": "text", + "content": " and objects where " + }, + { + "bbox": [ + 305, + 328, + 545, + 388 + ], + "type": "inline_equation", + "content": "R_{vmax}" + }, + { + "bbox": [ + 305, + 328, + 545, + 388 + ], + "type": "text", + "content": " exceeds " + }, + { + "bbox": [ + 305, + 328, + 545, + 388 + ], + "type": "inline_equation", + "content": "T_{rv}" + }, + { + "bbox": [ + 305, + 328, + 545, + 388 + ], + "type": "text", + "content": " and calculates the discrepancy between " + }, + { + "bbox": [ + 305, + 328, + 545, + 388 + ], + "type": "inline_equation", + "content": "R_{v}" + }, + { + "bbox": [ + 305, + 328, + 545, + 388 + ], + "type": "text", + "content": " values selected by single-MIL and SAPNet. 
The description is as follows:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 312, + 395, + 545, + 417 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 395, + 545, + 417 + ], + "spans": [ + { + "bbox": [ + 312, + 395, + 545, + 417 + ], + "type": "interline_equation", + "content": "Gap_{single} = Rv_{single} - Rv_{gt}, \quad Gap_{our} = Rv_{our} - Rv_{gt}. \tag{10}", + "image_path": "ef5f409d63af79df2d98cc779d319985e3f6ba2a29253ed6b41e6e40e989cd88.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 420, + 545, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 420, + 545, + 468 + ], + "spans": [ + { + "bbox": [ + 305, + 420, + 545, + 468 + ], + "type": "text", + "content": "Tab. 7 shows that the proposed SAPNet mitigates the locality issue faced by the single-MIL. Furthermore, the boxes selected via SAPNet exhibit a substantially higher IoU with GT than those selected by the single-MIL." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 481, + 378, + 493 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 481, + 378, + 493 + ], + "spans": [ + { + "bbox": [ + 306, + 481, + 378, + 493 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 499, + 545, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 499, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 304, + 499, + 545, + 635 + ], + "type": "text", + "content": "In this paper, we propose SAPNet, an innovative end-to-end point-prompted instance segmentation framework. SAPNet transforms point annotations into category-agnostic mask proposals and employs dual selection branches to elect the most semantic mask for each object, guiding the segmentation process. 
To address challenges such as indistinguishable adjacent objects of the same class and MIL's locality bias, we integrate PDG and PNPG, complemented by a Box Mining Strategy for enhanced proposal refinement. SAPNet uniquely merges segmentation and selection branches under multi-mask supervision, significantly enhancing its segmentation performance. Extensive experimental comparisons on VOC and COCO datasets validate the SAPNet's effectiveness in point-prompted instance segmentation." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 650, + 421, + 662 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 650, + 421, + 662 + ], + "spans": [ + { + "bbox": [ + 306, + 650, + 421, + 662 + ], + "type": "text", + "content": "6. Acknowledgements" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 665, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 665, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 305, + 665, + 545, + 712 + ], + "type": "text", + "content": "This work was supported in part by the Youth Innovation Promotion Association CAS, the National Natural Science Foundation of China (NSFC) under Grant No. 61836012, 61771447 and 62272438, and the Strategic Priority Research Program of the Chinese Academy of Sciences under Grant No.XDA27000000." 
+ } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "3592" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "text", + "content": "[1] Jiwoon Ahn, Sunghyun Cho, and Suha Kwak. Weakly supervised learning of instance segmentation with inter-pixel relations. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2209-2218, 2019. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 146, + 288, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 146, + 288, + 179 + ], + "spans": [ + { + "bbox": [ + 53, + 146, + 288, + 179 + ], + "type": "text", + "content": "[2] Pablo Andres Arbeláez, Jordi Pont-Tuset, and Jonathan T. Barron et al. Multiscale combinatorial grouping. In CVPR, 2014. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 181, + 288, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 181, + 288, + 226 + ], + "spans": [ + { + "bbox": [ + 53, + 181, + 288, + 226 + ], + "type": "text", + "content": "[3] Aditya Arun, CV Jawahar, and M Pawan Kumar. 
Weakly supervised instance segmentation by learning annotation consistent instances. In European Conference on Computer Vision, pages 254-270. Springer, 2020. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 226, + 288, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 226, + 288, + 249 + ], + "spans": [ + { + "bbox": [ + 53, + 226, + 288, + 249 + ], + "type": "text", + "content": "[4] Hakan Bilen and Andrea Vedaldi. Weakly supervised deep detection networks. In CVPR, 2016. 2, 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 250, + 288, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 250, + 288, + 295 + ], + "spans": [ + { + "bbox": [ + 53, + 250, + 288, + 295 + ], + "type": "text", + "content": "[5] Daniel Bolya, Chong Zhou, Fanyi Xiao, and Yong Jae Lee. Yolact: Real-time instance segmentation. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9157-9166, 2019. 1, 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 295, + 288, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 295, + 288, + 329 + ], + "spans": [ + { + "bbox": [ + 53, + 295, + 288, + 329 + ], + "type": "text", + "content": "[6] Léon Bottou. Stochastic gradient descent tricks. In Neural Networks: Tricks of the Trade: Second Edition, pages 421-436. Springer, 2012. 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 330, + 288, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 330, + 288, + 396 + ], + "spans": [ + { + "bbox": [ + 53, + 330, + 288, + 396 + ], + "type": "text", + "content": "[7] Kai Chen, Jiaqi Wang, Jiangmiao Pang, Yuhang Cao, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jiarui Xu, et al. Mmdetection: Open mmlab detection toolbox and benchmark. arXiv preprint arXiv:1906.07155, 2019. 
https://github.com/open-mmlab/mmdetection.6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 396, + 288, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 396, + 288, + 453 + ], + "spans": [ + { + "bbox": [ + 53, + 396, + 288, + 453 + ], + "type": "text", + "content": "[8] Keyan Chen, Chenyang Liu, Hao Chen, Haotian Zhang, Wenyuan Li, Zhengxia Zou, and Zhenwei Shi. Rsprompter: Learning to prompt for remote sensing instance segmentation based on visual foundation model. arXiv preprint arXiv:2306.16269, 2023. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 453, + 288, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 453, + 288, + 518 + ], + "spans": [ + { + "bbox": [ + 53, + 453, + 288, + 518 + ], + "type": "text", + "content": "[9] Pengfei Chen, Xuehui Yu, Xumeng Han, Najmul Hassan, Kai Wang, Jiachen Li, Jian Zhao, Humphrey Shi, Zhenjun Han, and Qixiang Ye. Point-to-box network for accurate object detection via single point supervision. In European Conference on Computer Vision, pages 51-67. Springer, 2022. 2, 3, 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 520, + 287, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 520, + 287, + 564 + ], + "spans": [ + { + "bbox": [ + 48, + 520, + 287, + 564 + ], + "type": "text", + "content": "[10] Tianle Chen, Zheda Mai, Ruiwen Li, and Wei-lun Chao. Segment anything model (sam) enhanced pseudo labels for weakly supervised semantic segmentation. arXiv preprint arXiv:2305.05803, 2023. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 565, + 287, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 565, + 287, + 609 + ], + "spans": [ + { + "bbox": [ + 48, + 565, + 287, + 609 + ], + "type": "text", + "content": "[11] Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, and Rohit Girdhar. 
Masked-attention mask transformer for universal image segmentation. In CVPR, 2022. 1, 6, 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 610, + 288, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 610, + 288, + 656 + ], + "spans": [ + { + "bbox": [ + 48, + 610, + 288, + 656 + ], + "type": "text", + "content": "[12] Bowen Cheng, Omkar Parkhi, and Alexander Kirillov. Pointly-supervised instance segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2617-2626, 2022. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 657, + 287, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 657, + 287, + 700 + ], + "spans": [ + { + "bbox": [ + 48, + 657, + 287, + 700 + ], + "type": "text", + "content": "[13] Mark Everingham, Luc Van Gool, and Christopher K. I. Williams et al. The Pascal visual object classes (VOC) challenge. IJCV, 2010. http://host.robots.ox.ac.uk/pascal/VOC/.6" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 702, + 287, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 702, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 702, + 287, + 714 + ], + "type": "text", + "content": "[14] Junsong Fan, Zhaoxiang Zhang, and Tieniu Tan. Pointly-" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 714 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "supervised panoptic segmentation. In European Conference on Computer Vision, pages 319-336. Springer, 2022. 
1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 95, + 545, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 95, + 545, + 118 + ], + "spans": [ + { + "bbox": [ + 308, + 95, + 545, + 118 + ], + "type": "text", + "content": "[15] Kaiming He, Xiangyu Zhang, and Shaoqing Ren et al. Deep residual learning for image recognition. In CVPR, 2016. 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 118, + 545, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 118, + 545, + 139 + ], + "spans": [ + { + "bbox": [ + 308, + 118, + 545, + 139 + ], + "type": "text", + "content": "[16] Kaiming He, Georgia Gkioxari, and Piotr Dólar et al. Mask R-CNN. In ICCV, 2017. 1, 6, 7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 140, + 545, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 140, + 545, + 183 + ], + "spans": [ + { + "bbox": [ + 308, + 140, + 545, + 183 + ], + "type": "text", + "content": "[17] Sheng He, Rina Bao, Jingpeng Li, P Ellen Grant, and Yangming Ou. Accuracy of segment-anything model (sam) in medical image segmentation tasks. arXiv preprint arXiv:2304.09324, 2023. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 183, + 545, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 183, + 545, + 228 + ], + "spans": [ + { + "bbox": [ + 308, + 183, + 545, + 228 + ], + "type": "text", + "content": "[18] Cheng-Chun Hsu, Kuang-Jui Hsu, Chung-Chi Tsai, Yen-Yu Lin, and Yung-Yu Chuang. Weakly supervised instance segmentation using the bounding box tightness prior. In NeurIPS, 2019. 2, 7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 228, + 545, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 228, + 545, + 261 + ], + "spans": [ + { + "bbox": [ + 308, + 228, + 545, + 261 + ], + "type": "text", + "content": "[19] Peng-Tao Jiang and Yuqi Yang. 
Segment anything is a good pseudo-label generator for weakly supervised semantic segmentation. arXiv preprint arXiv:2305.01275, 2023. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 261, + 545, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 261, + 545, + 294 + ], + "spans": [ + { + "bbox": [ + 308, + 261, + 545, + 294 + ], + "type": "text", + "content": "[20] Lei Ke, Mingqiao Ye, Martin Danelljan, Yifan Liu, Yu-Wing Tai, Chi-Keung Tang, and Fisher Yu. Segment anything in high quality. arXiv preprint arXiv:2306.01567, 2023. 1, 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 294, + 545, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 294, + 545, + 348 + ], + "spans": [ + { + "bbox": [ + 308, + 294, + 545, + 348 + ], + "type": "text", + "content": "[21] Beomyoung Kim, Youngjoon Yoo, Chaeun Rhee, and Junmo Kim. Beyond semantic to instance segmentation: Weakly-supervised instance segmentation via semantic knowledge transfer and self-refinement. In CVPR, 2022. 1, 2, 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 349, + 545, + 405 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 349, + 545, + 405 + ], + "spans": [ + { + "bbox": [ + 308, + 349, + 545, + 405 + ], + "type": "text", + "content": "[22] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 
https://segment-anything.com/.1,2,3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 405, + 545, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 405, + 545, + 480 + ], + "spans": [ + { + "bbox": [ + 308, + 405, + 545, + 480 + ], + "type": "text", + "content": "[23] Shiyi Lan, Zhiding Yu, Christopher Choy, Subhashree Radhakrishnan, Guilin Liu, Yuke Zhu, Larry S Davis, and Anima Anandkumar. Discobox: Weakly supervised instance segmentation and semantic correspondence from box supervision. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3406-3416, 2021. 1, 2, 7" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 481, + 545, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 481, + 545, + 515 + ], + "spans": [ + { + "bbox": [ + 308, + 481, + 545, + 515 + ], + "type": "text", + "content": "[24] Issam H. Laradji, Negar Rostamzadeh, Pedro O. Pinheiro, David Vázquez, and Mark Schmidt. Proposal-based instance segmentation with point supervision. In ICIP, 2020. 2, 7" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 515, + 545, + 570 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 515, + 545, + 570 + ], + "spans": [ + { + "bbox": [ + 308, + 515, + 545, + 570 + ], + "type": "text", + "content": "[25] Jungbeom Lee, Jihun Yi, Chaehun Shin, and Sungroh Yoon. Bbam: Bounding box attribution map for weakly supervised semantic and instance segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2643-2652, 2021. 
2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 570, + 545, + 614 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 570, + 545, + 614 + ], + "spans": [ + { + "bbox": [ + 308, + 570, + 545, + 614 + ], + "type": "text", + "content": "[26] Feng Li, Hao Zhang, Peize Sun, Xueyan Zou, Shilong Liu, Jianwei Yang, Chunyuan Li, Lei Zhang, and Jianfeng Gao. Semantic-sam: Segment and recognize anything at any granularity. arXiv preprint arXiv:2307.04767, 2023. 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 614, + 545, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 614, + 545, + 658 + ], + "spans": [ + { + "bbox": [ + 308, + 614, + 545, + 658 + ], + "type": "text", + "content": "[27] Wentong Li, Wenyu Liu, Jianke Zhu, Miaomiao Cui, XianSheng Hua, and Lei Zhang. Box-supervised instance segmentation with level set evolution. In European conference on computer vision, pages 1-18. Springer, 2022. 1, 7" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 658, + 545, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 658, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 308, + 658, + 545, + 714 + ], + "type": "text", + "content": "[28] Wentong Li, Yuqian Yuan, Song Wang, Jianke Zhu, Jianshu Li, Jian Liu, and Lei Zhang. Point2mask: Point-supervised panoptic segmentation via optimal transport. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 572-581, 2023. 
1, 2" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "3593" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "text", + "content": "[29] Wentong Li, Wenyu Liu, Jianke Zhu, Miaomiao Cui, Risheng Yu Xiansheng Hua, and Lei Zhang. Box2mask: Box-supervised instance segmentation via level-set evolution. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 129, + 287, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 129, + 287, + 173 + ], + "spans": [ + { + "bbox": [ + 48, + 129, + 287, + 173 + ], + "type": "text", + "content": "[30] Wentong Li, Yuqian Yuan, Song Wang, Wenyu Liu, Dongqi Tang, Jianke Zhu, Lei Zhang, et al. Label-efficient segmentation via affinity propagation. Advances in Neural Information Processing Systems, 36, 2024. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 174, + 287, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 174, + 287, + 217 + ], + "spans": [ + { + "bbox": [ + 48, + 174, + 287, + 217 + ], + "type": "text", + "content": "[31] Mingxiang Liao, Zonghao Guo, , and Yuze Wang et al. Attentionshift: Iteratively estimated part-based attention map for pointly supervised instance segmentation. In CVPR, 2023. 
2, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 219, + 287, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 219, + 287, + 240 + ], + "spans": [ + { + "bbox": [ + 48, + 219, + 287, + 240 + ], + "type": "text", + "content": "[32] Tsung-Yi Lin, Priya Goyal, and Ross B. Girshick et al. Focal loss for dense object detection. In ICCV, 2017. 5, 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 241, + 287, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 241, + 287, + 275 + ], + "spans": [ + { + "bbox": [ + 48, + 241, + 287, + 275 + ], + "type": "text", + "content": "[33] Tsung-Yi Lin, Michael Maire, and Serge et al. Belongie. Microsoft coco: Common objects in context. In ECCV, 2014. https://cocodataset.org/. 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 276, + 287, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 276, + 287, + 308 + ], + "spans": [ + { + "bbox": [ + 48, + 276, + 287, + 308 + ], + "type": "text", + "content": "[34] Ze Liu, Yutong Lin, and Yue Cao et al. Swin transformer: Hierarchical vision transformer using shifted windows. In ICCV, 2021. 6, 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 309, + 287, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 309, + 287, + 363 + ], + "spans": [ + { + "bbox": [ + 48, + 309, + 287, + 363 + ], + "type": "text", + "content": "[35] Fausto Milletari, Nassir Navab, and Seyed-Ahmad Ahmadi. V-net: Fully convolutional neural networks for volumetric medical image segmentation. In 2016 fourth international conference on 3D vision (3DV), pages 565-571. IEEE, 2016. 
6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 365, + 287, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 365, + 287, + 420 + ], + "spans": [ + { + "bbox": [ + 48, + 365, + 287, + 420 + ], + "type": "text", + "content": "[36] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 115:211-252, 2015. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 421, + 287, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 421, + 287, + 453 + ], + "spans": [ + { + "bbox": [ + 48, + 421, + 287, + 453 + ], + "type": "text", + "content": "[37] Peng Tang and Xinggang Wang et al. Multiple instance detection network with online instance classifier refinement. In CVPR, 2017. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 455, + 287, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 455, + 287, + 487 + ], + "spans": [ + { + "bbox": [ + 48, + 455, + 287, + 487 + ], + "type": "text", + "content": "[38] Peng Tang, Xinggang Wang, and Song Bai et al. PCL: proposal cluster learning for weakly supervised object detection. IEEE TPAMI, 2020. 5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 488, + 287, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 488, + 287, + 520 + ], + "spans": [ + { + "bbox": [ + 48, + 488, + 287, + 520 + ], + "type": "text", + "content": "[39] Zhi Tian, Chunhua Shen, Xinlong Wang, and Hao Chen. Boxinst: High-performance instance segmentation with box annotations. In CVPR, 2021. 
1, 2, 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 522, + 287, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 522, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 48, + 522, + 287, + 555 + ], + "type": "text", + "content": "[40] Zhi Tian, Bowen Zhang, Hao Chen, and Chunhua Shen. Instance and panoptic segmentation using conditional convolutions. IEEE TPAMI, 2023. 1, 2, 6, 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 556, + 287, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 556, + 287, + 588 + ], + "spans": [ + { + "bbox": [ + 48, + 556, + 287, + 588 + ], + "type": "text", + "content": "[41] Xinlong Wang, Tao Kong, Chunhua Shen, Yuning Jiang, and Lei Li. SOLO: segmenting objects by locations. In ECCV, 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 590, + 287, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 287, + 645 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 287, + 645 + ], + "type": "text", + "content": "[42] Xinlong Wang, Rufeng Zhang, Tao Kong, Lei Li, and Chunhua Shen. Solov2: Dynamic and fast instance segmentation. Proc. Advances in Neural Information Processing Systems (NeurIPS), 2020. https://github.com/WXinlong/SOLO.1, 2, 3, 6, 7" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 647, + 287, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 287, + 679 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 287, + 679 + ], + "type": "text", + "content": "[43] Jinyu Yang, Mingqi Gao, Zhe Li, Shang Gao, Fangjing Wang, and Feng Zheng. Track anything: Segment anything meets videos. arXiv preprint arXiv:2304.11968, 2023. 
3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 681, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 681, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 681, + 287, + 713 + ], + "type": "text", + "content": "[44] Xu Zhao, Wenchao Ding, Yongqi An, Yinglong Du, Tao Yu, Min Li, Ming Tang, and Jinqiao Wang. Fast segment anything. arXiv preprint arXiv:2306.12156, 2023. 1, 3" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 72, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 72, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 72, + 545, + 106 + ], + "type": "text", + "content": "[45] Yanning Zhou, Hao Chen, Jiaqi Xu, Qi Dou, and Pheng-Ann Heng. Irnet: Instance relation network for overlapping cervical cell segmentation. In MICCAI, 2019. 1, 7" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "3594" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/Semantically-Shifted Incremental Adapter-Tuning is A Continual ViTransformer/3e66199c-eb4c-4d3b-89c8-b97d56ae08e1_content_list.json b/2024/Semantically-Shifted Incremental Adapter-Tuning is A Continual ViTransformer/3e66199c-eb4c-4d3b-89c8-b97d56ae08e1_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..7144edb278d5c1680ddec2d69f2b0dfd899d01d3 --- /dev/null +++ b/2024/Semantically-Shifted Incremental Adapter-Tuning is A Continual ViTransformer/3e66199c-eb4c-4d3b-89c8-b97d56ae08e1_content_list.json @@ -0,0 +1,1668 @@ +[ + { + "type": 
"text", + "text": "Semantically-Shifted Incremental Adapter-Tuning is A Continual ViTransformer", + "text_level": 1, + "bbox": [ + 76, + 130, + 893, + 154 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yuwen Tan*, Qinhao Zhou*, Xiang Xiang*† \nSchool of Artificial Intelligence and Automation, Huazhong University of Science and Tech., Wuhan, China", + "bbox": [ + 91, + 180, + 555, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ke Wang, Yuchuan Wu, Yongbin Li \nDAMO Academy, \nAlibaba Group, Beijing, China", + "bbox": [ + 589, + 180, + 875, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 268, + 313, + 284 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Class-incremental learning (CIL) aims to enable models to continuously learn new classes while overcoming catastrophic forgetting. The introduction of pre-trained models has brought new tuning paradigms to CIL. In this paper, we revisit different parameter-efficient tuning (PET) methods within the context of continual learning. We observe that adapter tuning demonstrates superiority over prompt-based methods, even without parameter expansion in each learning session. Motivated by this, we propose incrementally tuning the shared adapter without imposing parameter update constraints, enhancing the learning capacity of the backbone. Additionally, we employ feature sampling from stored prototypes to retrain a unified classifier, further improving its performance. We estimate the semantic shift of old prototypes without access to past samples and update stored prototypes session by session. Our proposed method eliminates model expansion and avoids retaining any image samples. It surpasses previous pre-trained model-based CIL methods and demonstrates remarkable continual learning capabilities. 
Experimental results on five CIL benchmarks validate the effectiveness of our approach, achieving state-of-the-art (SOTA) performance.", + "bbox": [ + 75, + 300, + 473, + 632 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 660, + 209, + 676 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In traditional deep learning, the model can access all the data at once and learning is performed on a static dataset. However, in real-life applications, data usually arrives in a stream format with new classes, requiring the model to learn continuously, known as class-incremental learning (CIL). The primary objective of CIL is to enable the model to learn continuously from non-stationary data streams, facilitating adaptation to new classes and mitigating catastrophic forgetting [7]. A number of methods [28, 34, 54] have been devoted to alleviating catastrophic forgetting. Those methods can be mainly divided into replay-based [2, 3, 28], regularization-based [1, 17, 43], and isolation-based meth", + "bbox": [ + 75, + 685, + 468, + 867 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/38761abf699db7fc658056c8941fcecba082ceb556f2f03f620f6f0d613fcd5c.jpg", + "image_caption": [ + "Figure 1. Comparison of different parameter-efficient tuning CIL baselines on CIFAR100 dataset. Left: The relationship between the average accuracy of the incremental sessions and the number of tunable parameters. Right: The average performance of old classes and new classes for each PET method." + ], + "image_footnote": [], + "bbox": [ + 511, + 268, + 696, + 393 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8b6416936343e222840143636fa49bcfddbb157082155ef743fd2c04bfba11a8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 694, + 271, + 880, + 393 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ods [23, 24, 30]. 
However, all these methods assume that models are trained from scratch while ignoring the generalization ability of a strong pre-trained model [5] in the CIL.", + "bbox": [ + 496, + 474, + 892, + 520 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Pre-trained vision transformer models [5] have demonstrated excellent performance on various vision tasks. Recently, it has been explored in the field of CIL and continues to receive considerable attention [37, 38, 45, 50]. Due to the powerful representation capabilities of pre-trained models, CIL methods based on pre-trained models achieve significant performance improvements compared to traditional SOTA methods which are trained from scratch. CIL with a pre-trained model typically fixes the pre-trained model to retain the generalizability and adds a few additional training parameters such as adapter [4], prompt [15] and SSF [22], which is referred to as parameter-efficient tuning (PET).", + "bbox": [ + 496, + 522, + 893, + 703 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Inspired by language-based intelligence, current research in CIL is primarily focused on the prompt-based method [31, 37, 52]. Typically, these approaches require the construction of a pool of task-specific prompts during the training phase which increases storage overhead. Additionally, selecting prompts during the testing stage incurs additional computational costs. Other PET methods as well as fully fine-tuning are still in exploration in the context of CIL. Recently, SLCA [45] proposes fine-tuning the entire ViT and classifier incrementally with different learning rates. However, fine-tuning the entire pre-trained model requires substantial computational resources. 
In addition, Adam [50] initially explores the application of other PET", + "bbox": [ + 496, + 704, + 895, + 901 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal contribution, co-first author; also with Nat. Key Lab of MSIPT.", + "bbox": [ + 93, + 875, + 467, + 886 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "†Correspondence to xex@hust.edu.cn; also with Peng Cheng Lab.", + "bbox": [ + 98, + 887, + 468, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "23252", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "methods in CIL using first-session adaptation and branch fusion. Training in the first stage and subsequently freezing the model can reduce training time but result in lower accuracy for subsequent new classes. Our linear probing results reveal that the first-session adaptation is insufficient when there is a significant domain discrepancy between downstream data and the pre-trained model.", + "bbox": [ + 75, + 90, + 470, + 196 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we first revisit different PET methods within the CIL paradigm. As shown in Fig. 1, we observe that adapter tuning [4] is a better continual learner than prompt-tuning [15] and SSF-tuning [22]. When progressively fine-tuning the prompt and SSF parameters, the forgetting of old classes is catastrophic. In comparison, adapter tuning effectively balances learning new classes and maintaining performance in old classes. 
Unlike prompt-based methods, which require constructing a prompt pool, adapter tuning avoids catastrophic forgetting even sharing the same parameters across learning sessions. Additionally, the adapter balances the number of tuning parameters and model performance compared to fully fine-tuning. Moreover, unlike previous methods that use feature distillation loss to restrict changes in shared parameters as part of overall loss, we analyze that tuning with constraints hinders continual learning from the perspective of parameter sensitivity. Therefore, we train the adapter and task-specific classifier without parameter regularization in each session, allowing for greater plasticity in learning new classes.", + "bbox": [ + 75, + 204, + 473, + 507 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "As we only train the local classifier in each learning session, we propose to adopt a new classifier retraining method [32, 45, 54] to further improve the CIL performance. First, we implicitly compute the semantic shift [42] of previous prototypes which leverages the semantic shift of current task samples to estimate the change of old classes. Then, we sample several features according to the updated prototypes to retrain the classifier which is more effective than previous methods. 
The advantages of our proposed method can be summarized as follows: 1) Fine-tuning adapters significantly reduces training costs and improves learning efficiency; 2) We do not need to retain any image samples; 3) The accuracy for new classes is relatively high which verifies the continual learning capacity of the model.", + "bbox": [ + 75, + 515, + 470, + 727 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, our proposed learning framework has the following main contributions: (1) Different from various devotion into the prompt-based methods for CIL, we discover that incrementally tuning adapter is a better continual learner even without constructing an adapter-pool; (2) After each session adaptation with local classifier, we propose to retrain a unified classifier with the semantic shift compensated prototypes which can further improve the performance; (3) Extensive experimental results on five CIL benchmarks demonstrate the superiority of the proposed simple but effective methods which achieves the SOTA.", + "bbox": [ + 75, + 734, + 470, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 89, + 642, + 106 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Class-incremental Learning", + "text_level": 1, + "bbox": [ + 500, + 114, + 751, + 131 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Class-incremental learning requires the model to be continuously updated with new class instances while retaining old knowledge [49]. Traditional CIL methods can be categorized into replay-based [2, 3, 28], regularization-based [17, 40, 43, 53], and parameter isolation-based methods [23, 24, 30]. Replay-based methods involve retaining or generating samples of previous classes and incorporating them into the current training phase. These methods often employ strategies for sample selection or sample generation to effectively replay past information. 
Regularization-based methods add constraints or penalties in the learning process which limit the update of the parameters that are important for old classes. Isolation-based methods aim to isolate and update task-specific parameters. By focusing on updating only a subset of parameters, these methods can mitigate catastrophic forgetting. To expand the representative capacity of a model without compromising its existing knowledge, methods for expanding the network have been proposed [34, 41, 48]. These methods dynamically extend the feature extraction network, combined with the replay-based method, achieving dramatic performance improvements.", + "bbox": [ + 496, + 137, + 893, + 455 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Parameter-Efficient Tuning", + "text_level": 1, + "bbox": [ + 500, + 465, + 748, + 482 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Parameter-Efficient Tuning can be considered as a transfer learning method. It refers to not performing full fine-tuning on a pre-trained model, instead inserting and fine-tuning specific sub-modules within the network. This approach is initially demonstrated to have effective transfer learning results in NLP [13, 14, 19, 20]. Recently, similar approaches have been applied to vision transformer models as well. AdaptFormer [4] inserts lightweight modules after the MLP layers in the attention module and has been found to outperform full fine-tuning on action recognition benchmarks. Another PET approach SSF [22] surprisingly outperforms other methods in certain tasks even with a smaller number of parameters. Inspired by the prompt approach used in the language model, VPT [15] applies it to visual models and achieves impressive results across various downstream tasks while only introducing a small number of additional parameters. 
Furthermore, the prompt-based method has also been used in vision-language models [27, 46, 51, 52] to improve performance on various downstream tasks.", + "bbox": [ + 496, + 488, + 893, + 777 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.3. Continual Learning on a Pre-trained Model", + "text_level": 1, + "bbox": [ + 500, + 786, + 872, + 801 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The aforementioned CIL methods all involve training the model from scratch, while CIL with pre-trained model [35, 39, 50, 52] has gained much attention due to its strong feature representation ability. L2P [52] utilizes the pretrained model and learns a set of extra prompts dynamically to guide the model to solve corresponding tasks. Du", + "bbox": [ + 496, + 809, + 893, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "23253", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/357d2bfef8d65c36341d1ff6b88ea039f1345d7805989618c7494ad3d9e309dc.jpg", + "image_caption": [ + "(I) Incremental Adapter Tuning" + ], + "image_footnote": [], + "bbox": [ + 119, + 108, + 436, + 402 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/43934528ae263193d23078904a8836507b8bcb4e088a6e5da5c03cfc045ccfbb.jpg", + "image_caption": [ + "(II) Semantic Shift Estimation", + "(III) Unified Classifier Training" + ], + "image_footnote": [], + "bbox": [ + 450, + 99, + 851, + 281 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/b5d9fafbba2e6239eee9791679047ae39d6413db2ec80a4e28445ecce8e73f16.jpg", + "image_caption": [ + "Figure 2. The framework of our proposed method. Left: The illustration of the structure of ViT and adapter. The adapter and local classifier are incrementally trained in each session using the Eq. 4. Right: The process of retraining the classifier with semantic shift estimation." 
+ ], + "image_footnote": [], + "bbox": [ + 457, + 300, + 848, + 404 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "alPrompt [37] proposes to learn of two mutually unrelated prompt spaces: the general prompt and the expert prompt. It encodes task-invariant instructions and task-specific instructions, respectively. CODAPrompt [31] introduces a decomposed attention-based continual learning prompting method, which offers a larger learning capacity than existing prompt-based methods [37, 52]. SLCA [45] explores the fine-tuning paradigm of the pre-trained models, setting different learning rates for backbone and classifiers, and gains excellent performance. Adam [50] proposes to construct the classifier by merging the embeddings of a pretrained model and an adapted downstream model. LAE [8] proposes a unified framework that calibrates the adaptation speed of tuning modules and ensembles PET modules to accomplish predictions.", + "bbox": [ + 75, + 441, + 472, + 670 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Methodology", + "text_level": 1, + "bbox": [ + 76, + 684, + 210, + 702 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Preliminary", + "text_level": 1, + "bbox": [ + 76, + 709, + 207, + 726 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Class-incremental learning formulation: We first introduce the definition of CIL. Consider a neural network $\\mathcal{M}_{\\theta} = f_{\\theta_{cls}}(\\mathcal{F}_{\\theta_{bne}}(\\cdot))$ with trainable parameters $\\theta = \\{\\theta_{bne},\\theta_{cls}\\}$ . $\\mathcal{F}_{\\theta_{bne}}$ represents the feature extraction backbone which extracts features from input images and $f_{\\theta_{cls}}$ stands for the classification layer that projects feature representations to class predictions. 
In CIL setting, $\\mathcal{M}_{\\theta}$ needs to learn a series of sessions from training data $D_{t} = \\{(x_{1}^{t},y_{1}^{t}),(x_{2}^{t},y_{2}^{t}),\\ldots \\}, t = 1,\\ldots ,T$ and satisfy the condition $Y(i)\\cap Y(j) = \\emptyset ,i\\neq j$ where $Y(i)$ represent the label set in session $i$ . The goal of $\\mathcal{M}_{\\theta}$ is to perform well", + "bbox": [ + 75, + 734, + 472, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "on test sets that contain all the classes learned denoted as $\\mathcal{Y} = Y(1) \\cup \\ldots \\cup Y(t)$ after $t$ -th session.", + "bbox": [ + 496, + 441, + 890, + 472 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Parameter-efficient tuning with Adapter: An adapter is a bottleneck structure [4] that can be incorporated into a pre-trained transformer-based network to facilitate transfer learning and enhance the performance of downstream tasks. An adapter typically consists of a downsampled MLP layer $W_{down} \\in \\mathbb{R}^{d \\times d}$ , a non-linear activation function $\\sigma$ , and an upsampled MLP layer $W_{up} \\in \\mathbb{R}^{d \\times d}$ . Denote the input as $x_{i}$ , we formalize the adapter as", + "bbox": [ + 496, + 473, + 892, + 595 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\no u t = x _ {i} + s \\cdot \\sigma \\left(x _ {i} * W _ {\\text {d o w n}}\\right) * W _ {u p}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 566, + 604, + 890, + 622 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $*$ stands for the matrix multiplication, $\\sigma$ denotes the activation function RELU, and $s$ denotes the scale factor.", + "bbox": [ + 496, + 623, + 890, + 652 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Parameter-efficient tuning with SSF: SSF [22] modulates pre-trained models using scale and shift factors to align the feature distribution of downstream tasks. SSF inserts its layers in each transformer operation. 
Suppose $x_{i}$ is the output of one of the modules, SSF can be represented as", + "bbox": [ + 496, + 654, + 890, + 729 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\ny = \\gamma \\odot x _ {i} + \\beta , \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 637, + 739, + 890, + 755 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\gamma \\in \\mathbb{R}^d$ and $\\beta \\in \\mathbb{R}^d$ denote the scale and shift factor, respectively. $\\odot$ stands for Hadamard product.", + "bbox": [ + 496, + 763, + 890, + 795 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Parameter-efficient tuning with VPT: Visual Prompt Tuning (VPT) inserts a small number of trainable parameters in the input space after the embedding layer [15]. It is called prompts and only these parameters will be updated in the fine-tuning process. Depending on the number of layers inserted, VPT can be categorized as VPT-shallow and VPT-deep. Suppose $P = \\{p^k \\in R^d | 1 \\leq k \\leq n\\}$ and the input", + "bbox": [ + 496, + 795, + 892, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "23254", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "embedding is $x$ , VPT will combine $x$ with $P$ as", + "bbox": [ + 76, + 90, + 392, + 104 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nx ^ {\\prime} = [ x, P ], \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 230, + 118, + 468, + 135 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $n$ is the number of prompts and the $x'$ will be passed into subsequent blocks.", + "bbox": [ + 76, + 140, + 468, + 171 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. 
Adapter-tuning without parameter constraints", + "text_level": 1, + "bbox": [ + 76, + 181, + 468, + 198 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Most of the work based on pre-trained models focuses on how to apply the prompt-tuning strategies to the CIL paradigm. However, tuning the same prompt parameters across each learning session will cause catastrophic forgetting. As shown in Fig. 1, when progressively training the shared extra module while keeping the pre-trained model fixed, the adapter demonstrates its superiority over other tuning methods such as prompt-tuning and SSF. Fine-tuning the shared adapter incrementally seems to well balance the learning of new classes and old-knowledge retaining. Based on this observation, we delve deeper into incremental adapter tuning and use it as our baseline. The whole framework of the proposed method is shown in Fig. 2. Some methods [25, 47] adopt the first-session adaption and then fix the backbone. In addition, previous methods often utilize knowledge distillation [12] (KD) loss to restrict parameter changes of the feature extractor to mitigate forgetting. Totally different from earlier methods [17, 21, 28], we propose that the shared adapter should be tuned incrementally without parameter constraints. Next, we will provide a detailed description of the proposed baseline and offer a reasonable explanation and analysis.", + "bbox": [ + 75, + 205, + 468, + 536 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Implementation of adapter-based baselines: During incremental training sessions, only adapter and classifier layers are updated, and the pre-trained ViT model is frozen. As the cosine classifier has shown great success in CIL, we follow ALICE [26] to use the cosine classifier with a margin. The margin hyper-parameter could also be used as a balance factor to decide the learning and retaining. 
The training loss can be formulated as follows:", + "bbox": [ + 75, + 537, + 468, + 657 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} ^ {t} = - \\frac {1}{N ^ {t}} \\sum_ {j = 1} ^ {N ^ {t}} \\log \\frac {e ^ {s \\left(\\cos \\theta_ {j} ^ {i} - m\\right)}}{e ^ {s \\left(\\cos \\theta_ {j} ^ {i} - m\\right)} + \\sum_ {c = 1} ^ {Y (t) - \\{i \\}} e ^ {s \\left(\\cos \\theta_ {j} ^ {c}\\right)}} \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 660, + 468, + 715 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\cos\\theta_{j}^{i} = \\frac{w_{i}*f_{j}}{||w_{i}||*||f_{j}||}$ , $N^t$ denotes the number of training samples of the current session, $s$ and $m$ represent the scale factor and margin factor, respectively.", + "bbox": [ + 76, + 715, + 468, + 763 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As we do not retain any image samples, the gradients computed during the optimization of current samples not only affect the newly trained classifiers but also have an impact on the previously learned classifiers. The forgetting of the classifier is significant when no samples are retained. Thus, we follow previous work [8, 36, 45] to adopt the local training loss where we only compute the loss between current logits and labels and hinder the gradient updates of the previous classifier which alleviates the classifier forgetting.", + "bbox": [ + 75, + 763, + 468, + 901 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/ac25e42f8d7773a6e5121ab88de386b47d286589750d72ecca9801b378168139.jpg", + "image_caption": [ + "Figure 3. Comparison of the performance on ImageNetR dataset with different extent of parameter constraints. Left: The overall accuracy of each session. Right: The accuracy of new classes." 
+ ], + "image_footnote": [], + "bbox": [ + 501, + 90, + 694, + 214 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/75f5180e732ccfbe142d9d19964860bfccf8aa09cca2f33bd8a160c3c39dedc4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 694, + 90, + 883, + 214 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/29a1640ab22aa0138ffadfc5931fc33b54b611ac45c2e33a46449ded5a59439a.jpg", + "image_caption": [ + "Figure 4. Parameter sensitivity analysis on the ImageNetR dataset. Left: The parameter sensitiveness of two incremental tasks. Right: The sensitiveness of different parameters in one task." + ], + "image_footnote": [], + "bbox": [ + 501, + 260, + 697, + 388 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/9efc944cb5097dbecb40a0fc2adc8dc87415b8a5d0a488d8589e8ecbc9609644.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 697, + 261, + 885, + 387 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Analysis of the adapter-based baseline: We will analyze why the adapter shows its superiority in the CIL over other PET methods, and why we choose to incrementally tune the shared adapter without parameter constraints.", + "bbox": [ + 498, + 436, + 890, + 496 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "First, we elaborate on why incrementally tuning the adapter is better in the context of CIL. By utilizing the residual structure, the adapter can retain the generalization capabilities from the pre-trained model while adapting to new tasks. The incremental tuning of the adapter exhibits a cumulative learning capability, where the representational capacity of the adapter is further enhanced as the learning sessions progress. In contrast, both SSF and prompt tuning have limitations when it comes to handling CIL. These methods suffer from overfitting to the current distribution. 
When the shared parameters excessively overfit each current task, the model gradually loses its generalization ability which is harmful for training a unified model for CIL. Then, we try to utilize KD loss to implicitly limit parameter updates and adjust the weighting factor. As shown in Fig. 3, the results demonstrate that unconstrained training is more beneficial for new-classes learning and improving overall performance. Based on this observation, we propose our proposition from the perspective of parameter sensitivity.", + "bbox": [ + 496, + 497, + 892, + 782 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Proposition 1: Confining the change of parameters of previous tasks hinders the plasticity of new classes due to the similarity of parameter sensitivity among tasks.", + "bbox": [ + 498, + 784, + 890, + 828 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Proof: Given the parameter set $\\theta = \\{\\theta_1, \\theta_2, \\dots, \\theta_N\\}$ and training set $D_t = (X_t, Y_t)$ in $t$ -th session, the definition of parameter sensitivity [9, 47] is defined as", + "bbox": [ + 498, + 829, + 890, + 875 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\ns _ {i} ^ {t} = \\mathcal {L} \\left(X _ {t}, Y _ {t} \\mid \\theta_ {i}\\right) - \\mathcal {L} \\left(X _ {t}, Y _ {t} \\mid \\theta_ {i} ^ {*}\\right), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 576, + 883, + 890, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "23255", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\theta_{i}^{*} = \\theta_{i} + \\Delta \\theta_{i}$ and $\\mathcal{L}$ denotes the optimized loss in the classification task. 
We use the first-order Taylor expansion, and the parameter sensitivity can be rewritten as follows:", + "bbox": [ + 76, + 90, + 468, + 135 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\ns _ {i} = - g _ {i} \\Delta \\theta_ {i} = - \\frac {\\delta \\mathcal {L}}{\\delta \\theta_ {i}} * \\Delta \\theta_ {i}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 173, + 138, + 468, + 170 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "as $\\Delta \\theta_{i}$ denotes the update after the training process, we follow the work [9] to use the one-step update to approximate the $\\Delta \\theta_{i} = \\epsilon \\frac{\\delta\\mathcal{L}}{\\delta\\theta_{i}}$ . Therefore, the parameter can be approximately computed as $s_i \\approx -\\epsilon \\left(\\frac{\\delta\\mathcal{L}}{\\delta\\theta_i}\\right)^2$ . As shown in Fig. 4, the sensitivity values of tuning parameters for two different sessions are nearly equal and the most sensitive parameters are always up weights. This means that constraining the parameter update would hinder the learning of new classes and further impede the ability of the model for continual learning. Furthermore, in the experimental section, we demonstrate the representative capacity of the adapter continued to strengthen through incremental tuning.", + "bbox": [ + 75, + 180, + 470, + 364 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. Semantic shift estimation without past samples", + "text_level": 1, + "bbox": [ + 76, + 373, + 468, + 390 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Due to the selective updating of classifiers corresponding to the current task during training, the classifiers across different learning sessions are not fully aligned in the same feature space. To further optimize classifiers, we store the prototypes after training the backbone and local classifier. 
However, as the backbone is trained incrementally with new classes, the feature distribution of old classes undergoes changes. Retraining the classifier with the previous prototypes is sub-optimal. Since the feature representability of the backbone updates over time, using outdated features may not effectively retrain a unified classifier. To solve this problem, we update the feature distribution of old classes by computing the semantic shift over the learning process. We follow SDC [42] to estimate the semantic shift of old prototypes without access to past samples.", + "bbox": [ + 75, + 396, + 468, + 622 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Suppose $\\varphi_c^t$ denotes the prototype of category $c$ in session $t$ and $r$ is the learning session that the category belongs to. We have no access to the samples of category $c$ to update the prototype in session $t$ (when $t > r$ ). The semantic shift of class $c$ between two sessions can be represented as", + "bbox": [ + 76, + 623, + 468, + 698 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\Delta_ {c} ^ {r \\rightarrow t} = \\varphi_ {c} ^ {t} - \\varphi_ {c} ^ {r}, \\quad \\varphi_ {c} ^ {r} = \\frac {1}{N _ {r} ^ {c}} \\sum_ {n = 1} ^ {N _ {r} ^ {c}} \\mathcal {F} \\left(X _ {r} ^ {c}, \\theta_ {r}\\right). \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 116, + 702, + 468, + 744 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "While we do not have access to data from the old class $c$ , we can only estimate the shift of current task categories on old and new models. 
The semantic shift of current samples between two sessions can be represented as", + "bbox": [ + 76, + 755, + 468, + 816 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\delta_ {i} ^ {t - 1 \\rightarrow t} = e _ {i} ^ {t} - e _ {i} ^ {t - 1}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 200, + 825, + 468, + 844 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $e$ denotes the embedding of one sample in the current task $t$ . We can compute $e_i^{t - 1}$ at the start of the current task with the model trained in task $t - 1$ . After training", + "bbox": [ + 76, + 854, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "on the new task, we compute $\\delta_i^{t - 1\\to t}$ and use it to estimate $\\Delta_c^{t - 1\\to t}$ . We compute the shift as", + "bbox": [ + 500, + 89, + 890, + 122 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\widetilde {\\Delta} _ {c} ^ {t - 1 \\rightarrow t} = \\frac {\\sum \\alpha_ {i} \\delta_ {i} ^ {t - 1 \\rightarrow t}}{\\sum \\alpha_ {i}}, c \\notin C ^ {t}, \\tag {9} \\\\ \\alpha_ {i} = \\mathbf {e} ^ {- \\frac {| | e _ {i} ^ {t - 1} - \\varphi_ {c} ^ {t - 1} | |}{2 \\sigma^ {2}}}, \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 584, + 126, + 890, + 186 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\sigma$ is the standard deviation of the distribution of class $c$ ; $C^t$ denotes classes learned in the current session. Before retraining the classifier, we update the prototypes with", + "bbox": [ + 498, + 200, + 890, + 246 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\{\\begin{array}{l l}\\varphi_ {c} = \\varphi_ {c} ^ {t - 1} + \\widetilde {\\Delta} _ {c} ^ {t - 1 \\rightarrow t}&, c \\notin C ^ {t}\\\\\\varphi_ {c} = \\frac {1}{N _ {c}} \\sum_ {i} e _ {c}&, c \\in C ^ {t},\\end{array}\\right. 
\\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 568, + 255, + 890, + 291 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $N_{c}$ denotes the number of images in class $c$ .", + "bbox": [ + 500, + 297, + 834, + 313 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. Unified classifier training", + "text_level": 1, + "bbox": [ + 500, + 324, + 730, + 340 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Previous work [32, 45, 54] has attempted to retrain a unified classifier by modeling each class as a Gaussian distribution and sampling features from the distribution. We refer to this method as classifier alignment (CA) and adopt a similar approach that incorporates semantic shift estimation, which we denote as SSCA. Specifically, we compute the class prototypes $P_{c} = \\{\\varphi_{1},\\dots,\\varphi_{C}\\}$ and covariance $\\Sigma_{c} = \\{\\varsigma_{1},\\dots,\\varsigma_{C}\\}$ for each class after training process in each learning session. The calculation of class prototypes is based on Eq. 10. Due to the capability of the trained backbone network to provide well-distributed representations, each class exhibits an unimodal distribution. Therefore, we form a normal distribution $\\mathcal{N}(\\mu_c,\\Sigma_c)$ for each class with class prototype and variance. We sample features $\\mathcal{V}_c = \\{v_{c,1},\\dots v_{c,S_n}\\}$ from the distribution to obtain diverse samples, where $S_{n}$ is the number of the sample features for each class. 
Then, we use these features to train classification layers $\\theta_{cls}$ with a commonly used cross-entropy loss as", + "bbox": [ + 498, + 348, + 890, + 619 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} \\left(\\theta_ {c l s}, \\mathcal {V} _ {c}\\right) = - \\sum_ {i = 1} ^ {S _ {n} * C} \\log \\frac {\\mathbf {e} ^ {\\left(\\theta_ {c l s} ^ {j} \\left(v _ {i}\\right)\\right)}}{\\sum_ {k \\in C} \\mathbf {e} ^ {\\left(\\theta_ {c l s} ^ {k} \\left(v _ {i}\\right)\\right)}}, \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 535, + 625, + 890, + 666 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $C$ denotes all classes learned so far. We normalize the features and classifier the same as backbone training.", + "bbox": [ + 498, + 669, + 890, + 700 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 500, + 715, + 632, + 732 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Datasets and Evaluation Protocols", + "text_level": 1, + "bbox": [ + 500, + 739, + 797, + 755 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Dataset: We evaluate our method on four commonly-used CIL benchmarks and one cross-domain CIL dataset. We randomly split the dataset into 10 or 20 learning tasks. CIFAR100 [18] is a widely used dataset in CIL which consists of 60000 images, belonging to 100 different categories. CUB200 [33] is a dataset that contains approximately 11,788 images of 200 bird species with fine-grained class labels. 
Additionally, we also follow recent work [45, 50] to use the other three datasets which have a large", + "bbox": [ + 498, + 763, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "23256", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/552694258e6782543edf674b329e17b72e22f00b6216319331081807893685cb.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodParamsSplit-ImageNetRSplit-ImageNetACUB200CIFAR100
\\( A_{Last} \\uparrow \\)\\( A_{Avg} \\uparrow \\)\\( A_{Last} \\uparrow \\)\\( A_{Avg} \\uparrow \\)\\( A_{Last} \\uparrow \\)\\( A_{avg} \\uparrow \\)\\( A_{Last} \\uparrow \\)\\( A_{avg} \\uparrow \\)
Joint86M81.72±0.35-50.56±1.75-88.17±0.32-89.71±0.07-
FT86M20.93±0.8640.35±0.746.03±4.7416.57±5.822.05±1.6945.67±2.0422.17±1.0941.83±1.60
SLCA [45]86M79.35±0.2883.29±0.4661.05±0.6368.88±2.3184.68±0.0990.77±0.7991.26±0.3794.29±0.92
Adam-adapter [50]1.19M65.79±0.9872.42±1.4148.81±0.0858.84±1.3785.84±0.0891.33±0.4987.29±0.2791.21±1.33
Adam-ssf [50]0.2M66.61±0.0974.36±1.0048.94±0.1458.79±2.8285.67±0.1590.99±0.7685.27±0.2189.90±0.98
Adam-prompt [50]0.04M65.29±1.5272.97±0.5629.29±7.4239.14±7.5985.28±0.4790.89±0.8685.04±1.0489.49±0.58
LAE [8]0.19M72.29±0.1477.99±0.4647.18±1.1758.15±0.7380.97±0.5187.22±1.2185.25±0.4389.80±1.20
L2P [38]0.04M72.34±0.1777.36±0.6444.04±0.9351.24±2.2667.02±1.9079.62±1.6084.06±0.8888.26±1.34
ADA [6]1.19M73.76±0.2779.57±0.8450.16±0.2059.43±2.2076.13±0.9485.74±0.2688.25±0.2691.85±1.32
DualPrompt [37]0.25M69.10±0.6274.28±0.6653.19±0.7464.59±0.0868.48±0.4780.59±1.5086.93±0.2491.13±0.32
CODAPrompt [31]3.84M73.31±0.5078.47±0.5352.08±0.1263.92±0.1277.23±1.1281.90±0.8583.21±3.3987.71±3.17
SSIAT (Ours)1.19M79.38±0.5983.63±0.4362.43±1.6370.83±1.6388.75±0.3893.00±0.9091.35±0.2694.35±0.60
", + "bbox": [ + 91, + 88, + 880, + 277 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/c65dbad5c97f094610f8f85bf7d9c4dcffc02992b9ce62a69a4f09322a0603c3.jpg", + "image_caption": [ + "Figure 5. The performance of each learning session on four datasets. (a) ImageNetR; (b) ImageNetA; (c) CUB200; (d) CIFAR100. These curves are plotted by calculating the average performance across three different seeds for each incremental session." + ], + "image_footnote": [], + "bbox": [ + 86, + 294, + 285, + 422 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/6f3089f711c6abdb9a01251408912e42063b6e283306d0c5749cd35e4a860cc0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 282, + 294, + 483, + 422 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/da6c6bfa4fdbc1f75ccd7f2b7fc8fc253efa4bea5fd72daf44b1de72386be979.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 483, + 294, + 681, + 422 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/07aead82b1259ae870c4fcbfc8fe05b7051d0987697a8a32e0755c5215962f6c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 681, + 294, + 880, + 422 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/8eb4e21dc97033e3636f804767fb91802d66bfb5a36f4d3b74e9aefb9f3b8eff.jpg", + "table_caption": [ + "Table 1. Experimental results on four CIL benchmarks. All other methods are reproduced using the same seeds for a fair comparison." + ], + "table_footnote": [], + "table_body": "
MethodImageNetRImageNetA
\\( A_{Last} \\uparrow \\)\\( A_{Avg} \\uparrow \\)\\( A_{Last} \\uparrow \\)\\( A_{Avg} \\uparrow \\)
SLCA [45]74.63±1.5579.92±1.2936.69±21.3156.35±7.09
Adam-adapter[50]57.42±0.8464.75±0.7948.65±0.1259.55±1.07
Adam-ssf[50]64.30±0.9472.42±1.4747.27±4.3458.36±4.70
Adam-prompt[50]59.90±1.1368.02±1.0229.93±4.8839.13±4.19
LAE [8]69.86±0.4377.38±0.6139.52±0.7851.75±2.15
L2P [38]69.64±0.4275.28±0.5740.48±1.7849.62±1.46
DualPrompt [37]66.61±0.5872.45±0.3742.28±1.9453.39±1.64
CODAPrompt [31]69.96±0.5075.34±0.8544.62±1.9254.86±0.50
SSIAT (Ours)75.67±0.1482.30±0.3659.16±1.0368.45±1.92
", + "bbox": [ + 81, + 465, + 470, + 593 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2. Experimental results for long-sequences (20 incremental sessions) on ImageNetR and ImageNetA dataset.", + "bbox": [ + 75, + 597, + 468, + 626 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "domain gap with pre-training data. ImageNetR [10] consists of 30,000 images with 200 categories. Although its categories overlap with ImageNet-21K [29], the images belong to a different domain. ImageNetA [11] is a real-world dataset that consists of 200 categories. This dataset exhibits significant class imbalance, with some categories having only a few training samples. VTAB [44] is a complex dataset that consists of 19 tasks covering a broad spectrum of domains and semantics. We follow previous work [50] to select 5 tasks to construct a cross-domain CIL dataset.", + "bbox": [ + 75, + 638, + 468, + 790 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Implementation details: We use ViT-B/16 [5] as the pre-trained model, which is pre-trained on ImageNet-21K [29]. The initial learning rate is set as 0.01 and we use the cosine Anneal scheduler. In our experiments, we train the first session for 20 epochs and 10 epochs for later sessions. Following previous papers [45, 50], we use common evaluation metrics in CIL. Specifically, we report the last session", + "bbox": [ + 75, + 794, + 470, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "accuracy $\\mathcal{A}_{Last}$ and average accuracy of the whole incremental sessions $\\mathcal{A}_{Avg} = \\frac{1}{T}\\sum_{i=1}^{T}\\mathcal{A}_i$ . We utilize three different seeds to generate three different class orders for evaluating various methods. We report the mean and standard deviation based on the three experiments. See codes1.", + "bbox": [ + 496, + 468, + 892, + 544 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. 
Experiment Results", + "text_level": 1, + "bbox": [ + 498, + 551, + 687, + 569 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For a fair comparison, we compare our methods with SOTA CIL methods based on the pre-trained vision transformer model. We compare our methods with prompt-based methods L2P [52], DualPrompt [37], CODAPrompt [31], finetuning methods SLCA [45], and adapter-based method [6, 8, 50]. Tab. 1 shows $\\mathcal{A}_{Avg}$ and $\\mathcal{A}_{Last}$ with three different seeds on four CIL benchmarks.", + "bbox": [ + 496, + 575, + 890, + 680 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "CUB200 & CIFAR100: We first report the results of each method on the CUB200 and CIFAR100 datasets. Since these two datasets overlap with the pre-training data, methods based on a pre-trained model achieve a huge improvement in performance compared with methods that are trained from scratch. For example, as shown in Tab. 1, the average accuracy on L2P, DualPrompt, and CODAPrompt reached $88.26\\%$ , $91.13\\%$ , and $87.71\\%$ on CIFAR100, respectively. Nevertheless, our method still outperforms those prompt-based methods. Besides, our method does not require the construction of a prompt pool which allows each task to learn specific prompt parameters. The adapter is shared across tasks and our method avoids the parameter", + "bbox": [ + 496, + 681, + 892, + 878 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "1https://github.com/HAIV-Lab/SSIAT", + "bbox": [ + 517, + 886, + 787, + 898 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "23257", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/59651ade57516cd523197da551cad070f000ab3f9e0e2012a2ef8c178f2788da.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodSes.1Ses.2Ses.3Ses.4Ses.5Avg↑
Adam-adapter[50]87.6086.0789.1482.7284.3585.97
Adam-ssf[50]89.6088.2189.9480.5082.3886.13
Adam-vpt[50]90.2087.5789.6980.3982.1886.01
SLCA[45]94.8092.4393.5493.9894.3393.82
LAE [8]97.9985.2679.6878.7874.3683.21
SSIAT (Ours)96.1092.7194.0993.6894.5094.21
", + "bbox": [ + 81, + 88, + 467, + 181 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3. Experimental results for different methods on VTAB dataset which contain 5 datasets from different domains.", + "bbox": [ + 76, + 186, + 467, + 213 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "expansion with tasks increasing. Even though the Adam-adapter/SSF/prompt only needs to train in the first stage which requires less training time, the performance of those methods is inferior to our proposed method. Although the performance of SLCA is comparable to our method in CIFAR100, the number of tuning parameters of our method is much smaller. Besides that, the average performance of our method on CUB200 is $93.00\\%$ , nearly $2.3\\%$ improvement over SLCA. Fig. 5 (c) (d) shows the incremental accuracy of each session on CUB200 and CIFAR100 and our method is always at the top of all lines in the incremental process.", + "bbox": [ + 75, + 218, + 467, + 383 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "ImageNetR & ImageNetA: We report the performance on ImageNetR and ImageNetA in Tab. 1. These two datasets are more difficult due to the domain gap with the pre-training data. It can be seen that the performance of each method on these two datasets is lower than CIFAR100 and CUB200. Besides, we can see that SLCA outperforms other previous methods significantly on these two datasets. Notably, SLCA achieves an impressive last accuracy on ImageNetR, surpassing the other methods. In contrast, our method achieves SOTA-level performance on both datasets with fewer tuning parameters. Based on Fig. 5, the performance of our method is slightly higher than SLCA in several learning sessions with fewer tuning parameters on the ImageNetR dataset. On the ImageNetA dataset, our method achieves the last accuracy of $62.43\\%$ , surpassing SLCA by $1.39\\%$ . 
The average accuracy across all sessions is $70.83\\%$ , showing a $2\\%$ improvement.", + "bbox": [ + 75, + 383, + 467, + 640 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Additionally, we evaluate the performance of each method under the condition of long sequences. In this setting, each session consists of only 10 classes, and the results are summarized in Tab. 2. Our method also maintains excellent performance in terms of $\\mathcal{A}_{Last}$ and $\\mathcal{A}_{Avg}$ . The performance of SLCA is highly dependent on the class order in which the training data appears, resulting in a substantial variance in $\\mathcal{A}_{Last}$ on ImageNetA. In contrast, the Adam-based methods remain relatively stable in long-sequence settings. For Adam-SSF, the long sequence only leads to a nearly $2\\%$ performance drop in ImageNetR. However, for SLCA, its performance drops by $5\\%$ on ImageNetR and nearly $10\\%$ on ImageNetA. In comparison, our method demonstrates excellent stability on long sequences and outperforms other methods by a large margin.", + "bbox": [ + 75, + 643, + 467, + 868 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "VTAB: VTAB is a cross-domain CIL dataset where each task provides training data from a different domain. Based", + "bbox": [ + 76, + 869, + 467, + 900 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "on the results presented in Tab. 3, it can be observed that both SLCA and our method perform well in cross-domain CIL. Specifically, in the last incremental stage, our method achieves an accuracy that is $12\\%$ higher than the Adam-based methods. Adam-based methods only perform finetuning in the first task and are not able to adapt well to subsequent tasks on the cross-domain dataset.", + "bbox": [ + 496, + 90, + 890, + 196 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. 
Ablation Study", + "text_level": 1, + "bbox": [ + 500, + 209, + 651, + 224 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Baselines with different PET methods: Tab. 4 shows the results of baselines with three different parameter-efficient tuning methods in each incremental session. It can be observed that the pre-trained model with an adapter achieves the best performance in terms of both the last session accuracy and average accuracy. Fig. 1 demonstrates that tuning with an adapter achieves a better balance between learning new classes and retaining knowledge of old classes. Both VPT-deep and SSF methods tend to prioritize learning new categories, which leads to increased forgetting of previously learned categories. Although VPT-shallow performs well on CIFAR, its limited parameters hinder the model from incrementally learning new classes on ImageNetR. More results on the other datasets can be found in the Supp.", + "bbox": [ + 496, + 233, + 890, + 445 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Unified classifier retraining vs. Separate local classifier: As we train separate task-specific classifiers in each incremental session, we propose to retrain the classifier to find the optimal decision boundary for all the classes. Tab. 5 displays the ablation experiments of the classifier re-trained on ImageNetA which is the most difficult benchmark. It can be observed that whether it is a linear or a cosine classifier, retraining the classifier leads to a significant performance improvement. Additionally, incorporating the computation of prototype semantic shifts further enhances the performance by an additional $2\\%$ in the cosine classifier. Compared to the classifier alignment methods that do not involve computing updated prototypes, our method demonstrates its superiority as the incremental stages progress. 
More results on the other datasets can be found in the Supp.", + "bbox": [ + 496, + 446, + 890, + 672 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Progressively tuning vs. first session adaptation: Tab. 6 shows the linear probing results of different adaption ways. After finishing the training of the last session, we freeze the pre-trained backbone and only train the classifier using all the samples. It is evident that not performing tuning and solely freezing the pre-trained model leads to the worst performance, regardless of the dataset. First-session adaptation proves to be a good choice as it reduces training time and works well for datasets like CIFAR100 and CUB200. However, for datasets such as ImageNetA and ImageNetR, which have significant domain gaps from the pre-trained model, relying solely on first-session adaptation is suboptimal. By continuously fine-tuning the adapter, we observe that the backbone exhibits stronger representability compared to only tuning in the first session.", + "bbox": [ + 496, + 674, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "23258", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/ae86d835ecbe60876852924b1f787e7c7ccc30db0aa84f8ab73b1ef6eaf01c9d.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
PET MethodParamsSes.1Ses.2Ses.3Ses.4Ses.5Ses.6Ses.7Ses.8Ses.9Ses.10Avg↑
SSF [22]0.2M98.5091.9088.5785.0283.9278.7077.7977.8973.0274.9183.03
VPT-deep [15]0.046M97.6069.1568.7056.6055.5648.8755.9756.0553.4855.2161.72
VPT-shallow [15]0.004M98.4092.9588.8092.0687.2686.3785.6485.3185.3685.1088.72
Adapter [4]1.19M98.5095.3591.6091.0890.9290.0889.8089.6288.9889.2991.52
", + "bbox": [ + 96, + 88, + 875, + 176 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/572b37d67bb8fa993d47f43fe2f05dee017e19ccdd399be0bb8b3b0fd265e3ab.jpg", + "table_caption": [ + "Table 4. Experimental results for baselines with different efficient tuning methods on CIFAR100. We report the overall performance of each session and the average performance." + ], + "table_footnote": [], + "table_body": "
ClassifierMethodSes.1Ses.2Ses.3Ses.4Ses.5Ses.6Ses.7Ses.8Ses.9Ses.10Avg↑
Linearw/o CA74.6568.3763.9058.8258.0255.4854.0352.8951.6252.1358.99
w/ CA74.6571.5967.9364.2462.0860.9059.0357.3256.4156.8563.10
w/ SSCA74.6570.9267.6463.9162.6560.9660.3858.5558.1357.7763.55
Cosinew/o CA82.6677.7872.2067.6366.0163.1859.9759.3558.9357.9166.56
w/ CA82.6679.7074.5670.4068.1965.6663.4061.7760.7059.7868.68
w/ SSCA82.6680.6075.9172.4171.5669.0166.1064.6063.0062.4370.83
", + "bbox": [ + 109, + 215, + 859, + 335 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/28fa07fbb5cb9c98a60b3434de2ff437cccb88cc064985031ffaa3b03d178a18.jpg", + "table_caption": [ + "Table 5. Ablation results for unified classifier training and semantic shift estimation on ImageNetA. We report the overall performance of each session and the average performance. We run the experiments with three seeds and reported the average performance." + ], + "table_footnote": [], + "table_body": "
MethodCIFARImageNetRImageNetACUB
No-Adapt.86.0868.4233.7186.77
First-Adapt.91.3378.0263.5389.27
All-Adapt.92.5782.0265.9689.86
Δ↑1.24%4.00%2.43%0.59%
", + "bbox": [ + 83, + 376, + 467, + 455 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/dcbb19d2b990fb42607f805ea0e5a7466ca0c37a3b7602354d2b1d57e6533a5f.jpg", + "table_caption": [ + "Table 6. Linear probing results of different training ways on four datasets. We retrain the classifier using all the data on the fixed-trained backbone." + ], + "table_footnote": [], + "table_body": "
StructureParamsCIFARImageNetRImageNetA
AdaptMLP-P [4]1.19M94.35±0.6083.63±0.4370.83±1.63
AdaptMLP-S [4]1.19M94.16±0.8883.19±0.4771.00±1.52
Convpass [16]1.63M94.08±0.9983.64±0.3569.96±1.09
Adapter [13]2.38M94.26±0.9183.65±0.5070.94±1.42
", + "bbox": [ + 81, + 505, + 468, + 575 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Different structures of the adapter: In this paper, we follow AdaptFormer [4] to use parallel adapterMLP as the adapter structure. We also delve deeper into different adapter structures such as Adapter [13] and Convpass [16]. Although these different tuning structures may exhibit performance differences under static settings, the performance differences among those adapter structures are minimal in the context of CIL shown in Tab. 7. This offers us the flexibility to employ various adapter structures within the context of the CIL paradigm.", + "bbox": [ + 75, + 613, + 468, + 763 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Comparison to traditional CIL methods: We conduct evaluations by comparing our approach to SOTA traditional CIL methods shown in Tab. 8. We replace the Resnet backbone with the pre-trained ViT model for fair comparison. The results indicate that the performance of iCaRL tends to be inferior compared to SOTA model expansion methods and our proposed method, even when past samples are stored. It can be observed that methods such as Foster and Der, which dynamically expand feature extraction net", + "bbox": [ + 75, + 763, + 468, + 901 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/e5ea17a09c5444667c1c6d81e7aab9d578b2d91ae99aaf1f87881bd55a6da305.jpg", + "table_caption": [ + "Table 7. Experimental results of different adapter structures. We report the average performance and standard deviation." + ], + "table_footnote": [], + "table_body": "
MethodImageNetRImageNetA
\\( A_{Last} \\uparrow \\)\\( A_{Avg} \\uparrow \\)\\( A_{Last} \\uparrow \\)\\( A_{Avg} \\uparrow \\)
iCaRL [28]61.70±0.5671.34±0.6729.32±2.3640.11±1.36
Foster [34]75.87±0.3881.54±0.8212.44±17.4517.01±20.44
Der [41]75.63±0.8681.13±0.1138.43±2.3946.43±3.29
Memo [48]65.38±0.9073.80±0.8628.45±2.3740.27±1.22
SSIAT (Ours)79.38±0.5983.63±0.4362.43±1.6370.83±1.63
", + "bbox": [ + 513, + 376, + 875, + 462 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 8. Comparison to traditional CIL methods on ImageNetR and ImageNetA dataset.", + "bbox": [ + 498, + 465, + 890, + 494 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "works, achieve impressive results on ImageNetR. The average accuracy of these methods is only $2\\%$ lower than our method. However, on ImageNetA, where there are few-shot samples for many classes, these methods exhibit low performance. More ablation experiments related to hyperparameters can be found in the supp.", + "bbox": [ + 496, + 496, + 890, + 588 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 599, + 619, + 614 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Class-incremental learning on a pre-trained model has received significant attention in recent years. In this paper, we first revisit different PET methods in the context of CIL. Then, we propose that incrementally tuning the shared adapter and local classifier without constraints exhibits less forgetting and gains plasticity for learning new classes. Moreover, to train a unified classifier, we calculate the semantic shift of old prototypes and retrain the classifier using updated prototypes in each session. The proposed method eliminates the need for constructing an adapter pool and avoids retaining any image samples. Experimental results on five benchmarks demonstrate the effectiveness of our method which achieves the SOTA performance.", + "bbox": [ + 496, + 626, + 890, + 823 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgement. 
This research was supported by Natural Science Fund of Hubei Province (Grant # 2022CFB823), Alibaba Innovation Research program under Grant Contract # CRAQ7WHZ11220001-20978282, and HUST Independent Innovation Research Fund (Grant # 2021XXJS096).", + "bbox": [ + 496, + 824, + 893, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "23259", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 106 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Rahaf Aljundi, Francesca Babiloni, Mohamed Elhoseiny, Marcus Rohrbach, and Tinne Tuytelaars. Memory aware synapses: Learning what (not) to forget. In Proceedings of the European conference on computer vision (ECCV), pages 139-154, 2018. 1", + "[2] Jihwan Bang, Heesu Kim, YoungJoon Yoo, Jung-Woo Ha, and Jonghyun Choi. Rainbow memory: Continual learning with a memory of diverse samples. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8218-8227, 2021. 1, 2", + "[3] Arslan Chaudhry, Puneet K Dokania, Thalaiyasingam Ajthan, and Philip HS Torr. Riemannian walk for incremental learning: Understanding forgetting and intransigence. In Proceedings of the European conference on computer vision (ECCV), pages 532-547, 2018. 1, 2", + "[4] Shoufa Chen, Chongjian Ge, Zhan Tong, Jiangliu Wang, Yibing Song, Jue Wang, and Ping Luo. Adaptformer: Adapting vision transformers for scalable visual recognition. Advances in Neural Information Processing Systems, 35:16664-16678, 2022. 1, 2, 3, 8", + "[5] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 
1, 6", + "[6] Beyza Ermis, Giovanni Zappella, Martin Wistuba, Aditya Rawal, and Cedric Archambeau. Memory efficient continual learning with transformers. Advances in Neural Information Processing Systems, 35:10629-10642, 2022. 6", + "[7] Robert M French. Catastrophic forgetting in connectionist networks. Trends in cognitive sciences, 3(4):128-135, 1999. 1", + "[8] Qiankun Gao, Chen Zhao, Yifan Sun, Teng Xi, Gang Zhang, Bernard Ghanem, and Jian Zhang. A unified continual learning framework with general parameter-efficient tuning. arXiv preprint arXiv:2303.10070, 2023. 3, 4, 6, 7", + "[9] Haoyu He, Jianfei Cai, Jing Zhang, Dacheng Tao, and Bohan Zhuang. Sensitivity-aware visual parameter-efficient tuning. arXiv preprint arXiv:2303.08566, 2023. 4, 5", + "[10] Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadavath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, et al. The many faces of robustness: A critical analysis of out-of-distribution generalization. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8340-8349, 2021. 6", + "[11] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. Natural adversarial examples. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15262-15271, 2021. 6", + "[12] Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531, 2015. 4", + "[13] Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. Parameter-efficient transfer" + ], + "bbox": [ + 78, + 114, + 472, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "learning for nlp. In International Conference on Machine Learning, pages 2790-2799. PMLR, 2019. 
2, 8", + "[14] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685, 2021. 2", + "[15] Menglin Jia, Luming Tang, Bor-Chun Chen, Claire Cardie, Serge Belongie, Bharath Hariharan, and Ser-Nam Lim. Visual prompt tuning. In European Conference on Computer Vision, pages 709-727. Springer, 2022. 1, 2, 3, 8", + "[16] Shibo Jie and Zhi-Hong Deng. Convolutional bypasses are better vision transformer adapters. arXiv preprint arXiv:2207.07039, 2022.8", + "[17] James Kirkpatrick, Razvan Pascanu, Neil Rabinowitz, Joel Veness, Guillaume Desjardins, Andrei A Rusu, Kieran Milan, John Quan, Tiago Ramalho, Agnieszka Grabska-Barwinska, et al. Overcoming catastrophic forgetting in neural networks. Proceedings of the national academy of sciences, 114(13):3521-3526, 2017. 1, 2, 4", + "[18] Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009. 5", + "[19] Brian Lester, Rami Al-Rfou, and Noah Constant. The power of scale for parameter-efficient prompt tuning. arXiv preprint arXiv:2104.08691, 2021. 2", + "[20] Xiang Lisa Li and Percy Liang. Prefix-tuning: Optimizing continuous prompts for generation. arXiv preprint arXiv:2101.00190, 2021. 2", + "[21] Zhizhong Li and Derek Hoiem. Learning without forgetting. IEEE transactions on pattern analysis and machine intelligence, 40(12):2935-2947, 2017. 4", + "[22] Dongze Lian, Daquan Zhou, Jiashi Feng, and Xinchao Wang. Scaling & shifting your features: A new baseline for efficient model tuning. Advances in Neural Information Processing Systems, 35:109-123, 2022. 1, 2, 3, 8", + "[23] Arun Mallya and Svetlana Lazebnik. Packet: Adding multiple tasks to a single network by iterative pruning. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pages 7765-7773, 2018. 1, 2", + "[24] Arun Mallya, Dillon Davis, and Svetlana Lazebnik. 
Piggyback: Adapting a single network to multiple tasks by learning to mask weights. In Proceedings of the European conference on computer vision (ECCV), pages 67-82, 2018. 1, 2", + "[25] Aristeidis Panos, Yuriko Kobe, Daniel Olmeda Reino, Rahaf Aljundi, and Richard E Turner. First session adaptation: A strong replay-free baseline for class-incremental learning. arXiv preprint arXiv:2303.13199, 2023. 4", + "[26] Can Peng, Kun Zhao, Tianren Wang, Meng Li, and Brian C Lovell. Few-shot class-incremental learning from an open-set perspective. In European Conference on Computer Vision, pages 382-397. Springer, 2022. 4", + "[27] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 2" + ], + "bbox": [ + 501, + 92, + 893, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "23260", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[28] Sylvestre-Alvise Rebuffi, Alexander Kolesnikov, Georg Sperl, and Christoph H Lampert. icarl: Incremental classifier and representation learning. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pages 2001-2010, 2017. 1, 2, 4, 8", + "[29] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 115:211-252, 2015. 6", + "[30] Joan Serra, Didac Suris, Marius Miron, and Alexandros Karatzoglou. Overcoming catastrophic forgetting with hard attention to the task. In International conference on machine learning, pages 4548-4557. PMLR, 2018. 
1, 2", + "[31] James Seale Smith, Leonid Karlinsky, Vyshnavi Gutta, Paola Cascante-Bonilla, Donghyun Kim, Assaf Arbelle, Rameswar Panda, Rogerio Feris, and Zsolt Kira. Coda-prompt: Continual decomposed attention-based prompting for rehearsal-free continual learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11909-11919, 2023. 1, 3, 6", + "[32] Yu-Ming Tang, Yi-Xing Peng, and Wei-Shi Zheng. When prompt-based incremental learning does not meet strong pretraining. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1706-1716, 2023. 2, 5", + "[33] Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The caltech-ucsd birds-200-2011 dataset. 2011. 5", + "[34] Fu-Yun Wang, Da-Wei Zhou, Han-Jia Ye, and De-Chuan Zhan. Foster: Feature boosting and compression for class incremental learning. In European conference on computer vision, pages 398–414. Springer, 2022. 1, 2, 8", + "[35] Yabin Wang, Zhiwu Huang, and Xiaopeng Hong. S-prompts learning with pre-trained transformers: An occam's razor for domain incremental learning. Advances in Neural Information Processing Systems, 35:5682-5695, 2022. 2", + "[36] Yabin Wang, Zhiheng Ma, Zhiwu Huang, Yaowei Wang, Zhou Su, and Xiaopeng Hong. Isolation and impartial aggregation: A paradigm of incremental learning without interference. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 10209-10217, 2023. 4", + "[37] Zifeng Wang, Zizhao Zhang, Sayna Ebrahimi, Ruoxi Sun, Han Zhang, Chen-Yu Lee, Xiaqi Ren, Guolong Su, Vincent Perot, Jennifer Dy, et al. Dualprompt: Complementary prompting for rehearsal-free continual learning. In European Conference on Computer Vision, pages 631-648. Springer, 2022. 1, 3, 6", + "[38] Zifeng Wang, Zizhao Zhang, Chen-Yu Lee, Han Zhang, Ruoxi Sun, Xiaoqi Ren, Guolong Su, Vincent Perot, Jennifer Dy, and Tomas Pfister. Learning to prompt for continual learning. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 139-149, 2022. 1, 6", + "[39] Tz-Ying Wu, Gurumurthy Swaminathan, Zhizhong Li, Avinash Ravichandran, Nuno Vasconcelos, Rahul Bhotika, and Stefano Soatto. Class-incremental learning with strong" + ], + "bbox": [ + 78, + 90, + 470, + 901 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "pre-trained models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9601-9610, 2022. 2", + "[40] Xiang Xiang, Yuwen Tan, Qian Wan, Jing Ma, Alan Yuille, and Gregory D Hager. Coarse-to-fine incremental few-shot learning. In European Conference on Computer Vision, pages 205-222. Springer, 2022. 2", + "[41] Shipeng Yan, Jiangwei Xie, and Xuming He. Der: Dynamically expandable representation for class incremental learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3014-3023, 2021. 2, 8", + "[42] Lu Yu, Bartlomiej Twardowski, Xialei Liu, Luis Herranz, Kai Wang, Yongmei Cheng, Shangling Jui, and Joost van de Weijer. Semantic drift compensation for class-incremental learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6982-6991, 2020. 2, 5", + "[43] Friedemann Zenke, Ben Poole, and Surya Ganguli. Continual learning through synaptic intelligence. In International conference on machine learning, pages 3987-3995. PMLR, 2017. 1, 2", + "[44] Xiaohua Zhai, Joan Puigcerver, Alexander Kolesnikov, Pierre Ruyssen, Carlos Riquelme, Mario Lucic, Josip Djolonga, Andre Susano Pinto, Maxim Neumann, Alexey Dosovitskiy, et al. A large-scale study of representation learning with the visual task adaptation benchmark. arXiv preprint arXiv:1910.04867, 2019. 6", + "[45] Gengwei Zhang, Liyuan Wang, Guoliang Kang, Ling Chen, and Yunchao Wei. Slca: Slow learner with classifier alignment for continual learning on a pre-trained model. 
arXiv preprint arXiv:2303.05118, 2023. 1, 2, 3, 4, 5, 6, 7", + "[46] Renrui Zhang, Ziyu Guo, Wei Zhang, Kunchang Li, Xupeng Miao, Bin Cui, Yu Qiao, Peng Gao, and Hongsheng Li. Pointclip: Point cloud understanding by clip. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8552-8562, 2022. 2", + "[47] Hengyuan Zhao, Hao Luo, Yuyang Zhao, Pichao Wang, Fan Wang, and Mike Zheng Shou. Revisit parameter-efficient transfer learning: A two-stage paradigm. arXiv preprint arXiv:2303.07910, 2023. 4", + "[48] Da-Wei Zhou, Qi-Wei Wang, Han-Jia Ye, and De-Chuan Zhan. A model or 603 exemplars: Towards memory-efficient class-incremental learning. arXiv preprint arXiv:2205.13218, 2022. 2, 8", + "[49] Da-Wei Zhou, Qi-Wei Wang, Zhi-Hong Qi, Han-Jia Ye, DeChuan Zhan, and Ziwei Liu. Deep class-incremental learning: A survey. arXiv preprint arXiv:2302.03648, 2023. 2", + "[50] Da-Wei Zhou, Han-Jia Ye, De-Chuan Zhan, and Ziwei Liu. Revisiting class-incremental learning with pre-trained models: Generalizability and adaptivity are all you need. arXiv preprint arXiv:2303.07338, 2023. 1, 2, 3, 5, 6, 7", + "[51] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Conditional prompt learning for vision-language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16816-16825, 2022. 2" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "23261", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[52] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. International Journal of Computer Vision, 130(9):2337-2348, 2022. 1, 2, 3, 6", + "[53] Qinhao Zhou, Xiang Xiang, and Jing Ma. Hierarchical task-incremental learning with feature-space initialization inspired by neural collapse. 
Neural Processing Letters, pages 1-17, 2023. 2", + "[54] Fei Zhu, Xu-Yao Zhang, Chuang Wang, Fei Yin, and Cheng-Lin Liu. Prototype augmentation and self-supervision for incremental learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5871-5880, 2021. 1, 2, 5" + ], + "bbox": [ + 78, + 90, + 468, + 273 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "23262", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/Semantically-Shifted Incremental Adapter-Tuning is A Continual ViTransformer/3e66199c-eb4c-4d3b-89c8-b97d56ae08e1_model.json b/2024/Semantically-Shifted Incremental Adapter-Tuning is A Continual ViTransformer/3e66199c-eb4c-4d3b-89c8-b97d56ae08e1_model.json new file mode 100644 index 0000000000000000000000000000000000000000..73bdb6ad3346de309eaa5f450f33bc3141378973 --- /dev/null +++ b/2024/Semantically-Shifted Incremental Adapter-Tuning is A Continual ViTransformer/3e66199c-eb4c-4d3b-89c8-b97d56ae08e1_model.json @@ -0,0 +1,2290 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.078, + 0.131, + 0.895, + 0.155 + ], + "angle": 0, + "content": "Semantically-Shifted Incremental Adapter-Tuning is A Continual ViTransformer" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.181, + 0.556, + 0.236 + ], + "angle": 0, + "content": "Yuwen Tan*, Qinhao Zhou*, Xiang Xiang*† \nSchool of Artificial Intelligence and Automation, Huazhong University of Science and Tech., Wuhan, China" + }, + { + "type": "text", + "bbox": [ + 0.59, + 0.181, + 0.877, + 0.235 + ], + "angle": 0, + "content": "Ke Wang, Yuchuan Wu, Yongbin Li \nDAMO Academy, \nAlibaba Group, Beijing, China" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.269, + 0.314, + 0.285 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.301, + 0.474, + 0.633 + ], + "angle": 0, + "content": "Class-incremental learning (CIL) aims to enable models to continuously learn new classes while overcoming catastrophic forgetting. The introduction of pre-trained models has brought new tuning paradigms to CIL. In this paper, we revisit different parameter-efficient tuning (PET) methods within the context of continual learning. We observe that adapter tuning demonstrates superiority over prompt-based methods, even without parameter expansion in each learning session. Motivated by this, we propose incrementally tuning the shared adapter without imposing parameter update constraints, enhancing the learning capacity of the backbone. Additionally, we employ feature sampling from stored prototypes to retrain a unified classifier, further improving its performance. We estimate the semantic shift of old prototypes without access to past samples and update stored prototypes session by session. Our proposed method eliminates model expansion and avoids retaining any image samples. It surpasses previous pre-trained model-based CIL methods and demonstrates remarkable continual learning capabilities. 
Experimental results on five CIL benchmarks validate the effectiveness of our approach, achieving state-of-the-art (SOTA) performance." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.661, + 0.21, + 0.677 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.686, + 0.47, + 0.868 + ], + "angle": 0, + "content": "In traditional deep learning, the model can access all the data at once and learning is performed on a static dataset. However, in real-life applications, data usually arrives in a stream format with new classes, requiring the model to learn continuously, known as class-incremental learning (CIL). The primary objective of CIL is to enable the model to learn continuously from non-stationary data streams, facilitating adaptation to new classes and mitigating catastrophic forgetting [7]. A number of methods [28, 34, 54] have been devoted to alleviating catastrophic forgetting. Those methods can be mainly divided into replay-based [2, 3, 28], regularization-based [1, 17, 43], and isolation-based meth" + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.269, + 0.697, + 0.395 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.696, + 0.272, + 0.882, + 0.395 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.396, + 0.893, + 0.465 + ], + "angle": 0, + "content": "Figure 1. Comparison of different parameter-efficient tuning CIL baselines on CIFAR100 dataset. Left: The relationship between the average accuracy of the incremental sessions and the number of tunable parameters. Right: The average performance of old classes and new classes for each PET method." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.476, + 0.893, + 0.521 + ], + "angle": 0, + "content": "ods [23, 24, 30]. However, all these methods assume that models are trained from scratch while ignoring the generalization ability of a strong pre-trained model [5] in the CIL." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.523, + 0.895, + 0.704 + ], + "angle": 0, + "content": "Pre-trained vision transformer models [5] have demonstrated excellent performance on various vision tasks. Recently, it has been explored in the field of CIL and continues to receive considerable attention [37, 38, 45, 50]. Due to the powerful representation capabilities of pre-trained models, CIL methods based on pre-trained models achieve significant performance improvements compared to traditional SOTA methods which are trained from scratch. CIL with a pre-trained model typically fixes the pre-trained model to retain the generalizability and adds a few additional training parameters such as adapter [4], prompt [15] and SSF [22], which is referred to as parameter-efficient tuning (PET)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.705, + 0.896, + 0.902 + ], + "angle": 0, + "content": "Inspired by language-based intelligence, current research in CIL is primarily focused on the prompt-based method [31, 37, 52]. Typically, these approaches require the construction of a pool of task-specific prompts during the training phase which increases storage overhead. Additionally, selecting prompts during the testing stage incurs additional computational costs. Other PET methods as well as fully fine-tuning are still in exploration in the context of CIL. Recently, SLCA [45] proposes fine-tuning the entire ViT and classifier incrementally with different learning rates. However, fine-tuning the entire pre-trained model requires substantial computational resources. In addition, Adam [50] initially explores the application of other PET" + }, + { + "type": "page_footnote", + "bbox": [ + 0.094, + 0.875, + 0.468, + 0.887 + ], + "angle": 0, + "content": "*Equal contribution, co-first author; also with Nat. Key Lab of MSIPT." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.099, + 0.888, + 0.469, + 0.9 + ], + "angle": 0, + "content": "†Correspondence to xex@hust.edu.cn; also with Peng Cheng Lab." + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.875, + 0.469, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "23252" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.471, + 0.198 + ], + "angle": 0, + "content": "methods in CIL using first-session adaptation and branch fusion. Training in the first stage and subsequently freezing the model can reduce training time but result in lower accuracy for subsequent new classes. Our linear probing results reveal that the first-session adaptation is insufficient when there is a significant domain discrepancy between downstream data and the pre-trained model." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.205, + 0.474, + 0.508 + ], + "angle": 0, + "content": "In this paper, we first revisit different PET methods within the CIL paradigm. As shown in Fig. 1, we observe that adapter tuning [4] is a better continual learner than prompt-tuning [15] and SSF-tuning [22]. When progressively fine-tuning the prompt and SSF parameters, the forgetting of old classes is catastrophic. In comparison, adapter tuning effectively balances learning new classes and maintaining performance in old classes. Unlike prompt-based methods, which require constructing a prompt pool, adapter tuning avoids catastrophic forgetting even sharing the same parameters across learning sessions. Additionally, the adapter balances the number of tuning parameters and model performance compared to fully fine-tuning. 
Moreover, unlike previous methods that use feature distillation loss to restrict changes in shared parameters as part of overall loss, we analyze that tuning with constraints hinders continual learning from the perspective of parameter sensitivity. Therefore, we train the adapter and task-specific classifier without parameter regularization in each session, allowing for greater plasticity in learning new classes." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.516, + 0.472, + 0.728 + ], + "angle": 0, + "content": "As we only train the local classifier in each learning session, we propose to adopt a new classifier retraining method [32, 45, 54] to further improve the CIL performance. First, we implicitly compute the semantic shift [42] of previous prototypes which leverages the semantic shift of current task samples to estimate the change of old classes. Then, we sample several features according to the updated prototypes to retrain the classifier which is more effective than previous methods. The advantages of our proposed method can be summarized as follows: 1) Fine-tuning adapters significantly reduces training costs and improves learning efficiency; 2) We do not need to retain any image samples; 3) The accuracy for new classes is relatively high which verifies the continual learning capacity of the model." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.472, + 0.903 + ], + "angle": 0, + "content": "In summary, our proposed learning framework has the following main contributions: (1) Different from various devotion into the prompt-based methods for CIL, we discover that incrementally tuning adapter is a better continual learner even without constructing an adapter-pool; (2) After each session adaptation with local classifier, we propose to retrain a unified classifier with the semantic shift compensated prototypes which can further improve the performance; (3) Extensive experimental results on five CIL benchmarks demonstrate the superiority of the proposed simple but effective methods which achieves the SOTA." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.09, + 0.643, + 0.107 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.115, + 0.753, + 0.132 + ], + "angle": 0, + "content": "2.1. Class-incremental Learning" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.138, + 0.895, + 0.457 + ], + "angle": 0, + "content": "Class-incremental learning requires the model to be continuously updated with new class instances while retaining old knowledge [49]. Traditional CIL methods can be categorized into replay-based [2, 3, 28], regularization-based [17, 40, 43, 53], and parameter isolation-based methods [23, 24, 30]. Replay-based methods involve retaining or generating samples of previous classes and incorporating them into the current training phase. These methods often employ strategies for sample selection or sample generation to effectively replay past information. Regularization-based methods add constraints or penalties in the learning process which limit the update of the parameters that are important for old classes. Isolation-based methods aim to isolate and update task-specific parameters. By focusing on updating only a subset of parameters, these methods can mitigate catastrophic forgetting. 
To expand the representative capacity of a model without compromising its existing knowledge, methods for expanding the network have been proposed [34, 41, 48]. These methods dynamically extend the feature extraction network, combined with the replay-based method, achieving dramatic performance improvements." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.466, + 0.749, + 0.483 + ], + "angle": 0, + "content": "2.2. Parameter-Efficient Tuning" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.489, + 0.895, + 0.779 + ], + "angle": 0, + "content": "Parameter-Efficient Tuning can be considered as a transfer learning method. It refers to not performing full fine-tuning on a pre-trained model, instead inserting and fine-tuning specific sub-modules within the network. This approach is initially demonstrated to have effective transfer learning results in NLP [13, 14, 19, 20]. Recently, similar approaches have been applied to vision transformer models as well. AdaptFormer [4] inserts lightweight modules after the MLP layers in the attention module and has been found to outperform full fine-tuning on action recognition benchmarks. Another PET approach SSF [22] surprisingly outperforms other methods in certain tasks even with a smaller number of parameters. Inspired by the prompt approach used in the language model, VPT [15] applies it to visual models and achieves impressive results across various downstream tasks while only introducing a small number of additional parameters. Furthermore, the prompt-based method has also been used in vision-language models [27, 46, 51, 52] to improve performance on various downstream tasks." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.787, + 0.873, + 0.803 + ], + "angle": 0, + "content": "2.3. 
Continual Learning on a Pre-trained Model" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.894, + 0.903 + ], + "angle": 0, + "content": "The aforementioned CIL methods all involve training the model from scratch, while CIL with pre-trained model [35, 39, 50, 52] has gained much attention due to its strong feature representation ability. L2P [52] utilizes the pretrained model and learns a set of extra prompts dynamically to guide the model to solve corresponding tasks. Du" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "23253" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.125, + 0.092, + 0.347, + 0.107 + ], + "angle": 0, + "content": "(I) Incremental Adapter Tuning" + }, + { + "type": "image", + "bbox": [ + 0.12, + 0.109, + 0.437, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.452, + 0.092, + 0.662, + 0.107 + ], + "angle": 0, + "content": "(II) Semantic Shift Estimation" + }, + { + "type": "image", + "bbox": [ + 0.452, + 0.1, + 0.852, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.452, + 0.284, + 0.668, + 0.299 + ], + "angle": 0, + "content": "(III) Unified Classifier Training" + }, + { + "type": "image", + "bbox": [ + 0.458, + 0.301, + 0.849, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.407, + 0.892, + 0.435 + ], + "angle": 0, + "content": "Figure 2. The framework of our proposed method. Left: The illustration of the structure of ViT and adapter. The adapter and local classifier are incrementally trained in each session using the Eq. 4. Right: The process of retraining the classifier with semantic shift estimation." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.443, + 0.473, + 0.671 + ], + "angle": 0, + "content": "alPrompt [37] proposes to learn of two mutually unrelated prompt spaces: the general prompt and the expert prompt. It encodes task-invariant instructions and task-specific instructions, respectively. CODAPrompt [31] introduces a decomposed attention-based continual learning prompting method, which offers a larger learning capacity than existing prompt-based methods [37, 52]. SLCA [45] explores the fine-tuning paradigm of the pre-trained models, setting different learning rates for backbone and classifiers, and gains excellent performance. Adam [50] proposes to construct the classifier by merging the embeddings of a pretrained model and an adapted downstream model. LAE [8] proposes a unified framework that calibrates the adaptation speed of tuning modules and ensembles PET modules to accomplish predictions." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.685, + 0.212, + 0.703 + ], + "angle": 0, + "content": "3. Methodology" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.71, + 0.208, + 0.727 + ], + "angle": 0, + "content": "3.1. Preliminary" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.473, + 0.903 + ], + "angle": 0, + "content": "Class-incremental learning formulation: We first introduce the definition of CIL. Consider a neural network \\(\\mathcal{M}_{\\theta} = f_{\\theta_{cls}}(\\mathcal{F}_{\\theta_{bne}}(\\cdot))\\) with trainable parameters \\(\\theta = \\{\\theta_{bne},\\theta_{cls}\\}\\). \\(\\mathcal{F}_{\\theta_{bne}}\\) represents the feature extraction backbone which extracts features from input images and \\(f_{\\theta_{cls}}\\) stands for the classification layer that projects feature representations to class predictions. 
In CIL setting, \\(\\mathcal{M}_{\\theta}\\) needs to learn a series of sessions from training data \\(D_{t} = \\{(x_{1}^{t},y_{1}^{t}),(x_{2}^{t},y_{2}^{t}),\\ldots \\}, t = 1,\\ldots ,T\\) and satisfy the condition \\(Y(i)\\cap Y(j) = \\emptyset ,i\\neq j\\) where \\(Y(i)\\) represent the label set in session \\(i\\). The goal of \\(\\mathcal{M}_{\\theta}\\) is to perform well" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.443, + 0.892, + 0.473 + ], + "angle": 0, + "content": "on test sets that contain all the classes learned denoted as \\(\\mathcal{Y} = Y(1) \\cup \\ldots \\cup Y(t)\\) after \\(t\\)-th session." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.474, + 0.893, + 0.596 + ], + "angle": 0, + "content": "Parameter-efficient tuning with Adapter: An adapter is a bottleneck structure [4] that can be incorporated into a pre-trained transformer-based network to facilitate transfer learning and enhance the performance of downstream tasks. An adapter typically consists of a downsampled MLP layer \\( W_{down} \\in \\mathbb{R}^{d \\times d} \\), a non-linear activation function \\( \\sigma \\), and an upsampled MLP layer \\( W_{up} \\in \\mathbb{R}^{d \\times d} \\). Denote the input as \\( x_{i} \\), we formalize the adapter as" + }, + { + "type": "equation", + "bbox": [ + 0.568, + 0.606, + 0.891, + 0.623 + ], + "angle": 0, + "content": "\\[\no u t = x _ {i} + s \\cdot \\sigma \\left(x _ {i} * W _ {\\text {d o w n}}\\right) * W _ {u p}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.624, + 0.891, + 0.654 + ], + "angle": 0, + "content": "where \\(*\\) stands for the matrix multiplication, \\(\\sigma\\) denotes the activation function RELU, and \\(s\\) denotes the scale factor." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.655, + 0.892, + 0.73 + ], + "angle": 0, + "content": "Parameter-efficient tuning with SSF: SSF [22] modulates pre-trained models using scale and shift factors to align the feature distribution of downstream tasks. SSF inserts its layers in each transformer operation. Suppose \\( x_{i} \\) is the output of one of the modules, SSF can be represented as" + }, + { + "type": "equation", + "bbox": [ + 0.638, + 0.74, + 0.891, + 0.756 + ], + "angle": 0, + "content": "\\[\ny = \\gamma \\odot x _ {i} + \\beta , \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.891, + 0.796 + ], + "angle": 0, + "content": "where \\(\\gamma \\in \\mathbb{R}^d\\) and \\(\\beta \\in \\mathbb{R}^d\\) denote the scale and shift factor, respectively. \\(\\odot\\) stands for Hadamard product." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.893, + 0.903 + ], + "angle": 0, + "content": "Parameter-efficient tuning with VPT: Visual Prompt Tuning (VPT) inserts a small number of trainable parameters in the input space after the embedding layer [15]. It is called prompts and only these parameters will be updated in the fine-tuning process. Depending on the number of layers inserted, VPT can be categorized as VPT-shallow and VPT-deep. 
Suppose \\( P = \\{p^k \\in R^d | 1 \\leq k \\leq n\\} \\) and the input" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "23254" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.393, + 0.106 + ], + "angle": 0, + "content": "embedding is \\(x\\), VPT will combine \\(x\\) with \\(P\\) as" + }, + { + "type": "equation", + "bbox": [ + 0.231, + 0.119, + 0.469, + 0.136 + ], + "angle": 0, + "content": "\\[\nx ^ {\\prime} = [ x, P ], \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.141, + 0.47, + 0.172 + ], + "angle": 0, + "content": "where \\( n \\) is the number of prompts and the \\( x' \\) will be passed into subsequent blocks." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.182, + 0.469, + 0.199 + ], + "angle": 0, + "content": "3.2. Adapter-tuning without parameter constraints" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.206, + 0.47, + 0.537 + ], + "angle": 0, + "content": "Most of the work based on pre-trained models focuses on how to apply the prompt-tuning strategies to the CIL paradigm. However, tuning the same prompt parameters across each learning session will cause catastrophic forgetting. As shown in Fig. 1, when progressively training the shared extra module while keeping the pre-trained model fixed, the adapter demonstrates its superiority over other tuning methods such as prompt-tuning and SSF. Fine-tuning the shared adapter incrementally seems to well balance the learning of new classes and old-knowledge retaining. Based on this observation, we delve deeper into incremental adapter tuning and use it as our baseline. The whole framework of the proposed method is shown in Fig. 2. Some methods [25, 47] adopt the first-session adaption and then fix the backbone. In addition, previous methods often utilize knowledge distillation [12] (KD) loss to restrict parameter changes of the feature extractor to mitigate forgetting. 
Totally different from earlier methods [17, 21, 28], we propose that the shared adapter should be tuned incrementally without parameter constraints. Next, we will provide a detailed description of the proposed baseline and offer a reasonable explanation and analysis." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.538, + 0.47, + 0.659 + ], + "angle": 0, + "content": "Implementation of adapter-based baselines: During incremental training sessions, only adapter and classifier layers are updated, and the pre-trained ViT model is frozen. As the cosine classifier has shown great success in CIL, we follow ALICE [26] to use the cosine classifier with a margin. The margin hyper-parameter could also be used as a balance factor to decide the learning and retaining. The training loss can be formulated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.091, + 0.661, + 0.469, + 0.717 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} ^ {t} = - \\frac {1}{N ^ {t}} \\sum_ {j = 1} ^ {N ^ {t}} \\log \\frac {e ^ {s \\left(\\cos \\theta_ {j} ^ {i} - m\\right)}}{e ^ {s \\left(\\cos \\theta_ {j} ^ {i} - m\\right)} + \\sum_ {c = 1} ^ {Y (t) - \\{i \\}} e ^ {s \\left(\\cos \\theta_ {j} ^ {c}\\right)}} \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.717, + 0.469, + 0.764 + ], + "angle": 0, + "content": "where \\( \\cos\\theta_{j}^{i} = \\frac{w_{i}*f_{j}}{||w_{i}||*||f_{j}||} \\), \\( N^t \\) denotes the number of training samples of the current session, \\( s \\) and \\( m \\) represent the scale factor and margin factor, respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.469, + 0.902 + ], + "angle": 0, + "content": "As we do not retain any image samples, the gradients computed during the optimization of current samples not only affect the newly trained classifiers but also have an impact on the previously learned classifiers. The forgetting of the classifier is significant when no samples are retained. 
Thus, we follow previous work [8, 36, 45] to adopt the local training loss where we only compute the loss between current logits and labels and hinder the gradient updates of the previous classifier which alleviates the classifier forgetting." + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.091, + 0.695, + 0.215 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.695, + 0.091, + 0.885, + 0.215 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.216, + 0.892, + 0.258 + ], + "angle": 0, + "content": "Figure 3. Comparison of the performance on ImageNetR dataset with different extent of parameter constraints. Left: The overall accuracy of each session. Right: The accuracy of new classes." + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.261, + 0.698, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.698, + 0.262, + 0.886, + 0.388 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.391, + 0.89, + 0.432 + ], + "angle": 0, + "content": "Figure 4. Parameter sensitivity analysis on the ImageNetR dataset. Left: The parameter sensitiveness of two incremental tasks. Right: The sensitiveness of different parameters in one task." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.437, + 0.892, + 0.497 + ], + "angle": 0, + "content": "Analysis of the adapter-based baseline: We will analyze why the adapter shows its superiority in the CIL over other PET methods, and why we choose to incrementally tune the shared adapter without parameter constraints." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.498, + 0.893, + 0.784 + ], + "angle": 0, + "content": "First, we elaborate on why incrementally tuning the adapter is better in the context of CIL. By utilizing the residual structure, the adapter can retain the generalization capabilities from the pre-trained model while adapting to new tasks. 
The incremental tuning of the adapter exhibits a cumulative learning capability, where the representational capacity of the adapter is further enhanced as the learning sessions progress. In contrast, both SSF and prompt tuning have limitations when it comes to handling CIL. These methods suffer from overfitting to the current distribution. When the shared parameters excessively overfit each current task, the model gradually loses its generalization ability which is harmful for training a unified model for CIL. Then, we try to utilize KD loss to implicitly limit parameter updates and adjust the weighting factor. As shown in Fig. 3, the results demonstrate that unconstrained training is more beneficial for new-classes learning and improving overall performance. Based on this observation, we propose our proposition from the perspective of parameter sensitivity." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.785, + 0.892, + 0.829 + ], + "angle": 0, + "content": "Proposition 1: Confining the change of parameters of previous tasks hinders the plasticity of new classes due to the similarity of parameter sensitivity among tasks." 
+ }, + { + "type": "text", + "bbox": [ + 0.499, + 0.83, + 0.892, + 0.875 + ], + "angle": 0, + "content": "Proof: Given the parameter set \\(\\theta = \\{\\theta_1, \\theta_2, \\dots, \\theta_N\\}\\) and training set \\(D_t = (X_t, Y_t)\\) in \\(t\\)-th session, the definition of parameter sensitivity [9, 47] is defined as" + }, + { + "type": "equation", + "bbox": [ + 0.578, + 0.885, + 0.891, + 0.903 + ], + "angle": 0, + "content": "\\[\ns _ {i} ^ {t} = \\mathcal {L} \\left(X _ {t}, Y _ {t} \\mid \\theta_ {i}\\right) - \\mathcal {L} \\left(X _ {t}, Y _ {t} \\mid \\theta_ {i} ^ {*}\\right), \\tag {5}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.946, + 0.518, + 0.957 + ], + "angle": 0, + "content": "23255" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.136 + ], + "angle": 0, + "content": "where \\(\\theta_{i}^{*} = \\theta_{i} + \\Delta \\theta_{i}\\) and \\(\\mathcal{L}\\) denotes the optimized loss in the classification task. We use the first-order Taylor expansion, and the parameter sensitivity can be rewritten as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.174, + 0.14, + 0.469, + 0.171 + ], + "angle": 0, + "content": "\\[\ns _ {i} = - g _ {i} \\Delta \\theta_ {i} = - \\frac {\\delta \\mathcal {L}}{\\delta \\theta_ {i}} * \\Delta \\theta_ {i}, \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.181, + 0.471, + 0.365 + ], + "angle": 0, + "content": "as \\(\\Delta \\theta_{i}\\) denotes the update after the training process, we follow the work [9] to use the one-step update to approximate the \\(\\Delta \\theta_{i} = \\epsilon \\frac{\\delta\\mathcal{L}}{\\delta\\theta_{i}}\\). Therefore, the parameter can be approximately computed as \\(s_i \\approx -\\epsilon \\left(\\frac{\\delta\\mathcal{L}}{\\delta\\theta_i}\\right)^2\\). As shown in Fig. 4, the sensitivity values of tuning parameters for two different sessions are nearly equal and the most sensitive parameters are always up weights. 
This means that constraining the parameter update would hinder the learning of new classes and further impede the ability of the model for continual learning. Furthermore, in the experimental section, we demonstrate the representative capacity of the adapter continued to strengthen through incremental tuning." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.374, + 0.47, + 0.391 + ], + "angle": 0, + "content": "3.3. Semantic shift estimation without past samples" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.397, + 0.47, + 0.623 + ], + "angle": 0, + "content": "Due to the selective updating of classifiers corresponding to the current task during training, the classifiers across different learning sessions are not fully aligned in the same feature space. To further optimize classifiers, we store the prototypes after training the backbone and local classifier. However, as the backbone is trained incrementally with new classes, the feature distribution of old classes undergoes changes. Retraining the classifier with the previous prototypes is sub-optimal. Since the feature representability of the backbone updates over time, using outdated features may not effectively retrain a unified classifier. To solve this problem, we update the feature distribution of old classes by computing the semantic shift over the learning process. We follow SDC [42] to estimate the semantic shift of old prototypes without access to past samples." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.624, + 0.47, + 0.699 + ], + "angle": 0, + "content": "Suppose \\(\\varphi_c^t\\) denotes the prototype of category \\(c\\) in session \\(t\\) and \\(r\\) is the learning session that the category belongs to. We have no access to the samples of category \\(c\\) to update the prototype in session \\(t\\) (when \\(t > r\\)). 
The semantic shift of class \\(c\\) between two sessions can be represented as" + }, + { + "type": "equation", + "bbox": [ + 0.117, + 0.703, + 0.469, + 0.746 + ], + "angle": 0, + "content": "\\[\n\\Delta_ {c} ^ {r \\rightarrow t} = \\varphi_ {c} ^ {t} - \\varphi_ {c} ^ {r}, \\quad \\varphi_ {c} ^ {r} = \\frac {1}{N _ {r} ^ {c}} \\sum_ {n = 1} ^ {N _ {r} ^ {c}} \\mathcal {F} \\left(X _ {r} ^ {c}, \\theta_ {r}\\right). \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.756, + 0.469, + 0.817 + ], + "angle": 0, + "content": "While we do not have access to data from the old class \\( c \\), we can only estimate the shift of current task categories on old and new models. The semantic shift of current samples between two sessions can be represented as" + }, + { + "type": "equation", + "bbox": [ + 0.201, + 0.826, + 0.469, + 0.845 + ], + "angle": 0, + "content": "\\[\n\\delta_ {i} ^ {t - 1 \\rightarrow t} = e _ {i} ^ {t} - e _ {i} ^ {t - 1}, \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.856, + 0.47, + 0.901 + ], + "angle": 0, + "content": "where \\( e \\) denotes the embedding of one sample in the current task \\( t \\). We can compute \\( e_i^{t - 1} \\) at the start of the current task with the model trained in task \\( t - 1 \\). After training" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.09, + 0.892, + 0.123 + ], + "angle": 0, + "content": "on the new task, we compute \\(\\delta_i^{t - 1\\to t}\\) and use it to estimate \\(\\Delta_c^{t - 1\\to t}\\). 
We compute the shift as" + }, + { + "type": "equation", + "bbox": [ + 0.585, + 0.127, + 0.892, + 0.188 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\widetilde {\\Delta} _ {c} ^ {t - 1 \\rightarrow t} = \\frac {\\sum \\alpha_ {i} \\delta_ {i} ^ {t - 1 \\rightarrow t}}{\\sum \\alpha_ {i}}, c \\notin C ^ {t}, \\tag {9} \\\\ \\alpha_ {i} = \\mathbf {e} ^ {- \\frac {| | e _ {i} ^ {t - 1} - \\varphi_ {c} ^ {t - 1} | |}{2 \\sigma^ {2}}}, \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.201, + 0.892, + 0.247 + ], + "angle": 0, + "content": "where \\(\\sigma\\) is the standard deviation of the distribution of class \\(c\\); \\(C^t\\) denotes classes learned in the current session. Before retraining the classifier, we update the prototypes with" + }, + { + "type": "equation", + "bbox": [ + 0.57, + 0.256, + 0.892, + 0.292 + ], + "angle": 0, + "content": "\\[\n\\left\\{\\begin{array}{l l}\\varphi_ {c} = \\varphi_ {c} ^ {t - 1} + \\widetilde {\\Delta} _ {c} ^ {t - 1 \\rightarrow t}&, c \\notin C ^ {t}\\\\\\varphi_ {c} = \\frac {1}{N _ {c}} \\sum_ {i} e _ {c}&, c \\in C ^ {t},\\end{array}\\right. \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.299, + 0.836, + 0.314 + ], + "angle": 0, + "content": "where \\(N_{c}\\) denotes the number of images in class \\(c\\)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.325, + 0.731, + 0.341 + ], + "angle": 0, + "content": "3.4. Unified classifier training" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.349, + 0.892, + 0.621 + ], + "angle": 0, + "content": "Previous work [32, 45, 54] has attempted to retrain a unified classifier by modeling each class as a Gaussian distribution and sampling features from the distribution. We refer to this method as classifier alignment (CA) and adopt a similar approach that incorporates semantic shift estimation, which we denote as SSCA. 
Specifically, we compute the class prototypes \\( P_{c} = \\{\\varphi_{1},\\dots,\\varphi_{C}\\} \\) and covariance \\( \\Sigma_{c} = \\{\\varsigma_{1},\\dots,\\varsigma_{C}\\} \\) for each class after training process in each learning session. The calculation of class prototypes is based on Eq. 10. Due to the capability of the trained backbone network to provide well-distributed representations, each class exhibits an unimodal distribution. Therefore, we form a normal distribution \\( \\mathcal{N}(\\mu_c,\\Sigma_c) \\) for each class with class prototype and variance. We sample features \\( \\mathcal{V}_c = \\{v_{c,1},\\dots v_{c,S_n}\\} \\) from the distribution to obtain diverse samples, where \\( S_{n} \\) is the number of the sample features for each class. Then, we use these features to train classification layers \\( \\theta_{cls} \\) with a commonly used cross-entropy loss as" + }, + { + "type": "equation", + "bbox": [ + 0.537, + 0.626, + 0.891, + 0.667 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} \\left(\\theta_ {c l s}, \\mathcal {V} _ {c}\\right) = - \\sum_ {i = 1} ^ {S _ {n} * C} \\log \\frac {\\mathbf {e} ^ {\\left(\\theta_ {c l s} ^ {j} \\left(v _ {i}\\right)\\right)}}{\\sum_ {k \\in C} \\mathbf {e} ^ {\\left(\\theta_ {c l s} ^ {k} \\left(v _ {i}\\right)\\right)}}, \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.67, + 0.891, + 0.702 + ], + "angle": 0, + "content": "where \\( C \\) denotes all classes learned so far. We normalize the features and classifier the same as backbone training." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.716, + 0.633, + 0.733 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.741, + 0.799, + 0.756 + ], + "angle": 0, + "content": "4.1. 
Datasets and Evaluation Protocols" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.765, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Dataset: We evaluate our method on four commonly-used CIL benchmarks and one cross-domain CIL dataset. We randomly split the dataset into 10 or 20 learning tasks. CIFAR100 [18] is a widely used dataset in CIL which consists of 60000 images, belonging to 100 different categories. CUB200 [33] is a dataset that contains approximately 11,788 images of 200 bird species with fine-grained class labels. Additionally, we also follow recent work [45, 50] to use the other three datasets which have a large" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.946, + 0.521, + 0.957 + ], + "angle": 0, + "content": "23256" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.093, + 0.089, + 0.881, + 0.278 + ], + "angle": 0, + "content": "
MethodParamsSplit-ImageNetRSplit-ImageNetACUB200CIFAR100
\\( A_{Last} \\uparrow \\)\\( A_{Avg} \\uparrow \\)\\( A_{Last} \\uparrow \\)\\( A_{Avg} \\uparrow \\)\\( A_{Last} \\uparrow \\)\\( A_{avg} \\uparrow \\)\\( A_{Last} \\uparrow \\)\\( A_{avg} \\uparrow \\)
Joint86M81.72±0.35-50.56±1.75-88.17±0.32-89.71±0.07-
FT86M20.93±0.8640.35±0.746.03±4.7416.57±5.822.05±1.6945.67±2.0422.17±1.0941.83±1.60
SLCA [45]86M79.35±0.2883.29±0.4661.05±0.6368.88±2.3184.68±0.0990.77±0.7991.26±0.3794.29±0.92
Adam-adapter [50]1.19M65.79±0.9872.42±1.4148.81±0.0858.84±1.3785.84±0.0891.33±0.4987.29±0.2791.21±1.33
Adam-ssf [50]0.2M66.61±0.0974.36±1.0048.94±0.1458.79±2.8285.67±0.1590.99±0.7685.27±0.2189.90±0.98
Adam-prompt [50]0.04M65.29±1.5272.97±0.5629.29±7.4239.14±7.5985.28±0.4790.89±0.8685.04±1.0489.49±0.58
LAE [8]0.19M72.29±0.1477.99±0.4647.18±1.1758.15±0.7380.97±0.5187.22±1.2185.25±0.4389.80±1.20
L2P [38]0.04M72.34±0.1777.36±0.6444.04±0.9351.24±2.2667.02±1.9079.62±1.6084.06±0.8888.26±1.34
ADA [6]1.19M73.76±0.2779.57±0.8450.16±0.2059.43±2.2076.13±0.9485.74±0.2688.25±0.2691.85±1.32
DualPrompt [37]0.25M69.10±0.6274.28±0.6653.19±0.7464.59±0.0868.48±0.4780.59±1.5086.93±0.2491.13±0.32
CODAPrompt [31]3.84M73.31±0.5078.47±0.5352.08±0.1263.92±0.1277.23±1.1281.90±0.8583.21±3.3987.71±3.17
SSIAT (Ours)1.19M79.38±0.5983.63±0.4362.43±1.6370.83±1.6388.75±0.3893.00±0.9091.35±0.2694.35±0.60
" + }, + { + "type": "table_caption", + "bbox": [ + 0.093, + 0.281, + 0.877, + 0.294 + ], + "angle": 0, + "content": "Table 1. Experimental results on four CIL benchmarks. All other methods are reproduced using the same seeds for a fair comparison." + }, + { + "type": "image", + "bbox": [ + 0.087, + 0.295, + 0.287, + 0.423 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.284, + 0.295, + 0.484, + 0.423 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.484, + 0.295, + 0.682, + 0.423 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.682, + 0.295, + 0.882, + 0.423 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.425, + 0.893, + 0.453 + ], + "angle": 0, + "content": "Figure 5. The performance of each learning session on four datasets. (a) ImageNetR; (b) ImageNetA; (c) CUB200; (d) CIFAR100. These curves are plotted by calculating the average performance across three different seeds for each incremental session." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.466, + 0.472, + 0.594 + ], + "angle": 0, + "content": "
MethodImageNetRImageNetA
\\( A_{Last} \\uparrow \\)\\( A_{Avg} \\uparrow \\)\\( A_{Last} \\uparrow \\)\\( A_{Avg} \\uparrow \\)
SLCA [45]74.63±1.5579.92±1.2936.69±21.3156.35±7.09
Adam-adapter[50]57.42±0.8464.75±0.7948.65±0.1259.55±1.07
Adam-ssf[50]64.30±0.9472.42±1.4747.27±4.3458.36±4.70
Adam-prompt[50]59.90±1.1368.02±1.0229.93±4.8839.13±4.19
LAE [8]69.86±0.4377.38±0.6139.52±0.7851.75±2.15
L2P [38]69.64±0.4275.28±0.5740.48±1.7849.62±1.46
DualPrompt [37]66.61±0.5872.45±0.3742.28±1.9453.39±1.64
CODAPrompt [31]69.96±0.5075.34±0.8544.62±1.9254.86±0.50
SSIAT (Ours)75.67±0.1482.30±0.3659.16±1.0368.45±1.92
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.598, + 0.47, + 0.627 + ], + "angle": 0, + "content": "Table 2. Experimental results for long-sequences (20 incremental sessions) on ImageNetR and ImageNetA dataset." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.64, + 0.47, + 0.791 + ], + "angle": 0, + "content": "domain gap with pre-training data. ImageNetR [10] consists of 30,000 images with 200 categories. Although its categories overlap with ImageNet-21K [29], the images belong to a different domain. ImageNetA [11] is a real-world dataset that consists of 200 categories. This dataset exhibits significant class imbalance, with some categories having only a few training samples. VTAB [44] is a complex dataset that consists of 19 tasks covering a broad spectrum of domains and semantics. We follow previous work [50] to select 5 tasks to construct a cross-domain CIL dataset." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.795, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Implementation details: We use ViT-B/16 [5] as the pre-trained model, which is pre-trained on ImageNet-21K [29]. The initial learning rate is set as 0.01 and we use the cosine Anneal scheduler. In our experiments, we train the first session for 20 epochs and 10 epochs for later sessions. Following previous papers [45, 50], we use common evaluation metrics in CIL. Specifically, we report the last session" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.469, + 0.893, + 0.545 + ], + "angle": 0, + "content": "accuracy \\(\\mathcal{A}_{Last}\\) and average accuracy of the whole incremental sessions \\(\\mathcal{A}_{Avg} = \\frac{1}{T}\\sum_{i=1}^{T}\\mathcal{A}_i\\). We utilize three different seeds to generate three different class orders for evaluating various methods. We report the mean and standard deviation based on the three experiments. See codes1." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.553, + 0.689, + 0.57 + ], + "angle": 0, + "content": "4.2. 
Experiment Results" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.576, + 0.892, + 0.681 + ], + "angle": 0, + "content": "For a fair comparison, we compare our methods with SOTA CIL methods based on the pre-trained vision transformer model. We compare our methods with prompt-based methods L2P [52], DualPrompt [37], CODAPrompt [31], finetuning methods SLCA [45], and adapter-based method [6, 8, 50]. Tab. 1 shows \\(\\mathcal{A}_{Avg}\\) and \\(\\mathcal{A}_{Last}\\) with three different seeds on four CIL benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.682, + 0.893, + 0.879 + ], + "angle": 0, + "content": "CUB200 & CIFAR100: We first report the results of each method on the CUB200 and CIFAR100 datasets. Since these two datasets overlap with the pre-training data, methods based on a pre-trained model achieve a huge improvement in performance compared with methods that are trained from scratch. For example, as shown in Tab. 1, the average accuracy on L2P, DualPrompt, and CODAPrompt reached \\(88.26\\%\\), \\(91.13\\%\\), and \\(87.71\\%\\) on CIFAR100, respectively. Nevertheless, our method still outperforms those prompt-based methods. Besides, our method does not require the construction of a prompt pool which allows each task to learn specific prompt parameters. The adapter is shared across tasks and our method avoids the parameter" + }, + { + "type": "page_footnote", + "bbox": [ + 0.518, + 0.887, + 0.788, + 0.9 + ], + "angle": 0, + "content": "1https://github.com/HAIV-Lab/SSIAT" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "23257" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.082, + 0.089, + 0.468, + 0.182 + ], + "angle": 0, + "content": "
MethodSes.1Ses.2Ses.3Ses.4Ses.5Avg↑
Adam-adapter[50]87.6086.0789.1482.7284.3585.97
Adam-ssf[50]89.6088.2189.9480.5082.3886.13
Adam-vpt[50]90.2087.5789.6980.3982.1886.01
SLCA[45]94.8092.4393.5493.9894.3393.82
LAE [8]97.9985.2679.6878.7874.3683.21
SSIAT (Ours)96.1092.7194.0993.6894.5094.21
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.187, + 0.468, + 0.214 + ], + "angle": 0, + "content": "Table 3. Experimental results for different methods on VTAB dataset which contain 5 datasets from different domains." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.219, + 0.468, + 0.384 + ], + "angle": 0, + "content": "expansion with tasks increasing. Even though the Adam-adapter/SSF/prompt only needs to train in the first stage which requires less training time, the performance of those methods is inferior to our proposed method. Although the performance of SLCA is comparable to our method in CIFAR100, the number of tuning parameters of our method is much smaller. Besides that, the average performance of our method on CUB200 is \\(93.00\\%\\), nearly \\(2.3\\%\\) improvement over SLCA. Fig. 5 (c) (d) shows the incremental accuracy of each session on CUB200 and CIFAR100 and our method is always at the top of all lines in the incremental process." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.385, + 0.468, + 0.641 + ], + "angle": 0, + "content": "ImageNetR & ImageNetA: We report the performance on ImageNetR and ImageNetA in Tab. 1. These two datasets are more difficult due to the domain gap with the pre-training data. It can be seen that the performance of each method on these two datasets is lower than CIFAR100 and CUB200. Besides, we can see that SLCA outperforms other previous methods significantly on these two datasets. Notably, SLCA achieves an impressive last accuracy on ImageNetR, surpassing the other methods. In contrast, our method achieves SOTA-level performance on both datasets with fewer tuning parameters. Based on Fig. 5, the performance of our method is slightly higher than SLCA in several learning sessions with fewer tuning parameters on the ImageNetR dataset. On the ImageNetA dataset, our method achieves the last accuracy of \\(62.43\\%\\), surpassing SLCA by \\(1.39\\%\\). 
The average accuracy across all sessions is \\(70.83\\%\\), showing a \\(2\\%\\) improvement." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.644, + 0.468, + 0.869 + ], + "angle": 0, + "content": "Additionally, we evaluate the performance of each method under the condition of long sequences. In this setting, each session consists of only 10 classes, and the results are summarized in Tab. 2. Our method also maintains excellent performance in terms of \\( \\mathcal{A}_{Last} \\) and \\( \\mathcal{A}_{Avg} \\). The performance of SLCA is highly dependent on the class order in which the training data appears, resulting in a substantial variance in \\( \\mathcal{A}_{Last} \\) on ImageNetA. In contrast, the Adam-based methods remain relatively stable in long-sequence settings. For Adam-SSF, the long sequence only leads to a nearly \\( 2\\% \\) performance drop in ImageNetR. However, for SLCA, its performance drops by \\( 5\\% \\) on ImageNetR and nearly \\( 10\\% \\) on ImageNetA. In comparison, our method demonstrates excellent stability on long sequences and outperforms other methods by a large margin." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.87, + 0.468, + 0.901 + ], + "angle": 0, + "content": "VTAB: VTAB is a cross-domain CIL dataset where each task provides training data from a different domain. Based" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.198 + ], + "angle": 0, + "content": "on the results presented in Tab. 3, it can be observed that both SLCA and our method perform well in cross-domain CIL. Specifically, in the last incremental stage, our method achieves an accuracy that is \\(12\\%\\) higher than the Adam-based methods. Adam-based methods only perform finetuning in the first task and are not able to adapt well to subsequent tasks on the cross-domain dataset." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.21, + 0.653, + 0.226 + ], + "angle": 0, + "content": "4.3. 
Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.234, + 0.892, + 0.446 + ], + "angle": 0, + "content": "Baselines with different PET methods: Tab. 4 shows the results of baselines with three different parameter-efficient tuning methods in each incremental session. It can be observed that the pre-trained model with an adapter achieves the best performance in terms of both the last session accuracy and average accuracy. Fig. 1 demonstrates that tuning with an adapter achieves a better balance between learning new classes and retaining knowledge of old classes. Both VPT-deep and SSF methods tend to prioritize learning new categories, which leads to increased forgetting of previously learned categories. Although VPT-shallow performs well on CIFAR, its limited parameters hinder the model from incrementally learning new classes on ImageNetR. More results on the other datasets can be found in the Supp." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.447, + 0.892, + 0.674 + ], + "angle": 0, + "content": "Unified classifier retraining vs. Separate local classifier: As we train separate task-specific classifiers in each incremental session, we propose to retrain the classifier to find the optimal decision boundary for all the classes. Tab. 5 displays the ablation experiments of the classifier re-trained on ImageNetA which is the most difficult benchmark. It can be observed that whether it is a linear or a cosine classifier, retraining the classifier leads to a significant performance improvement. Additionally, incorporating the computation of prototype semantic shifts further enhances the performance by an additional \\(2\\%\\) in the cosine classifier. Compared to the classifier alignment methods that do not involve computing updated prototypes, our method demonstrates its superiority as the incremental stages progress. More results on the other datasets can be found in the Supp." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.675, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Progressively tuning vs. first session adaptation: Tab. 6 shows the linear probing results of different adaption ways. After finishing the training of the last session, we freeze the pre-trained backbone and only train the classifier using all the samples. It is evident that not performing tuning and solely freezing the pre-trained model leads to the worst performance, regardless of the dataset. First-session adaptation proves to be a good choice as it reduces training time and works well for datasets like CIFAR100 and CUB200. However, for datasets such as ImageNetA and ImageNetR, which have significant domain gaps from the pre-trained model, relying solely on first-session adaptation is suboptimal. By continuously fine-tuning the adapter, we observe that the backbone exhibits stronger representability compared to only tuning in the first session." + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.518, + 0.957 + ], + "angle": 0, + "content": "23258" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.097, + 0.089, + 0.877, + 0.178 + ], + "angle": 0, + "content": "
PET MethodParamsSes.1Ses.2Ses.3Ses.4Ses.5Ses.6Ses.7Ses.8Ses.9Ses.10Avg↑
SSF [22]0.2M98.5091.9088.5785.0283.9278.7077.7977.8973.0274.9183.03
VPT-deep [15]0.046M97.6069.1568.7056.6055.5648.8755.9756.0553.4855.2161.72
VPT-shallow [15]0.004M98.4092.9588.8092.0687.2686.3785.6485.3185.3685.1088.72
Adapter [4]1.19M98.5095.3591.6091.0890.9290.0889.8089.6288.9889.2991.52
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.181, + 0.895, + 0.21 + ], + "angle": 0, + "content": "Table 4. Experimental results for baselines with different efficient tuning methods on CIFAR100. We report the overall performance of each session and the average performance." + }, + { + "type": "table", + "bbox": [ + 0.11, + 0.216, + 0.861, + 0.336 + ], + "angle": 0, + "content": "
ClassifierMethodSes.1Ses.2Ses.3Ses.4Ses.5Ses.6Ses.7Ses.8Ses.9Ses.10Avg↑
Linearw/o CA74.6568.3763.9058.8258.0255.4854.0352.8951.6252.1358.99
w/ CA74.6571.5967.9364.2462.0860.9059.0357.3256.4156.8563.10
w/ SSCA74.6570.9267.6463.9162.6560.9660.3858.5558.1357.7763.55
Cosinew/o CA82.6677.7872.2067.6366.0163.1859.9759.3558.9357.9166.56
w/ CA82.6679.7074.5670.4068.1965.6663.4061.7760.7059.7868.68
w/ SSCA82.6680.6075.9172.4171.5669.0166.1064.6063.0062.4370.83
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.34, + 0.895, + 0.369 + ], + "angle": 0, + "content": "Table 5. Ablation results for unified classifier training and semantic shift estimation on ImageNetA. We report the overall performance of each session and the average performance. We run the experiments with three seeds and reported the average performance." + }, + { + "type": "table", + "bbox": [ + 0.084, + 0.377, + 0.468, + 0.456 + ], + "angle": 0, + "content": "
MethodCIFARImageNetRImageNetACUB
No-Adapt.86.0868.4233.7186.77
First-Adapt.91.3378.0263.5389.27
All-Adapt.92.5782.0265.9689.86
Δ↑1.24%4.00%2.43%0.59%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.461, + 0.47, + 0.502 + ], + "angle": 0, + "content": "Table 6. Linear probing results of different training ways on four datasets. We retrain the classifier using all the data on the fixed-trained backbone." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.506, + 0.469, + 0.576 + ], + "angle": 0, + "content": "
StructureParamsCIFARImageNetRImageNetA
AdaptMLP-P [4]1.19M94.35±0.6083.63±0.4370.83±1.63
AdaptMLP-S [4]1.19M94.16±0.8883.19±0.4771.00±1.52
Convpass [16]1.63M94.08±0.9983.64±0.3569.96±1.09
Adapter [13]2.38M94.26±0.9183.65±0.5070.94±1.42
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.58, + 0.47, + 0.609 + ], + "angle": 0, + "content": "Table 7. Experimental results of different adapter structures. We report the average performance and standard deviation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.614, + 0.469, + 0.765 + ], + "angle": 0, + "content": "Different structures of the adapter: In this paper, we follow AdaptFormer [4] to use parallel adapterMLP as the adapter structure. We also delve deeper into different adapter structures such as Adapter [13] and Convpass [16]. Although these different tuning structures may exhibit performance differences under static settings, the performance differences among those adapter structures are minimal in the context of CIL shown in Tab. 7. This offers us the flexibility to employ various adapter structures within the context of the CIL paradigm." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Comparison to traditional CIL methods: We conduct evaluations by comparing our approach to SOTA traditional CIL methods shown in Tab. 8. We replace the Resnet backbone with the pre-trained ViT model for fair comparison. The results indicate that the performance of iCaRL tends to be inferior compared to SOTA model expansion methods and our proposed method, even when past samples are stored. It can be observed that methods such as Foster and Der, which dynamically expand feature extraction net" + }, + { + "type": "table", + "bbox": [ + 0.514, + 0.377, + 0.876, + 0.463 + ], + "angle": 0, + "content": "
MethodImageNetRImageNetA
\\( A_{Last} \\uparrow \\)\\( A_{Avg} \\uparrow \\)\\( A_{Last} \\uparrow \\)\\( A_{Avg} \\uparrow \\)
iCaRL [28]61.70±0.5671.34±0.6729.32±2.3640.11±1.36
Foster [34]75.87±0.3881.54±0.8212.44±17.4517.01±20.44
Der [41]75.63±0.8681.13±0.1138.43±2.3946.43±3.29
Memo [48]65.38±0.9073.80±0.8628.45±2.3740.27±1.22
SSIAT (Ours)79.38±0.5983.63±0.4362.43±1.6370.83±1.63
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.466, + 0.892, + 0.495 + ], + "angle": 0, + "content": "Table 8. Comparison to traditional CIL methods on ImageNetR and ImageNetA dataset." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.497, + 0.892, + 0.589 + ], + "angle": 0, + "content": "works, achieve impressive results on ImageNetR. The average accuracy of these methods is only \\(2\\%\\) lower than our method. However, on ImageNetA, where there are few-shot samples for many classes, these methods exhibit low performance. More ablation experiments related to hyperparameters can be found in the supp." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.6, + 0.62, + 0.616 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.627, + 0.892, + 0.824 + ], + "angle": 0, + "content": "Class-incremental learning on a pre-trained model has received significant attention in recent years. In this paper, we first revisit different PET methods in the context of CIL. Then, we propose that incrementally tuning the shared adapter and local classifier without constraints exhibits less forgetting and gains plasticity for learning new classes. Moreover, to train a unified classifier, we calculate the semantic shift of old prototypes and retrain the classifier using updated prototypes in each session. The proposed method eliminates the need for constructing an adapter pool and avoids retaining any image samples. Experimental results on five benchmarks demonstrate the effectiveness of our method which achieves the SOTA performance." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.825, + 0.894, + 0.901 + ], + "angle": 0, + "content": "Acknowledgement. This research was supported by Natural Science Fund of Hubei Province (Grant # 2022CFB823), Alibaba Innovation Research program under Grant Contract # CRAQ7WHZ11220001-20978282, and HUST Independent Innovation Research Fund (Grant # 2021XXJS096)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "23259" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.09, + 0.176, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.473, + 0.184 + ], + "angle": 0, + "content": "[1] Rahaf Aljundi, Francesca Babiloni, Mohamed Elhoseiny, Marcus Rohrbach, and Tinne Tuytelaars. Memory aware synapses: Learning what (not) to forget. In Proceedings of the European conference on computer vision (ECCV), pages 139-154, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.185, + 0.472, + 0.255 + ], + "angle": 0, + "content": "[2] Jihwan Bang, Heesu Kim, YoungJoon Yoo, Jung-Woo Ha, and Jonghyun Choi. Rainbow memory: Continual learning with a memory of diverse samples. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8218-8227, 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.256, + 0.472, + 0.325 + ], + "angle": 0, + "content": "[3] Arslan Chaudhry, Puneet K Dokania, Thalaiyasingam Ajthan, and Philip HS Torr. Riemannian walk for incremental learning: Understanding forgetting and intransigence. In Proceedings of the European conference on computer vision (ECCV), pages 532-547, 2018. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.326, + 0.472, + 0.394 + ], + "angle": 0, + "content": "[4] Shoufa Chen, Chongjian Ge, Zhan Tong, Jiangliu Wang, Yibing Song, Jue Wang, and Ping Luo. Adaptformer: Adapting vision transformers for scalable visual recognition. Advances in Neural Information Processing Systems, 35:16664-16678, 2022. 
1, 2, 3, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.395, + 0.472, + 0.478 + ], + "angle": 0, + "content": "[5] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 1, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.48, + 0.472, + 0.535 + ], + "angle": 0, + "content": "[6] Beyza Ermis, Giovanni Zappella, Martin Wistuba, Aditya Rawal, and Cedric Archambeau. Memory efficient continual learning with transformers. Advances in Neural Information Processing Systems, 35:10629-10642, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.536, + 0.472, + 0.576 + ], + "angle": 0, + "content": "[7] Robert M French. Catastrophic forgetting in connectionist networks. Trends in cognitive sciences, 3(4):128-135, 1999. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.578, + 0.472, + 0.634 + ], + "angle": 0, + "content": "[8] Qiankun Gao, Chen Zhao, Yifan Sun, Teng Xi, Gang Zhang, Bernard Ghanem, and Jian Zhang. A unified continual learning framework with general parameter-efficient tuning. arXiv preprint arXiv:2303.10070, 2023. 3, 4, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.635, + 0.472, + 0.676 + ], + "angle": 0, + "content": "[9] Haoyu He, Jianfei Cai, Jing Zhang, Dacheng Tao, and Bohan Zhuang. Sensitivity-aware visual parameter-efficient tuning. arXiv preprint arXiv:2303.08566, 2023. 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.677, + 0.472, + 0.76 + ], + "angle": 0, + "content": "[10] Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadavath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, et al. The many faces of robustness: A critical analysis of out-of-distribution generalization. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8340-8349, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.761, + 0.472, + 0.816 + ], + "angle": 0, + "content": "[11] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. Natural adversarial examples. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15262-15271, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.817, + 0.472, + 0.858 + ], + "angle": 0, + "content": "[12] Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531, 2015. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.859, + 0.472, + 0.901 + ], + "angle": 0, + "content": "[13] Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. Parameter-efficient transfer" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.115, + 0.473, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.121 + ], + "angle": 0, + "content": "learning for nlp. In International Conference on Machine Learning, pages 2790-2799. PMLR, 2019. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.122, + 0.894, + 0.178 + ], + "angle": 0, + "content": "[14] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.179, + 0.894, + 0.234 + ], + "angle": 0, + "content": "[15] Menglin Jia, Luming Tang, Bor-Chun Chen, Claire Cardie, Serge Belongie, Bharath Hariharan, and Ser-Nam Lim. Visual prompt tuning. In European Conference on Computer Vision, pages 709-727. Springer, 2022. 
1, 2, 3, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.235, + 0.894, + 0.276 + ], + "angle": 0, + "content": "[16] Shibo Jie and Zhi-Hong Deng. Convolutional bypasses are better vision transformer adapters. arXiv preprint arXiv:2207.07039, 2022.8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.278, + 0.892, + 0.361 + ], + "angle": 0, + "content": "[17] James Kirkpatrick, Razvan Pascanu, Neil Rabinowitz, Joel Veness, Guillaume Desjardins, Andrei A Rusu, Kieran Milan, John Quan, Tiago Ramalho, Agnieszka Grabska-Barwinska, et al. Overcoming catastrophic forgetting in neural networks. Proceedings of the national academy of sciences, 114(13):3521-3526, 2017. 1, 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.362, + 0.892, + 0.391 + ], + "angle": 0, + "content": "[18] Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.392, + 0.892, + 0.432 + ], + "angle": 0, + "content": "[19] Brian Lester, Rami Al-Rfou, and Noah Constant. The power of scale for parameter-efficient prompt tuning. arXiv preprint arXiv:2104.08691, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.434, + 0.892, + 0.476 + ], + "angle": 0, + "content": "[20] Xiang Lisa Li and Percy Liang. Prefix-tuning: Optimizing continuous prompts for generation. arXiv preprint arXiv:2101.00190, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.477, + 0.892, + 0.518 + ], + "angle": 0, + "content": "[21] Zhizhong Li and Derek Hoiem. Learning without forgetting. IEEE transactions on pattern analysis and machine intelligence, 40(12):2935-2947, 2017. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.52, + 0.892, + 0.576 + ], + "angle": 0, + "content": "[22] Dongze Lian, Daquan Zhou, Jiashi Feng, and Xinchao Wang. Scaling & shifting your features: A new baseline for efficient model tuning. 
Advances in Neural Information Processing Systems, 35:109-123, 2022. 1, 2, 3, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.577, + 0.892, + 0.632 + ], + "angle": 0, + "content": "[23] Arun Mallya and Svetlana Lazebnik. Packet: Adding multiple tasks to a single network by iterative pruning. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pages 7765-7773, 2018. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.633, + 0.892, + 0.702 + ], + "angle": 0, + "content": "[24] Arun Mallya, Dillon Davis, and Svetlana Lazebnik. Piggyback: Adapting a single network to multiple tasks by learning to mask weights. In Proceedings of the European conference on computer vision (ECCV), pages 67-82, 2018. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.704, + 0.892, + 0.76 + ], + "angle": 0, + "content": "[25] Aristeidis Panos, Yuriko Kobe, Daniel Olmeda Reino, Rahaf Aljundi, and Richard E Turner. First session adaptation: A strong replay-free baseline for class-incremental learning. arXiv preprint arXiv:2303.13199, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.761, + 0.892, + 0.816 + ], + "angle": 0, + "content": "[26] Can Peng, Kun Zhao, Tianren Wang, Meng Li, and Brian C Lovell. Few-shot class-incremental learning from an open-set perspective. In European Conference on Computer Vision, pages 382-397. Springer, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.817, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[27] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 
2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "23260" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.161 + ], + "angle": 0, + "content": "[28] Sylvestre-Alvise Rebuffi, Alexander Kolesnikov, Georg Sperl, and Christoph H Lampert. icarl: Incremental classifier and representation learning. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pages 2001-2010, 2017. 1, 2, 4, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.164, + 0.472, + 0.234 + ], + "angle": 0, + "content": "[29] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 115:211-252, 2015. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.235, + 0.471, + 0.29 + ], + "angle": 0, + "content": "[30] Joan Serra, Didac Suris, Marius Miron, and Alexandros Karatzoglou. Overcoming catastrophic forgetting with hard attention to the task. In International conference on machine learning, pages 4548-4557. PMLR, 2018. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.292, + 0.471, + 0.387 + ], + "angle": 0, + "content": "[31] James Seale Smith, Leonid Karlinsky, Vyshnavi Gutta, Paola Cascante-Bonilla, Donghyun Kim, Assaf Arbelle, Rameswar Panda, Rogerio Feris, and Zsolt Kira. Coda-prompt: Continual decomposed attention-based prompting for rehearsal-free continual learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11909-11919, 2023. 1, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.39, + 0.471, + 0.457 + ], + "angle": 0, + "content": "[32] Yu-Ming Tang, Yi-Xing Peng, and Wei-Shi Zheng. 
When prompt-based incremental learning does not meet strong pretraining. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1706-1716, 2023. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.461, + 0.469, + 0.502 + ], + "angle": 0, + "content": "[33] Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The caltech-ucsd birds-200-2011 dataset. 2011. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.505, + 0.47, + 0.56 + ], + "angle": 0, + "content": "[34] Fu-Yun Wang, Da-Wei Zhou, Han-Jia Ye, and De-Chuan Zhan. Foster: Feature boosting and compression for class incremental learning. In European conference on computer vision, pages 398–414. Springer, 2022. 1, 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.562, + 0.47, + 0.617 + ], + "angle": 0, + "content": "[35] Yabin Wang, Zhiwu Huang, and Xiaopeng Hong. S-prompts learning with pre-trained transformers: An occam's razor for domain incremental learning. Advances in Neural Information Processing Systems, 35:5682-5695, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.619, + 0.47, + 0.687 + ], + "angle": 0, + "content": "[36] Yabin Wang, Zhiheng Ma, Zhiwu Huang, Yaowei Wang, Zhou Su, and Xiaopeng Hong. Isolation and impartial aggregation: A paradigm of incremental learning without interference. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 10209-10217, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.69, + 0.47, + 0.771 + ], + "angle": 0, + "content": "[37] Zifeng Wang, Zizhao Zhang, Sayna Ebrahimi, Ruoxi Sun, Han Zhang, Chen-Yu Lee, Xiaqi Ren, Guolong Su, Vincent Perot, Jennifer Dy, et al. Dualprompt: Complementary prompting for rehearsal-free continual learning. In European Conference on Computer Vision, pages 631-648. Springer, 2022. 
1, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.775, + 0.47, + 0.856 + ], + "angle": 0, + "content": "[38] Zifeng Wang, Zizhao Zhang, Chen-Yu Lee, Han Zhang, Ruoxi Sun, Xiaoqi Ren, Guolong Su, Vincent Perot, Jennifer Dy, and Tomas Pfister. Learning to prompt for continual learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 139-149, 2022. 1, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.47, + 0.902 + ], + "angle": 0, + "content": "[39] Tz-Ying Wu, Gurumurthy Swaminathan, Zhizhong Li, Avinash Ravichandran, Nuno Vasconcelos, Rahul Bhotika, and Stefano Soatto. Class-incremental learning with strong" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "pre-trained models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9601-9610, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.136, + 0.892, + 0.192 + ], + "angle": 0, + "content": "[40] Xiang Xiang, Yuwen Tan, Qian Wan, Jing Ma, Alan Yuille, and Gregory D Hager. Coarse-to-fine incremental few-shot learning. In European Conference on Computer Vision, pages 205-222. Springer, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.194, + 0.892, + 0.261 + ], + "angle": 0, + "content": "[41] Shipeng Yan, Jiangwei Xie, and Xuming He. Der: Dynamically expandable representation for class incremental learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3014-3023, 2021. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.264, + 0.892, + 0.345 + ], + "angle": 0, + "content": "[42] Lu Yu, Bartlomiej Twardowski, Xialei Liu, Luis Herranz, Kai Wang, Yongmei Cheng, Shangling Jui, and Joost van de Weijer. 
Semantic drift compensation for class-incremental learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6982-6991, 2020. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.349, + 0.892, + 0.403 + ], + "angle": 0, + "content": "[43] Friedemann Zenke, Ben Poole, and Surya Ganguli. Continual learning through synaptic intelligence. In International conference on machine learning, pages 3987-3995. PMLR, 2017. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.406, + 0.892, + 0.487 + ], + "angle": 0, + "content": "[44] Xiaohua Zhai, Joan Puigcerver, Alexander Kolesnikov, Pierre Ruyssen, Carlos Riquelme, Mario Lucic, Josip Djolonga, Andre Susano Pinto, Maxim Neumann, Alexey Dosovitskiy, et al. A large-scale study of representation learning with the visual task adaptation benchmark. arXiv preprint arXiv:1910.04867, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.49, + 0.892, + 0.545 + ], + "angle": 0, + "content": "[45] Gengwei Zhang, Liyuan Wang, Guoliang Kang, Ling Chen, and Yunchao Wei. Slca: Slow learner with classifier alignment for continual learning on a pre-trained model. arXiv preprint arXiv:2303.05118, 2023. 1, 2, 3, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.548, + 0.892, + 0.616 + ], + "angle": 0, + "content": "[46] Renrui Zhang, Ziyu Guo, Wei Zhang, Kunchang Li, Xupeng Miao, Bin Cui, Yu Qiao, Peng Gao, and Hongsheng Li. Pointclip: Point cloud understanding by clip. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8552-8562, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.618, + 0.892, + 0.672 + ], + "angle": 0, + "content": "[47] Hengyuan Zhao, Hao Luo, Yuyang Zhao, Pichao Wang, Fan Wang, and Mike Zheng Shou. Revisit parameter-efficient transfer learning: A two-stage paradigm. arXiv preprint arXiv:2303.07910, 2023. 
4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.675, + 0.892, + 0.729 + ], + "angle": 0, + "content": "[48] Da-Wei Zhou, Qi-Wei Wang, Han-Jia Ye, and De-Chuan Zhan. A model or 603 exemplars: Towards memory-efficient class-incremental learning. arXiv preprint arXiv:2205.13218, 2022. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.732, + 0.892, + 0.773 + ], + "angle": 0, + "content": "[49] Da-Wei Zhou, Qi-Wei Wang, Zhi-Hong Qi, Han-Jia Ye, DeChuan Zhan, and Ziwei Liu. Deep class-incremental learning: A survey. arXiv preprint arXiv:2302.03648, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.775, + 0.892, + 0.83 + ], + "angle": 0, + "content": "[50] Da-Wei Zhou, Han-Jia Ye, De-Chuan Zhan, and Ziwei Liu. Revisiting class-incremental learning with pre-trained models: Generalizability and adaptivity are all you need. arXiv preprint arXiv:2303.07338, 2023. 1, 2, 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.832, + 0.892, + 0.899 + ], + "angle": 0, + "content": "[51] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Conditional prompt learning for vision-language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16816-16825, 2022. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.518, + 0.957 + ], + "angle": 0, + "content": "23261" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.146 + ], + "angle": 0, + "content": "[52] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. International Journal of Computer Vision, 130(9):2337-2348, 2022. 1, 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.149, + 0.47, + 0.203 + ], + "angle": 0, + "content": "[53] Qinhao Zhou, Xiang Xiang, and Jing Ma. 
Hierarchical task-incremental learning with feature-space initialization inspired by neural collapse. Neural Processing Letters, pages 1-17, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.206, + 0.47, + 0.275 + ], + "angle": 0, + "content": "[54] Fei Zhu, Xu-Yao Zhang, Chuang Wang, Fei Yin, and Cheng-Lin Liu. Prototype augmentation and self-supervision for incremental learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5871-5880, 2021. 1, 2, 5" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.275 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "23262" + } + ] +] \ No newline at end of file diff --git a/2024/Semantically-Shifted Incremental Adapter-Tuning is A Continual ViTransformer/3e66199c-eb4c-4d3b-89c8-b97d56ae08e1_origin.pdf b/2024/Semantically-Shifted Incremental Adapter-Tuning is A Continual ViTransformer/3e66199c-eb4c-4d3b-89c8-b97d56ae08e1_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..5272c0f4734d45746bfa3021ba63c0ae1664274c --- /dev/null +++ b/2024/Semantically-Shifted Incremental Adapter-Tuning is A Continual ViTransformer/3e66199c-eb4c-4d3b-89c8-b97d56ae08e1_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5289365d02bf0853c35273bf48c8a2e5381a25094bf281be68ae3d847245ace5 +size 3585132 diff --git a/2024/Semantically-Shifted Incremental Adapter-Tuning is A Continual ViTransformer/full.md b/2024/Semantically-Shifted Incremental Adapter-Tuning is A Continual ViTransformer/full.md new file mode 100644 index 0000000000000000000000000000000000000000..a2d1f1eb9ecf44aabfe89533a6d35a939590eab0 --- /dev/null +++ b/2024/Semantically-Shifted Incremental Adapter-Tuning is A Continual ViTransformer/full.md @@ -0,0 +1,333 @@ +# Semantically-Shifted Incremental Adapter-Tuning is A Continual ViTransformer + +Yuwen Tan*, 
Qinhao Zhou*, Xiang Xiang*† +School of Artificial Intelligence and Automation, Huazhong University of Science and Tech., Wuhan, China + +Ke Wang, Yuchuan Wu, Yongbin Li +DAMO Academy, +Alibaba Group, Beijing, China + +# Abstract + +Class-incremental learning (CIL) aims to enable models to continuously learn new classes while overcoming catastrophic forgetting. The introduction of pre-trained models has brought new tuning paradigms to CIL. In this paper, we revisit different parameter-efficient tuning (PET) methods within the context of continual learning. We observe that adapter tuning demonstrates superiority over prompt-based methods, even without parameter expansion in each learning session. Motivated by this, we propose incrementally tuning the shared adapter without imposing parameter update constraints, enhancing the learning capacity of the backbone. Additionally, we employ feature sampling from stored prototypes to retrain a unified classifier, further improving its performance. We estimate the semantic shift of old prototypes without access to past samples and update stored prototypes session by session. Our proposed method eliminates model expansion and avoids retaining any image samples. It surpasses previous pre-trained model-based CIL methods and demonstrates remarkable continual learning capabilities. Experimental results on five CIL benchmarks validate the effectiveness of our approach, achieving state-of-the-art (SOTA) performance. + +# 1. Introduction + +In traditional deep learning, the model can access all the data at once and learning is performed on a static dataset. However, in real-life applications, data usually arrives in a stream format with new classes, requiring the model to learn continuously, known as class-incremental learning (CIL). The primary objective of CIL is to enable the model to learn continuously from non-stationary data streams, facilitating adaptation to new classes and mitigating catastrophic forgetting [7]. 
A number of methods [28, 34, 54] have been devoted to alleviating catastrophic forgetting. Those methods can be mainly divided into replay-based [2, 3, 28], regularization-based [1, 17, 43], and isolation-based meth + +![](images/38761abf699db7fc658056c8941fcecba082ceb556f2f03f620f6f0d613fcd5c.jpg) +Figure 1. Comparison of different parameter-efficient tuning CIL baselines on CIFAR100 dataset. Left: The relationship between the average accuracy of the incremental sessions and the number of tunable parameters. Right: The average performance of old classes and new classes for each PET method. + +![](images/8b6416936343e222840143636fa49bcfddbb157082155ef743fd2c04bfba11a8.jpg) + +ods [23, 24, 30]. However, all these methods assume that models are trained from scratch while ignoring the generalization ability of a strong pre-trained model [5] in the CIL. + +Pre-trained vision transformer models [5] have demonstrated excellent performance on various vision tasks. Recently, it has been explored in the field of CIL and continues to receive considerable attention [37, 38, 45, 50]. Due to the powerful representation capabilities of pre-trained models, CIL methods based on pre-trained models achieve significant performance improvements compared to traditional SOTA methods which are trained from scratch. CIL with a pre-trained model typically fixes the pre-trained model to retain the generalizability and adds a few additional training parameters such as adapter [4], prompt [15] and SSF [22], which is referred to as parameter-efficient tuning (PET). + +Inspired by language-based intelligence, current research in CIL is primarily focused on the prompt-based method [31, 37, 52]. Typically, these approaches require the construction of a pool of task-specific prompts during the training phase which increases storage overhead. Additionally, selecting prompts during the testing stage incurs additional computational costs. 
Other PET methods, as well as full fine-tuning, remain underexplored in the context of CIL. Recently, SLCA [45] proposes fine-tuning the entire ViT and classifier incrementally with different learning rates. However, fine-tuning the entire pre-trained model requires substantial computational resources. In addition, Adam [50] initially explores the application of other PET
+ +As we only train the local classifier in each learning session, we propose to adopt a new classifier retraining method [32, 45, 54] to further improve the CIL performance. First, we implicitly compute the semantic shift [42] of previous prototypes which leverages the semantic shift of current task samples to estimate the change of old classes. Then, we sample several features according to the updated prototypes to retrain the classifier which is more effective than previous methods. The advantages of our proposed method can be summarized as follows: 1) Fine-tuning adapters significantly reduces training costs and improves learning efficiency; 2) We do not need to retain any image samples; 3) The accuracy for new classes is relatively high which verifies the continual learning capacity of the model. + +In summary, our proposed learning framework has the following main contributions: (1) Different from various devotion into the prompt-based methods for CIL, we discover that incrementally tuning adapter is a better continual learner even without constructing an adapter-pool; (2) After each session adaptation with local classifier, we propose to retrain a unified classifier with the semantic shift compensated prototypes which can further improve the performance; (3) Extensive experimental results on five CIL benchmarks demonstrate the superiority of the proposed simple but effective methods which achieves the SOTA. + +# 2. Related Work + +# 2.1. Class-incremental Learning + +Class-incremental learning requires the model to be continuously updated with new class instances while retaining old knowledge [49]. Traditional CIL methods can be categorized into replay-based [2, 3, 28], regularization-based [17, 40, 43, 53], and parameter isolation-based methods [23, 24, 30]. Replay-based methods involve retaining or generating samples of previous classes and incorporating them into the current training phase. 
These methods often employ strategies for sample selection or sample generation to effectively replay past information. Regularization-based methods add constraints or penalties in the learning process which limit the update of the parameters that are important for old classes. Isolation-based methods aim to isolate and update task-specific parameters. By focusing on updating only a subset of parameters, these methods can mitigate catastrophic forgetting. To expand the representative capacity of a model without compromising its existing knowledge, methods for expanding the network have been proposed [34, 41, 48]. These methods dynamically extend the feature extraction network, combined with the replay-based method, achieving dramatic performance improvements. + +# 2.2. Parameter-Efficient Tuning + +Parameter-Efficient Tuning can be considered as a transfer learning method. It refers to not performing full fine-tuning on a pre-trained model, instead inserting and fine-tuning specific sub-modules within the network. This approach is initially demonstrated to have effective transfer learning results in NLP [13, 14, 19, 20]. Recently, similar approaches have been applied to vision transformer models as well. AdaptFormer [4] inserts lightweight modules after the MLP layers in the attention module and has been found to outperform full fine-tuning on action recognition benchmarks. Another PET approach SSF [22] surprisingly outperforms other methods in certain tasks even with a smaller number of parameters. Inspired by the prompt approach used in the language model, VPT [15] applies it to visual models and achieves impressive results across various downstream tasks while only introducing a small number of additional parameters. Furthermore, the prompt-based method has also been used in vision-language models [27, 46, 51, 52] to improve performance on various downstream tasks. + +# 2.3. 
Continual Learning on a Pre-trained Model + +The aforementioned CIL methods all involve training the model from scratch, while CIL with pre-trained model [35, 39, 50, 52] has gained much attention due to its strong feature representation ability. L2P [52] utilizes the pretrained model and learns a set of extra prompts dynamically to guide the model to solve corresponding tasks. Du + +![](images/357d2bfef8d65c36341d1ff6b88ea039f1345d7805989618c7494ad3d9e309dc.jpg) +(I) Incremental Adapter Tuning + +![](images/43934528ae263193d23078904a8836507b8bcb4e088a6e5da5c03cfc045ccfbb.jpg) +(II) Semantic Shift Estimation +(III) Unified Classifier Training + +![](images/b5d9fafbba2e6239eee9791679047ae39d6413db2ec80a4e28445ecce8e73f16.jpg) +Figure 2. The framework of our proposed method. Left: The illustration of the structure of ViT and adapter. The adapter and local classifier are incrementally trained in each session using the Eq. 4. Right: The process of retraining the classifier with semantic shift estimation. + +alPrompt [37] proposes to learn of two mutually unrelated prompt spaces: the general prompt and the expert prompt. It encodes task-invariant instructions and task-specific instructions, respectively. CODAPrompt [31] introduces a decomposed attention-based continual learning prompting method, which offers a larger learning capacity than existing prompt-based methods [37, 52]. SLCA [45] explores the fine-tuning paradigm of the pre-trained models, setting different learning rates for backbone and classifiers, and gains excellent performance. Adam [50] proposes to construct the classifier by merging the embeddings of a pretrained model and an adapted downstream model. LAE [8] proposes a unified framework that calibrates the adaptation speed of tuning modules and ensembles PET modules to accomplish predictions. + +# 3. Methodology + +# 3.1. Preliminary + +Class-incremental learning formulation: We first introduce the definition of CIL. 
Consider a neural network $\mathcal{M}_{\theta} = f_{\theta_{cls}}(\mathcal{F}_{\theta_{bne}}(\cdot))$ with trainable parameters $\theta = \{\theta_{bne},\theta_{cls}\}$ . $\mathcal{F}_{\theta_{bne}}$ represents the feature extraction backbone which extracts features from input images and $f_{\theta_{cls}}$ stands for the classification layer that projects feature representations to class predictions. In CIL setting, $\mathcal{M}_{\theta}$ needs to learn a series of sessions from training data $D_{t} = \{(x_{1}^{t},y_{1}^{t}),(x_{2}^{t},y_{2}^{t}),\ldots \}, t = 1,\ldots ,T$ and satisfy the condition $Y(i)\cap Y(j) = \emptyset ,i\neq j$ where $Y(i)$ represent the label set in session $i$ . The goal of $\mathcal{M}_{\theta}$ is to perform well + +on test sets that contain all the classes learned denoted as $\mathcal{Y} = Y(1) \cup \ldots \cup Y(t)$ after $t$ -th session. + +Parameter-efficient tuning with Adapter: An adapter is a bottleneck structure [4] that can be incorporated into a pre-trained transformer-based network to facilitate transfer learning and enhance the performance of downstream tasks. An adapter typically consists of a downsampled MLP layer $W_{down} \in \mathbb{R}^{d \times d}$ , a non-linear activation function $\sigma$ , and an upsampled MLP layer $W_{up} \in \mathbb{R}^{d \times d}$ . Denote the input as $x_{i}$ , we formalize the adapter as + +$$ +o u t = x _ {i} + s \cdot \sigma \left(x _ {i} * W _ {\text {d o w n}}\right) * W _ {u p}, \tag {1} +$$ + +where $*$ stands for the matrix multiplication, $\sigma$ denotes the activation function RELU, and $s$ denotes the scale factor. + +Parameter-efficient tuning with SSF: SSF [22] modulates pre-trained models using scale and shift factors to align the feature distribution of downstream tasks. SSF inserts its layers in each transformer operation. 
Suppose $x_{i}$ is the output of one of the modules; then SSF can be represented as
In contrast to earlier methods [17, 21, 28], we propose that the shared adapter should be tuned incrementally without parameter constraints. Next, we provide a detailed description of the proposed baseline, together with a supporting explanation and analysis.
Right: The accuracy of new classes. + +![](images/75f5180e732ccfbe142d9d19964860bfccf8aa09cca2f33bd8a160c3c39dedc4.jpg) + +![](images/29a1640ab22aa0138ffadfc5931fc33b54b611ac45c2e33a46449ded5a59439a.jpg) +Figure 4. Parameter sensitivity analysis on the ImageNetR dataset. Left: The parameter sensitiveness of two incremental tasks. Right: The sensitiveness of different parameters in one task. + +![](images/9efc944cb5097dbecb40a0fc2adc8dc87415b8a5d0a488d8589e8ecbc9609644.jpg) + +Analysis of the adapter-based baseline: We will analyze why the adapter shows its superiority in the CIL over other PET methods, and why we choose to incrementally tune the shared adapter without parameter constraints. + +First, we elaborate on why incrementally tuning the adapter is better in the context of CIL. By utilizing the residual structure, the adapter can retain the generalization capabilities from the pre-trained model while adapting to new tasks. The incremental tuning of the adapter exhibits a cumulative learning capability, where the representational capacity of the adapter is further enhanced as the learning sessions progress. In contrast, both SSF and prompt tuning have limitations when it comes to handling CIL. These methods suffer from overfitting to the current distribution. When the shared parameters excessively overfit each current task, the model gradually loses its generalization ability which is harmful for training a unified model for CIL. Then, we try to utilize KD loss to implicitly limit parameter updates and adjust the weighting factor. As shown in Fig. 3, the results demonstrate that unconstrained training is more beneficial for new-classes learning and improving overall performance. Based on this observation, we propose our proposition from the perspective of parameter sensitivity. + +Proposition 1: Confining the change of parameters of previous tasks hinders the plasticity of new classes due to the similarity of parameter sensitivity among tasks. 
+ +Proof: Given the parameter set $\theta = \{\theta_1, \theta_2, \dots, \theta_N\}$ and training set $D_t = (X_t, Y_t)$ in $t$ -th session, the definition of parameter sensitivity [9, 47] is defined as + +$$ +s _ {i} ^ {t} = \mathcal {L} \left(X _ {t}, Y _ {t} \mid \theta_ {i}\right) - \mathcal {L} \left(X _ {t}, Y _ {t} \mid \theta_ {i} ^ {*}\right), \tag {5} +$$ + +where $\theta_{i}^{*} = \theta_{i} + \Delta \theta_{i}$ and $\mathcal{L}$ denotes the optimized loss in the classification task. We use the first-order Taylor expansion, and the parameter sensitivity can be rewritten as follows: + +$$ +s _ {i} = - g _ {i} \Delta \theta_ {i} = - \frac {\delta \mathcal {L}}{\delta \theta_ {i}} * \Delta \theta_ {i}, \tag {6} +$$ + +as $\Delta \theta_{i}$ denotes the update after the training process, we follow the work [9] to use the one-step update to approximate the $\Delta \theta_{i} = \epsilon \frac{\delta\mathcal{L}}{\delta\theta_{i}}$ . Therefore, the parameter can be approximately computed as $s_i \approx -\epsilon \left(\frac{\delta\mathcal{L}}{\delta\theta_i}\right)^2$ . As shown in Fig. 4, the sensitivity values of tuning parameters for two different sessions are nearly equal and the most sensitive parameters are always up weights. This means that constraining the parameter update would hinder the learning of new classes and further impede the ability of the model for continual learning. Furthermore, in the experimental section, we demonstrate the representative capacity of the adapter continued to strengthen through incremental tuning. + +# 3.3. Semantic shift estimation without past samples + +Due to the selective updating of classifiers corresponding to the current task during training, the classifiers across different learning sessions are not fully aligned in the same feature space. To further optimize classifiers, we store the prototypes after training the backbone and local classifier. 
However, as the backbone is trained incrementally with new classes, the feature distribution of old classes undergoes changes. Retraining the classifier with the previous prototypes is sub-optimal. Since the feature representability of the backbone updates over time, using outdated features may not effectively retrain a unified classifier. To solve this problem, we update the feature distribution of old classes by computing the semantic shift over the learning process. We follow SDC [42] to estimate the semantic shift of old prototypes without access to past samples. + +Suppose $\varphi_c^t$ denotes the prototype of category $c$ in session $t$ and $r$ is the learning session that the category belongs to. We have no access to the samples of category $c$ to update the prototype in session $t$ (when $t > r$ ). The semantic shift of class $c$ between two sessions can be represented as + +$$ +\Delta_ {c} ^ {r \rightarrow t} = \varphi_ {c} ^ {t} - \varphi_ {c} ^ {r}, \quad \varphi_ {c} ^ {r} = \frac {1}{N _ {r} ^ {c}} \sum_ {n = 1} ^ {N _ {r} ^ {c}} \mathcal {F} \left(X _ {r} ^ {c}, \theta_ {r}\right). \tag {7} +$$ + +While we do not have access to data from the old class $c$ , we can only estimate the shift of current task categories on old and new models. The semantic shift of current samples between two sessions can be represented as + +$$ +\delta_ {i} ^ {t - 1 \rightarrow t} = e _ {i} ^ {t} - e _ {i} ^ {t - 1}, \tag {8} +$$ + +where $e$ denotes the embedding of one sample in the current task $t$ . We can compute $e_i^{t - 1}$ at the start of the current task with the model trained in task $t - 1$ . After training + +on the new task, we compute $\delta_i^{t - 1\to t}$ and use it to estimate $\Delta_c^{t - 1\to t}$ . 
We compute the shift as + +$$ +\begin{array}{l} \widetilde {\Delta} _ {c} ^ {t - 1 \rightarrow t} = \frac {\sum \alpha_ {i} \delta_ {i} ^ {t - 1 \rightarrow t}}{\sum \alpha_ {i}}, c \notin C ^ {t}, \tag {9} \\ \alpha_ {i} = \mathbf {e} ^ {- \frac {| | e _ {i} ^ {t - 1} - \varphi_ {c} ^ {t - 1} | |}{2 \sigma^ {2}}}, \\ \end{array} +$$ + +where $\sigma$ is the standard deviation of the distribution of class $c$ ; $C^t$ denotes classes learned in the current session. Before retraining the classifier, we update the prototypes with + +$$ +\left\{\begin{array}{l l}\varphi_ {c} = \varphi_ {c} ^ {t - 1} + \widetilde {\Delta} _ {c} ^ {t - 1 \rightarrow t}&, c \notin C ^ {t}\\\varphi_ {c} = \frac {1}{N _ {c}} \sum_ {i} e _ {c}&, c \in C ^ {t},\end{array}\right. \tag {10} +$$ + +where $N_{c}$ denotes the number of images in class $c$ . + +# 3.4. Unified classifier training + +Previous work [32, 45, 54] has attempted to retrain a unified classifier by modeling each class as a Gaussian distribution and sampling features from the distribution. We refer to this method as classifier alignment (CA) and adopt a similar approach that incorporates semantic shift estimation, which we denote as SSCA. Specifically, we compute the class prototypes $P_{c} = \{\varphi_{1},\dots,\varphi_{C}\}$ and covariance $\Sigma_{c} = \{\varsigma_{1},\dots,\varsigma_{C}\}$ for each class after training process in each learning session. The calculation of class prototypes is based on Eq. 10. Due to the capability of the trained backbone network to provide well-distributed representations, each class exhibits an unimodal distribution. Therefore, we form a normal distribution $\mathcal{N}(\mu_c,\Sigma_c)$ for each class with class prototype and variance. We sample features $\mathcal{V}_c = \{v_{c,1},\dots v_{c,S_n}\}$ from the distribution to obtain diverse samples, where $S_{n}$ is the number of the sample features for each class. 
Then, we use these features to train classification layers $\theta_{cls}$ with a commonly used cross-entropy loss as + +$$ +\mathcal {L} \left(\theta_ {c l s}, \mathcal {V} _ {c}\right) = - \sum_ {i = 1} ^ {S _ {n} * C} \log \frac {\mathbf {e} ^ {\left(\theta_ {c l s} ^ {j} \left(v _ {i}\right)\right)}}{\sum_ {k \in C} \mathbf {e} ^ {\left(\theta_ {c l s} ^ {k} \left(v _ {i}\right)\right)}}, \tag {11} +$$ + +where $C$ denotes all classes learned so far. We normalize the features and classifier the same as backbone training. + +# 4. Experiments + +# 4.1. Datasets and Evaluation Protocols + +Dataset: We evaluate our method on four commonly-used CIL benchmarks and one cross-domain CIL dataset. We randomly split the dataset into 10 or 20 learning tasks. CIFAR100 [18] is a widely used dataset in CIL which consists of 60000 images, belonging to 100 different categories. CUB200 [33] is a dataset that contains approximately 11,788 images of 200 bird species with fine-grained class labels. Additionally, we also follow recent work [45, 50] to use the other three datasets which have a large + +
MethodParamsSplit-ImageNetRSplit-ImageNetACUB200CIFAR100
\( A_{Last} \uparrow \)\( A_{Avg} \uparrow \)\( A_{Last} \uparrow \)\( A_{Avg} \uparrow \)\( A_{Last} \uparrow \)\( A_{Avg} \uparrow \)\( A_{Last} \uparrow \)\( A_{Avg} \uparrow \)
Joint86M81.72±0.35-50.56±1.75-88.17±0.32-89.71±0.07-
FT86M20.93±0.8640.35±0.746.03±4.7416.57±5.822.05±1.6945.67±2.0422.17±1.0941.83±1.60
SLCA [45]86M79.35±0.2883.29±0.4661.05±0.6368.88±2.3184.68±0.0990.77±0.7991.26±0.3794.29±0.92
Adam-adapter [50]1.19M65.79±0.9872.42±1.4148.81±0.0858.84±1.3785.84±0.0891.33±0.4987.29±0.2791.21±1.33
Adam-ssf [50]0.2M66.61±0.0974.36±1.0048.94±0.1458.79±2.8285.67±0.1590.99±0.7685.27±0.2189.90±0.98
Adam-prompt [50]0.04M65.29±1.5272.97±0.5629.29±7.4239.14±7.5985.28±0.4790.89±0.8685.04±1.0489.49±0.58
LAE [8]0.19M72.29±0.1477.99±0.4647.18±1.1758.15±0.7380.97±0.5187.22±1.2185.25±0.4389.80±1.20
L2P [38]0.04M72.34±0.1777.36±0.6444.04±0.9351.24±2.2667.02±1.9079.62±1.6084.06±0.8888.26±1.34
ADA [6]1.19M73.76±0.2779.57±0.8450.16±0.2059.43±2.2076.13±0.9485.74±0.2688.25±0.2691.85±1.32
DualPrompt [37]0.25M69.10±0.6274.28±0.6653.19±0.7464.59±0.0868.48±0.4780.59±1.5086.93±0.2491.13±0.32
CODAPrompt [31]3.84M73.31±0.5078.47±0.5352.08±0.1263.92±0.1277.23±1.1281.90±0.8583.21±3.3987.71±3.17
SSIAT (Ours)1.19M79.38±0.5983.63±0.4362.43±1.6370.83±1.6388.75±0.3893.00±0.9091.35±0.2694.35±0.60
+ +![](images/c65dbad5c97f094610f8f85bf7d9c4dcffc02992b9ce62a69a4f09322a0603c3.jpg) +Figure 5. The performance of each learning session on four datasets. (a) ImageNetR; (b) ImageNetA; (c) CUB200; (d) CIFAR100. These curves are plotted by calculating the average performance across three different seeds for each incremental session. + +![](images/6f3089f711c6abdb9a01251408912e42063b6e283306d0c5749cd35e4a860cc0.jpg) + +![](images/da6c6bfa4fdbc1f75ccd7f2b7fc8fc253efa4bea5fd72daf44b1de72386be979.jpg) + +![](images/07aead82b1259ae870c4fcbfc8fe05b7051d0987697a8a32e0755c5215962f6c.jpg) + +Table 1. Experimental results on four CIL benchmarks. All other methods are reproduced using the same seeds for a fair comparison. + +
MethodImageNetRImageNetA
\( A_{Last} \uparrow \)\( A_{Avg} \uparrow \)\( A_{Last} \uparrow \)\( A_{Avg} \uparrow \)
SLCA [45]74.63±1.5579.92±1.2936.69±21.3156.35±7.09
Adam-adapter[50]57.42±0.8464.75±0.7948.65±0.1259.55±1.07
Adam-ssf[50]64.30±0.9472.42±1.4747.27±4.3458.36±4.70
Adam-prompt[50]59.90±1.1368.02±1.0229.93±4.8839.13±4.19
LAE [8]69.86±0.4377.38±0.6139.52±0.7851.75±2.15
L2P [38]69.64±0.4275.28±0.5740.48±1.7849.62±1.46
DualPrompt [37]66.61±0.5872.45±0.3742.28±1.9453.39±1.64
CODAPrompt [31]69.96±0.5075.34±0.8544.62±1.9254.86±0.50
SSIAT (Ours)75.67±0.1482.30±0.3659.16±1.0368.45±1.92
+ +Table 2. Experimental results for long-sequences (20 incremental sessions) on ImageNetR and ImageNetA dataset. + +domain gap with pre-training data. ImageNetR [10] consists of 30,000 images with 200 categories. Although its categories overlap with ImageNet-21K [29], the images belong to a different domain. ImageNetA [11] is a real-world dataset that consists of 200 categories. This dataset exhibits significant class imbalance, with some categories having only a few training samples. VTAB [44] is a complex dataset that consists of 19 tasks covering a broad spectrum of domains and semantics. We follow previous work [50] to select 5 tasks to construct a cross-domain CIL dataset. + +Implementation details: We use ViT-B/16 [5] as the pre-trained model, which is pre-trained on ImageNet-21K [29]. The initial learning rate is set as 0.01 and we use the cosine Anneal scheduler. In our experiments, we train the first session for 20 epochs and 10 epochs for later sessions. Following previous papers [45, 50], we use common evaluation metrics in CIL. Specifically, we report the last session + +accuracy $\mathcal{A}_{Last}$ and average accuracy of the whole incremental sessions $\mathcal{A}_{Avg} = \frac{1}{T}\sum_{i=1}^{T}\mathcal{A}_i$ . We utilize three different seeds to generate three different class orders for evaluating various methods. We report the mean and standard deviation based on the three experiments. See codes1. + +# 4.2. Experiment Results + +For a fair comparison, we compare our methods with SOTA CIL methods based on the pre-trained vision transformer model. We compare our methods with prompt-based methods L2P [52], DualPrompt [37], CODAPrompt [31], finetuning methods SLCA [45], and adapter-based method [6, 8, 50]. Tab. 1 shows $\mathcal{A}_{Avg}$ and $\mathcal{A}_{Last}$ with three different seeds on four CIL benchmarks. + +CUB200 & CIFAR100: We first report the results of each method on the CUB200 and CIFAR100 datasets. 
Since these two datasets overlap with the pre-training data, methods based on a pre-trained model achieve a huge improvement in performance compared with methods that are trained from scratch. For example, as shown in Tab. 1, the average accuracy on L2P, DualPrompt, and CODAPrompt reached $88.26\%$ , $91.13\%$ , and $87.71\%$ on CIFAR100, respectively. Nevertheless, our method still outperforms those prompt-based methods. Besides, our method does not require the construction of a prompt pool which allows each task to learn specific prompt parameters. The adapter is shared across tasks and our method avoids the parameter + +
MethodSes.1Ses.2Ses.3Ses.4Ses.5Avg↑
Adam-adapter[50]87.6086.0789.1482.7284.3585.97
Adam-ssf[50]89.6088.2189.9480.5082.3886.13
Adam-vpt[50]90.2087.5789.6980.3982.1886.01
SLCA[45]94.8092.4393.5493.9894.3393.82
LAE [8]97.9985.2679.6878.7874.3683.21
SSIAT (Ours)96.1092.7194.0993.6894.5094.21
+ +Table 3. Experimental results for different methods on VTAB dataset which contain 5 datasets from different domains. + +expansion with tasks increasing. Even though the Adam-adapter/SSF/prompt only needs to train in the first stage which requires less training time, the performance of those methods is inferior to our proposed method. Although the performance of SLCA is comparable to our method in CIFAR100, the number of tuning parameters of our method is much smaller. Besides that, the average performance of our method on CUB200 is $93.00\%$ , nearly $2.3\%$ improvement over SLCA. Fig. 5 (c) (d) shows the incremental accuracy of each session on CUB200 and CIFAR100 and our method is always at the top of all lines in the incremental process. + +ImageNetR & ImageNetA: We report the performance on ImageNetR and ImageNetA in Tab. 1. These two datasets are more difficult due to the domain gap with the pre-training data. It can be seen that the performance of each method on these two datasets is lower than CIFAR100 and CUB200. Besides, we can see that SLCA outperforms other previous methods significantly on these two datasets. Notably, SLCA achieves an impressive last accuracy on ImageNetR, surpassing the other methods. In contrast, our method achieves SOTA-level performance on both datasets with fewer tuning parameters. Based on Fig. 5, the performance of our method is slightly higher than SLCA in several learning sessions with fewer tuning parameters on the ImageNetR dataset. On the ImageNetA dataset, our method achieves the last accuracy of $62.43\%$ , surpassing SLCA by $1.39\%$ . The average accuracy across all sessions is $70.83\%$ , showing a $2\%$ improvement. + +Additionally, we evaluate the performance of each method under the condition of long sequences. In this setting, each session consists of only 10 classes, and the results are summarized in Tab. 2. Our method also maintains excellent performance in terms of $\mathcal{A}_{Last}$ and $\mathcal{A}_{Avg}$ . 
The performance of SLCA is highly dependent on the class order in which the training data appears, resulting in a substantial variance in $\mathcal{A}_{Last}$ on ImageNetA. In contrast, the Adam-based methods remain relatively stable in long-sequence settings. For Adam-SSF, the long sequence only leads to a nearly $2\%$ performance drop in ImageNetR. However, for SLCA, its performance drops by $5\%$ on ImageNetR and nearly $10\%$ on ImageNetA. In comparison, our method demonstrates excellent stability on long sequences and outperforms other methods by a large margin. + +VTAB: VTAB is a cross-domain CIL dataset where each task provides training data from a different domain. Based + +on the results presented in Tab. 3, it can be observed that both SLCA and our method perform well in cross-domain CIL. Specifically, in the last incremental stage, our method achieves an accuracy that is $12\%$ higher than the Adam-based methods. Adam-based methods only perform finetuning in the first task and are not able to adapt well to subsequent tasks on the cross-domain dataset. + +# 4.3. Ablation Study + +Baselines with different PET methods: Tab. 4 shows the results of baselines with three different parameter-efficient tuning methods in each incremental session. It can be observed that the pre-trained model with an adapter achieves the best performance in terms of both the last session accuracy and average accuracy. Fig. 1 demonstrates that tuning with an adapter achieves a better balance between learning new classes and retaining knowledge of old classes. Both VPT-deep and SSF methods tend to prioritize learning new categories, which leads to increased forgetting of previously learned categories. Although VPT-shallow performs well on CIFAR, its limited parameters hinder the model from incrementally learning new classes on ImageNetR. More results on the other datasets can be found in the Supp. + +Unified classifier retraining vs. 
Separate local classifier: As we train separate task-specific classifiers in each incremental session, we propose to retrain the classifier to find the optimal decision boundary for all the classes. Tab. 5 displays the ablation experiments of the classifier re-trained on ImageNetA which is the most difficult benchmark. It can be observed that whether it is a linear or a cosine classifier, retraining the classifier leads to a significant performance improvement. Additionally, incorporating the computation of prototype semantic shifts further enhances the performance by an additional $2\%$ in the cosine classifier. Compared to the classifier alignment methods that do not involve computing updated prototypes, our method demonstrates its superiority as the incremental stages progress. More results on the other datasets can be found in the Supp. + +Progressively tuning vs. first session adaptation: Tab. 6 shows the linear probing results of different adaption ways. After finishing the training of the last session, we freeze the pre-trained backbone and only train the classifier using all the samples. It is evident that not performing tuning and solely freezing the pre-trained model leads to the worst performance, regardless of the dataset. First-session adaptation proves to be a good choice as it reduces training time and works well for datasets like CIFAR100 and CUB200. However, for datasets such as ImageNetA and ImageNetR, which have significant domain gaps from the pre-trained model, relying solely on first-session adaptation is suboptimal. By continuously fine-tuning the adapter, we observe that the backbone exhibits stronger representability compared to only tuning in the first session. + +
PET MethodParamsSes.1Ses.2Ses.3Ses.4Ses.5Ses.6Ses.7Ses.8Ses.9Ses.10Avg↑
SSF [22]0.2M98.5091.9088.5785.0283.9278.7077.7977.8973.0274.9183.03
VPT-deep [15]0.046M97.6069.1568.7056.6055.5648.8755.9756.0553.4855.2161.72
VPT-shallow [15]0.004M98.4092.9588.8092.0687.2686.3785.6485.3185.3685.1088.72
Adapter [4]1.19M98.5095.3591.6091.0890.9290.0889.8089.6288.9889.2991.52
+ +Table 4. Experimental results for baselines with different efficient tuning methods on CIFAR100. We report the overall performance of each session and the average performance. + +
ClassifierMethodSes.1Ses.2Ses.3Ses.4Ses.5Ses.6Ses.7Ses.8Ses.9Ses.10Avg↑
Linearw/o CA74.6568.3763.9058.8258.0255.4854.0352.8951.6252.1358.99
w/ CA74.6571.5967.9364.2462.0860.9059.0357.3256.4156.8563.10
w/ SSCA74.6570.9267.6463.9162.6560.9660.3858.5558.1357.7763.55
Cosinew/o CA82.6677.7872.2067.6366.0163.1859.9759.3558.9357.9166.56
w/ CA82.6679.7074.5670.4068.1965.6663.4061.7760.7059.7868.68
w/ SSCA82.6680.6075.9172.4171.5669.0166.1064.6063.0062.4370.83
+ +Table 5. Ablation results for unified classifier training and semantic shift estimation on ImageNetA. We report the overall performance of each session and the average performance. We run the experiments with three seeds and reported the average performance. + +
MethodCIFARImageNetRImageNetACUB
No-Adapt.86.0868.4233.7186.77
First-Adapt.91.3378.0263.5389.27
All-Adapt.92.5782.0265.9689.86
Δ↑1.24%4.00%2.43%0.59%
+ +Table 6. Linear probing results of different training ways on four datasets. We retrain the classifier using all the data on the fixed-trained backbone. + +
StructureParamsCIFARImageNetRImageNetA
AdaptMLP-P [4]1.19M94.35±0.6083.63±0.4370.83±1.63
AdaptMLP-S [4]1.19M94.16±0.8883.19±0.4771.00±1.52
Convpass [16]1.63M94.08±0.9983.64±0.3569.96±1.09
Adapter [13]2.38M94.26±0.9183.65±0.5070.94±1.42
+ +Different structures of the adapter: In this paper, we follow AdaptFormer [4] to use parallel adapterMLP as the adapter structure. We also delve deeper into different adapter structures such as Adapter [13] and Convpass [16]. Although these different tuning structures may exhibit performance differences under static settings, the performance differences among those adapter structures are minimal in the context of CIL shown in Tab. 7. This offers us the flexibility to employ various adapter structures within the context of the CIL paradigm. + +Comparison to traditional CIL methods: We conduct evaluations by comparing our approach to SOTA traditional CIL methods shown in Tab. 8. We replace the Resnet backbone with the pre-trained ViT model for fair comparison. The results indicate that the performance of iCaRL tends to be inferior compared to SOTA model expansion methods and our proposed method, even when past samples are stored. It can be observed that methods such as Foster and Der, which dynamically expand feature extraction net + +Table 7. Experimental results of different adapter structures. We report the average performance and standard deviation. + +
MethodImageNetRImageNetA
\( A_{Last} \uparrow \)\( A_{Avg} \uparrow \)\( A_{Last} \uparrow \)\( A_{Avg} \uparrow \)
iCaRL [28]61.70±0.5671.34±0.6729.32±2.3640.11±1.36
Foster [34]75.87±0.3881.54±0.8212.44±17.4517.01±20.44
Der [41]75.63±0.8681.13±0.1138.43±2.3946.43±3.29
Memo [48]65.38±0.9073.80±0.8628.45±2.3740.27±1.22
SSIAT (Ours)79.38±0.5983.63±0.4362.43±1.6370.83±1.63
+ +Table 8. Comparison to traditional CIL methods on ImageNetR and ImageNetA dataset. + +works, achieve impressive results on ImageNetR. The average accuracy of these methods is only $2\%$ lower than our method. However, on ImageNetA, where there are few-shot samples for many classes, these methods exhibit low performance. More ablation experiments related to hyperparameters can be found in the supp. + +# 5. Conclusion + +Class-incremental learning on a pre-trained model has received significant attention in recent years. In this paper, we first revisit different PET methods in the context of CIL. Then, we propose that incrementally tuning the shared adapter and local classifier without constraints exhibits less forgetting and gains plasticity for learning new classes. Moreover, to train a unified classifier, we calculate the semantic shift of old prototypes and retrain the classifier using updated prototypes in each session. The proposed method eliminates the need for constructing an adapter pool and avoids retaining any image samples. Experimental results on five benchmarks demonstrate the effectiveness of our method which achieves the SOTA performance. + +Acknowledgement. This research was supported by Natural Science Fund of Hubei Province (Grant # 2022CFB823), Alibaba Innovation Research program under Grant Contract # CRAQ7WHZ11220001-20978282, and HUST Independent Innovation Research Fund (Grant # 2021XXJS096). + +# References + +[1] Rahaf Aljundi, Francesca Babiloni, Mohamed Elhoseiny, Marcus Rohrbach, and Tinne Tuytelaars. Memory aware synapses: Learning what (not) to forget. In Proceedings of the European conference on computer vision (ECCV), pages 139-154, 2018. 1 +[2] Jihwan Bang, Heesu Kim, YoungJoon Yoo, Jung-Woo Ha, and Jonghyun Choi. Rainbow memory: Continual learning with a memory of diverse samples. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8218-8227, 2021. 
1, 2 +[3] Arslan Chaudhry, Puneet K Dokania, Thalaiyasingam Ajthan, and Philip HS Torr. Riemannian walk for incremental learning: Understanding forgetting and intransigence. In Proceedings of the European conference on computer vision (ECCV), pages 532-547, 2018. 1, 2 +[4] Shoufa Chen, Chongjian Ge, Zhan Tong, Jiangliu Wang, Yibing Song, Jue Wang, and Ping Luo. Adaptformer: Adapting vision transformers for scalable visual recognition. Advances in Neural Information Processing Systems, 35:16664-16678, 2022. 1, 2, 3, 8 +[5] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 1, 6 +[6] Beyza Ermis, Giovanni Zappella, Martin Wistuba, Aditya Rawal, and Cedric Archambeau. Memory efficient continual learning with transformers. Advances in Neural Information Processing Systems, 35:10629-10642, 2022. 6 +[7] Robert M French. Catastrophic forgetting in connectionist networks. Trends in cognitive sciences, 3(4):128-135, 1999. 1 +[8] Qiankun Gao, Chen Zhao, Yifan Sun, Teng Xi, Gang Zhang, Bernard Ghanem, and Jian Zhang. A unified continual learning framework with general parameter-efficient tuning. arXiv preprint arXiv:2303.10070, 2023. 3, 4, 6, 7 +[9] Haoyu He, Jianfei Cai, Jing Zhang, Dacheng Tao, and Bohan Zhuang. Sensitivity-aware visual parameter-efficient tuning. arXiv preprint arXiv:2303.08566, 2023. 4, 5 +[10] Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadavath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, et al. The many faces of robustness: A critical analysis of out-of-distribution generalization. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8340-8349, 2021. 6 +[11] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. 
Natural adversarial examples. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15262-15271, 2021. 6 +[12] Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531, 2015. 4 +[13] Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. Parameter-efficient transfer + +learning for nlp. In International Conference on Machine Learning, pages 2790-2799. PMLR, 2019. 2, 8 +[14] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685, 2021. 2 +[15] Menglin Jia, Luming Tang, Bor-Chun Chen, Claire Cardie, Serge Belongie, Bharath Hariharan, and Ser-Nam Lim. Visual prompt tuning. In European Conference on Computer Vision, pages 709-727. Springer, 2022. 1, 2, 3, 8 +[16] Shibo Jie and Zhi-Hong Deng. Convolutional bypasses are better vision transformer adapters. arXiv preprint arXiv:2207.07039, 2022.8 +[17] James Kirkpatrick, Razvan Pascanu, Neil Rabinowitz, Joel Veness, Guillaume Desjardins, Andrei A Rusu, Kieran Milan, John Quan, Tiago Ramalho, Agnieszka Grabska-Barwinska, et al. Overcoming catastrophic forgetting in neural networks. Proceedings of the national academy of sciences, 114(13):3521-3526, 2017. 1, 2, 4 +[18] Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009. 5 +[19] Brian Lester, Rami Al-Rfou, and Noah Constant. The power of scale for parameter-efficient prompt tuning. arXiv preprint arXiv:2104.08691, 2021. 2 +[20] Xiang Lisa Li and Percy Liang. Prefix-tuning: Optimizing continuous prompts for generation. arXiv preprint arXiv:2101.00190, 2021. 2 +[21] Zhizhong Li and Derek Hoiem. Learning without forgetting. 
IEEE transactions on pattern analysis and machine intelligence, 40(12):2935-2947, 2017. 4 +[22] Dongze Lian, Daquan Zhou, Jiashi Feng, and Xinchao Wang. Scaling & shifting your features: A new baseline for efficient model tuning. Advances in Neural Information Processing Systems, 35:109-123, 2022. 1, 2, 3, 8 +[23] Arun Mallya and Svetlana Lazebnik. Packet: Adding multiple tasks to a single network by iterative pruning. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pages 7765-7773, 2018. 1, 2 +[24] Arun Mallya, Dillon Davis, and Svetlana Lazebnik. Piggyback: Adapting a single network to multiple tasks by learning to mask weights. In Proceedings of the European conference on computer vision (ECCV), pages 67-82, 2018. 1, 2 +[25] Aristeidis Panos, Yuriko Kobe, Daniel Olmeda Reino, Rahaf Aljundi, and Richard E Turner. First session adaptation: A strong replay-free baseline for class-incremental learning. arXiv preprint arXiv:2303.13199, 2023. 4 +[26] Can Peng, Kun Zhao, Tianren Wang, Meng Li, and Brian C Lovell. Few-shot class-incremental learning from an open-set perspective. In European Conference on Computer Vision, pages 382-397. Springer, 2022. 4 +[27] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 2 + +[28] Sylvestre-Alvise Rebuffi, Alexander Kolesnikov, Georg Sperl, and Christoph H Lampert. icarl: Incremental classifier and representation learning. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pages 2001-2010, 2017. 1, 2, 4, 8 +[29] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. 
International journal of computer vision, 115:211-252, 2015. 6 +[30] Joan Serra, Didac Suris, Marius Miron, and Alexandros Karatzoglou. Overcoming catastrophic forgetting with hard attention to the task. In International conference on machine learning, pages 4548-4557. PMLR, 2018. 1, 2 +[31] James Seale Smith, Leonid Karlinsky, Vyshnavi Gutta, Paola Cascante-Bonilla, Donghyun Kim, Assaf Arbelle, Rameswar Panda, Rogerio Feris, and Zsolt Kira. Coda-prompt: Continual decomposed attention-based prompting for rehearsal-free continual learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11909-11919, 2023. 1, 3, 6 +[32] Yu-Ming Tang, Yi-Xing Peng, and Wei-Shi Zheng. When prompt-based incremental learning does not meet strong pretraining. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1706-1716, 2023. 2, 5 +[33] Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The caltech-ucsd birds-200-2011 dataset. 2011. 5 +[34] Fu-Yun Wang, Da-Wei Zhou, Han-Jia Ye, and De-Chuan Zhan. Foster: Feature boosting and compression for class incremental learning. In European conference on computer vision, pages 398–414. Springer, 2022. 1, 2, 8 +[35] Yabin Wang, Zhiwu Huang, and Xiaopeng Hong. S-prompts learning with pre-trained transformers: An occam's razor for domain incremental learning. Advances in Neural Information Processing Systems, 35:5682-5695, 2022. 2 +[36] Yabin Wang, Zhiheng Ma, Zhiwu Huang, Yaowei Wang, Zhou Su, and Xiaopeng Hong. Isolation and impartial aggregation: A paradigm of incremental learning without interference. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 10209-10217, 2023. 4 +[37] Zifeng Wang, Zizhao Zhang, Sayna Ebrahimi, Ruoxi Sun, Han Zhang, Chen-Yu Lee, Xiaqi Ren, Guolong Su, Vincent Perot, Jennifer Dy, et al. Dualprompt: Complementary prompting for rehearsal-free continual learning. 
In European Conference on Computer Vision, pages 631-648. Springer, 2022. 1, 3, 6 +[38] Zifeng Wang, Zizhao Zhang, Chen-Yu Lee, Han Zhang, Ruoxi Sun, Xiaoqi Ren, Guolong Su, Vincent Perot, Jennifer Dy, and Tomas Pfister. Learning to prompt for continual learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 139-149, 2022. 1, 6 +[39] Tz-Ying Wu, Gurumurthy Swaminathan, Zhizhong Li, Avinash Ravichandran, Nuno Vasconcelos, Rahul Bhotika, and Stefano Soatto. Class-incremental learning with strong + +pre-trained models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9601-9610, 2022. 2 +[40] Xiang Xiang, Yuwen Tan, Qian Wan, Jing Ma, Alan Yuille, and Gregory D Hager. Coarse-to-fine incremental few-shot learning. In European Conference on Computer Vision, pages 205-222. Springer, 2022. 2 +[41] Shipeng Yan, Jiangwei Xie, and Xuming He. Der: Dynamically expandable representation for class incremental learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3014-3023, 2021. 2, 8 +[42] Lu Yu, Bartlomiej Twardowski, Xialei Liu, Luis Herranz, Kai Wang, Yongmei Cheng, Shangling Jui, and Joost van de Weijer. Semantic drift compensation for class-incremental learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6982-6991, 2020. 2, 5 +[43] Friedemann Zenke, Ben Poole, and Surya Ganguli. Continual learning through synaptic intelligence. In International conference on machine learning, pages 3987-3995. PMLR, 2017. 1, 2 +[44] Xiaohua Zhai, Joan Puigcerver, Alexander Kolesnikov, Pierre Ruyssen, Carlos Riquelme, Mario Lucic, Josip Djolonga, Andre Susano Pinto, Maxim Neumann, Alexey Dosovitskiy, et al. A large-scale study of representation learning with the visual task adaptation benchmark. arXiv preprint arXiv:1910.04867, 2019. 6 +[45] Gengwei Zhang, Liyuan Wang, Guoliang Kang, Ling Chen, and Yunchao Wei. 
Slca: Slow learner with classifier alignment for continual learning on a pre-trained model. arXiv preprint arXiv:2303.05118, 2023. 1, 2, 3, 4, 5, 6, 7 +[46] Renrui Zhang, Ziyu Guo, Wei Zhang, Kunchang Li, Xupeng Miao, Bin Cui, Yu Qiao, Peng Gao, and Hongsheng Li. Pointclip: Point cloud understanding by clip. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8552-8562, 2022. 2 +[47] Hengyuan Zhao, Hao Luo, Yuyang Zhao, Pichao Wang, Fan Wang, and Mike Zheng Shou. Revisit parameter-efficient transfer learning: A two-stage paradigm. arXiv preprint arXiv:2303.07910, 2023. 4 +[48] Da-Wei Zhou, Qi-Wei Wang, Han-Jia Ye, and De-Chuan Zhan. A model or 603 exemplars: Towards memory-efficient class-incremental learning. arXiv preprint arXiv:2205.13218, 2022. 2, 8 +[49] Da-Wei Zhou, Qi-Wei Wang, Zhi-Hong Qi, Han-Jia Ye, DeChuan Zhan, and Ziwei Liu. Deep class-incremental learning: A survey. arXiv preprint arXiv:2302.03648, 2023. 2 +[50] Da-Wei Zhou, Han-Jia Ye, De-Chuan Zhan, and Ziwei Liu. Revisiting class-incremental learning with pre-trained models: Generalizability and adaptivity are all you need. arXiv preprint arXiv:2303.07338, 2023. 1, 2, 3, 5, 6, 7 +[51] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Conditional prompt learning for vision-language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16816-16825, 2022. 2 + +[52] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. International Journal of Computer Vision, 130(9):2337-2348, 2022. 1, 2, 3, 6 +[53] Qinhao Zhou, Xiang Xiang, and Jing Ma. Hierarchical task-incremental learning with feature-space initialization inspired by neural collapse. Neural Processing Letters, pages 1-17, 2023. 2 +[54] Fei Zhu, Xu-Yao Zhang, Chuang Wang, Fei Yin, and Cheng-Lin Liu. Prototype augmentation and self-supervision for incremental learning. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5871-5880, 2021. 1, 2, 5 \ No newline at end of file diff --git a/2024/Semantically-Shifted Incremental Adapter-Tuning is A Continual ViTransformer/images.zip b/2024/Semantically-Shifted Incremental Adapter-Tuning is A Continual ViTransformer/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..cb3bb18923054bd81cc96ccf8702bb9c609208f1 --- /dev/null +++ b/2024/Semantically-Shifted Incremental Adapter-Tuning is A Continual ViTransformer/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f99298ee7e5326631431cbfb0e7fc3234a9006b824163e319da4278a68a9fa8d +size 731797 diff --git a/2024/Semantically-Shifted Incremental Adapter-Tuning is A Continual ViTransformer/layout.json b/2024/Semantically-Shifted Incremental Adapter-Tuning is A Continual ViTransformer/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..d591d2b23160197eefa784341d878030b0c87459 --- /dev/null +++ b/2024/Semantically-Shifted Incremental Adapter-Tuning is A Continual ViTransformer/layout.json @@ -0,0 +1,8868 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 47, + 103, + 547, + 122 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 103, + 547, + 122 + ], + "spans": [ + { + "bbox": [ + 47, + 103, + 547, + 122 + ], + "type": "text", + "content": "Semantically-Shifted Incremental Adapter-Tuning is A Continual ViTransformer" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 143, + 340, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 143, + 340, + 186 + ], + "spans": [ + { + "bbox": [ + 56, + 143, + 340, + 186 + ], + "type": "text", + "content": "Yuwen Tan*, Qinhao Zhou*, Xiang Xiang*† \nSchool of Artificial Intelligence and Automation, Huazhong University of Science and Tech., Wuhan, China" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 361, + 143, + 
536, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 361, + 143, + 536, + 186 + ], + "spans": [ + { + "bbox": [ + 361, + 143, + 536, + 186 + ], + "type": "text", + "content": "Ke Wang, Yuchuan Wu, Yongbin Li \nDAMO Academy, \nAlibaba Group, Beijing, China" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "spans": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 238, + 290, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 238, + 290, + 501 + ], + "spans": [ + { + "bbox": [ + 46, + 238, + 290, + 501 + ], + "type": "text", + "content": "Class-incremental learning (CIL) aims to enable models to continuously learn new classes while overcoming catastrophic forgetting. The introduction of pre-trained models has brought new tuning paradigms to CIL. In this paper, we revisit different parameter-efficient tuning (PET) methods within the context of continual learning. We observe that adapter tuning demonstrates superiority over prompt-based methods, even without parameter expansion in each learning session. Motivated by this, we propose incrementally tuning the shared adapter without imposing parameter update constraints, enhancing the learning capacity of the backbone. Additionally, we employ feature sampling from stored prototypes to retrain a unified classifier, further improving its performance. We estimate the semantic shift of old prototypes without access to past samples and update stored prototypes session by session. Our proposed method eliminates model expansion and avoids retaining any image samples. It surpasses previous pre-trained model-based CIL methods and demonstrates remarkable continual learning capabilities. 
Experimental results on five CIL benchmarks validate the effectiveness of our approach, achieving state-of-the-art (SOTA) performance." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 523, + 128, + 536 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 523, + 128, + 536 + ], + "spans": [ + { + "bbox": [ + 47, + 523, + 128, + 536 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 543, + 287, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 543, + 287, + 687 + ], + "spans": [ + { + "bbox": [ + 46, + 543, + 287, + 687 + ], + "type": "text", + "content": "In traditional deep learning, the model can access all the data at once and learning is performed on a static dataset. However, in real-life applications, data usually arrives in a stream format with new classes, requiring the model to learn continuously, known as class-incremental learning (CIL). The primary objective of CIL is to enable the model to learn continuously from non-stationary data streams, facilitating adaptation to new classes and mitigating catastrophic forgetting [7]. A number of methods [28, 34, 54] have been devoted to alleviating catastrophic forgetting. 
Those methods can be mainly divided into replay-based [2, 3, 28], regularization-based [1, 17, 43], and isolation-based meth" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 313, + 213, + 426, + 312 + ], + "blocks": [ + { + "bbox": [ + 313, + 213, + 426, + 312 + ], + "lines": [ + { + "bbox": [ + 313, + 213, + 426, + 312 + ], + "spans": [ + { + "bbox": [ + 313, + 213, + 426, + 312 + ], + "type": "image", + "image_path": "38761abf699db7fc658056c8941fcecba082ceb556f2f03f620f6f0d613fcd5c.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 313, + 546, + 368 + ], + "lines": [ + { + "bbox": [ + 305, + 313, + 546, + 368 + ], + "spans": [ + { + "bbox": [ + 305, + 313, + 546, + 368 + ], + "type": "text", + "content": "Figure 1. Comparison of different parameter-efficient tuning CIL baselines on CIFAR100 dataset. Left: The relationship between the average accuracy of the incremental sessions and the number of tunable parameters. Right: The average performance of old classes and new classes for each PET method." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 425, + 215, + 539, + 312 + ], + "blocks": [ + { + "bbox": [ + 425, + 215, + 539, + 312 + ], + "lines": [ + { + "bbox": [ + 425, + 215, + 539, + 312 + ], + "spans": [ + { + "bbox": [ + 425, + 215, + 539, + 312 + ], + "type": "image", + "image_path": "8b6416936343e222840143636fa49bcfddbb157082155ef743fd2c04bfba11a8.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 376, + 546, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 376, + 546, + 412 + ], + "spans": [ + { + "bbox": [ + 304, + 376, + 546, + 412 + ], + "type": "text", + "content": "ods [23, 24, 30]. 
However, all these methods assume that models are trained from scratch while ignoring the generalization ability of a strong pre-trained model [5] in the CIL." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 414, + 547, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 414, + 547, + 557 + ], + "spans": [ + { + "bbox": [ + 304, + 414, + 547, + 557 + ], + "type": "text", + "content": "Pre-trained vision transformer models [5] have demonstrated excellent performance on various vision tasks. Recently, it has been explored in the field of CIL and continues to receive considerable attention [37, 38, 45, 50]. Due to the powerful representation capabilities of pre-trained models, CIL methods based on pre-trained models achieve significant performance improvements compared to traditional SOTA methods which are trained from scratch. CIL with a pre-trained model typically fixes the pre-trained model to retain the generalizability and adds a few additional training parameters such as adapter [4], prompt [15] and SSF [22], which is referred to as parameter-efficient tuning (PET)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 558, + 548, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 558, + 548, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 558, + 548, + 714 + ], + "type": "text", + "content": "Inspired by language-based intelligence, current research in CIL is primarily focused on the prompt-based method [31, 37, 52]. Typically, these approaches require the construction of a pool of task-specific prompts during the training phase which increases storage overhead. Additionally, selecting prompts during the testing stage incurs additional computational costs. Other PET methods as well as fully fine-tuning are still in exploration in the context of CIL. Recently, SLCA [45] proposes fine-tuning the entire ViT and classifier incrementally with different learning rates. 
However, fine-tuning the entire pre-trained model requires substantial computational resources. In addition, Adam [50] initially explores the application of other PET" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 57, + 693, + 286, + 702 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 693, + 286, + 702 + ], + "spans": [ + { + "bbox": [ + 57, + 693, + 286, + 702 + ], + "type": "text", + "content": "*Equal contribution, co-first author; also with Nat. Key Lab of MSIPT." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 60, + 703, + 287, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 703, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 60, + 703, + 287, + 712 + ], + "type": "text", + "content": "†Correspondence to xex@hust.edu.cn; also with Peng Cheng Lab." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "23252" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "type": "text", + "content": "methods in CIL using first-session adaptation and branch fusion. Training in the first stage and subsequently freezing the model can reduce training time but result in lower accuracy for subsequent new classes. Our linear probing results reveal that the first-session adaptation is insufficient when there is a significant domain discrepancy between downstream data and the pre-trained model." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 162, + 290, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 162, + 290, + 402 + ], + "spans": [ + { + "bbox": [ + 46, + 162, + 290, + 402 + ], + "type": "text", + "content": "In this paper, we first revisit different PET methods within the CIL paradigm. As shown in Fig. 1, we observe that adapter tuning [4] is a better continual learner than prompt-tuning [15] and SSF-tuning [22]. When progressively fine-tuning the prompt and SSF parameters, the forgetting of old classes is catastrophic. In comparison, adapter tuning effectively balances learning new classes and maintaining performance in old classes. Unlike prompt-based methods, which require constructing a prompt pool, adapter tuning avoids catastrophic forgetting even sharing the same parameters across learning sessions. 
Additionally, the adapter balances the number of tuning parameters and model performance compared to fully fine-tuning. Moreover, unlike previous methods that use feature distillation loss to restrict changes in shared parameters as part of overall loss, we analyze that tuning with constraints hinders continual learning from the perspective of parameter sensitivity. Therefore, we train the adapter and task-specific classifier without parameter regularization in each session, allowing for greater plasticity in learning new classes." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 408, + 288, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 408, + 288, + 576 + ], + "spans": [ + { + "bbox": [ + 46, + 408, + 288, + 576 + ], + "type": "text", + "content": "As we only train the local classifier in each learning session, we propose to adopt a new classifier retraining method [32, 45, 54] to further improve the CIL performance. First, we implicitly compute the semantic shift [42] of previous prototypes which leverages the semantic shift of current task samples to estimate the change of old classes. Then, we sample several features according to the updated prototypes to retrain the classifier which is more effective than previous methods. The advantages of our proposed method can be summarized as follows: 1) Fine-tuning adapters significantly reduces training costs and improves learning efficiency; 2) We do not need to retain any image samples; 3) The accuracy for new classes is relatively high which verifies the continual learning capacity of the model." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "text", + "content": "In summary, our proposed learning framework has the following main contributions: (1) Different from various devotion into the prompt-based methods for CIL, we discover that incrementally tuning adapter is a better continual learner even without constructing an adapter-pool; (2) After each session adaptation with local classifier, we propose to retrain a unified classifier with the semantic shift compensated prototypes which can further improve the performance; (3) Extensive experimental results on five CIL benchmarks demonstrate the superiority of the proposed simple but effective methods which achieves the SOTA." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 306, + 71, + 393, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 71, + 393, + 84 + ], + "spans": [ + { + "bbox": [ + 306, + 71, + 393, + 84 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 91, + 460, + 104 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 91, + 460, + 104 + ], + "spans": [ + { + "bbox": [ + 306, + 91, + 460, + 104 + ], + "type": "text", + "content": "2.1. Class-incremental Learning" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 109, + 547, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 109, + 547, + 361 + ], + "spans": [ + { + "bbox": [ + 304, + 109, + 547, + 361 + ], + "type": "text", + "content": "Class-incremental learning requires the model to be continuously updated with new class instances while retaining old knowledge [49]. 
Traditional CIL methods can be categorized into replay-based [2, 3, 28], regularization-based [17, 40, 43, 53], and parameter isolation-based methods [23, 24, 30]. Replay-based methods involve retaining or generating samples of previous classes and incorporating them into the current training phase. These methods often employ strategies for sample selection or sample generation to effectively replay past information. Regularization-based methods add constraints or penalties in the learning process which limit the update of the parameters that are important for old classes. Isolation-based methods aim to isolate and update task-specific parameters. By focusing on updating only a subset of parameters, these methods can mitigate catastrophic forgetting. To expand the representative capacity of a model without compromising its existing knowledge, methods for expanding the network have been proposed [34, 41, 48]. These methods dynamically extend the feature extraction network, combined with the replay-based method, achieving dramatic performance improvements." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 369, + 458, + 382 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 369, + 458, + 382 + ], + "spans": [ + { + "bbox": [ + 306, + 369, + 458, + 382 + ], + "type": "text", + "content": "2.2. Parameter-Efficient Tuning" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 387, + 547, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 387, + 547, + 616 + ], + "spans": [ + { + "bbox": [ + 304, + 387, + 547, + 616 + ], + "type": "text", + "content": "Parameter-Efficient Tuning can be considered as a transfer learning method. It refers to not performing full fine-tuning on a pre-trained model, instead inserting and fine-tuning specific sub-modules within the network. This approach is initially demonstrated to have effective transfer learning results in NLP [13, 14, 19, 20]. 
Recently, similar approaches have been applied to vision transformer models as well. AdaptFormer [4] inserts lightweight modules after the MLP layers in the attention module and has been found to outperform full fine-tuning on action recognition benchmarks. Another PET approach SSF [22] surprisingly outperforms other methods in certain tasks even with a smaller number of parameters. Inspired by the prompt approach used in the language model, VPT [15] applies it to visual models and achieves impressive results across various downstream tasks while only introducing a small number of additional parameters. Furthermore, the prompt-based method has also been used in vision-language models [27, 46, 51, 52] to improve performance on various downstream tasks." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 623, + 534, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 623, + 534, + 635 + ], + "spans": [ + { + "bbox": [ + 306, + 623, + 534, + 635 + ], + "type": "text", + "content": "2.3. Continual Learning on a Pre-trained Model" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 641, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 547, + 715 + ], + "type": "text", + "content": "The aforementioned CIL methods all involve training the model from scratch, while CIL with pre-trained model [35, 39, 50, 52] has gained much attention due to its strong feature representation ability. L2P [52] utilizes the pretrained model and learns a set of extra prompts dynamically to guide the model to solve corresponding tasks. 
Du" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "23253" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 73, + 86, + 267, + 319 + ], + "blocks": [ + { + "bbox": [ + 76, + 72, + 212, + 84 + ], + "lines": [ + { + "bbox": [ + 76, + 72, + 212, + 84 + ], + "spans": [ + { + "bbox": [ + 76, + 72, + 212, + 84 + ], + "type": "text", + "content": "(I) Incremental Adapter Tuning" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 73, + 86, + 267, + 319 + ], + "lines": [ + { + "bbox": [ + 73, + 86, + 267, + 319 + ], + "spans": [ + { + "bbox": [ + 73, + 86, + 267, + 319 + ], + "type": "image", + "image_path": "357d2bfef8d65c36341d1ff6b88ea039f1345d7805989618c7494ad3d9e309dc.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 276, + 79, + 521, + 223 + ], + "blocks": [ + { + "bbox": [ + 276, + 72, + 405, + 84 + ], + "lines": [ + { + "bbox": [ + 276, + 72, + 405, + 84 + ], + "spans": [ + { + "bbox": [ + 276, + 72, + 405, + 84 + ], + "type": "text", + "content": "(II) Semantic Shift Estimation" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 276, + 79, + 521, + 223 + ], + "lines": [ + { + "bbox": [ + 276, + 79, + 521, + 223 + ], + "spans": [ + { + "bbox": [ + 276, + 79, + 521, + 223 + ], + "type": "image", + "image_path": "43934528ae263193d23078904a8836507b8bcb4e088a6e5da5c03cfc045ccfbb.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 276, + 224, + 408, + 236 + ], + "lines": [ + { + "bbox": [ + 276, + 
224, + 408, + 236 + ], + "spans": [ + { + "bbox": [ + 276, + 224, + 408, + 236 + ], + "type": "text", + "content": "(III) Unified Classifier Training" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 280, + 238, + 519, + 320 + ], + "blocks": [ + { + "bbox": [ + 280, + 238, + 519, + 320 + ], + "lines": [ + { + "bbox": [ + 280, + 238, + 519, + 320 + ], + "spans": [ + { + "bbox": [ + 280, + 238, + 519, + 320 + ], + "type": "image", + "image_path": "b5d9fafbba2e6239eee9791679047ae39d6413db2ec80a4e28445ecce8e73f16.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 322, + 545, + 344 + ], + "lines": [ + { + "bbox": [ + 46, + 322, + 545, + 344 + ], + "spans": [ + { + "bbox": [ + 46, + 322, + 545, + 344 + ], + "type": "text", + "content": "Figure 2. The framework of our proposed method. Left: The illustration of the structure of ViT and adapter. The adapter and local classifier are incrementally trained in each session using the Eq. 4. Right: The process of retraining the classifier with semantic shift estimation." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 350, + 289, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 350, + 289, + 531 + ], + "spans": [ + { + "bbox": [ + 46, + 350, + 289, + 531 + ], + "type": "text", + "content": "alPrompt [37] proposes to learn of two mutually unrelated prompt spaces: the general prompt and the expert prompt. It encodes task-invariant instructions and task-specific instructions, respectively. CODAPrompt [31] introduces a decomposed attention-based continual learning prompting method, which offers a larger learning capacity than existing prompt-based methods [37, 52]. 
SLCA [45] explores the fine-tuning paradigm of the pre-trained models, setting different learning rates for backbone and classifiers, and gains excellent performance. Adam [50] proposes to construct the classifier by merging the embeddings of a pretrained model and an adapted downstream model. LAE [8] proposes a unified framework that calibrates the adaptation speed of tuning modules and ensembles PET modules to accomplish predictions." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 542, + 129, + 556 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 542, + 129, + 556 + ], + "spans": [ + { + "bbox": [ + 47, + 542, + 129, + 556 + ], + "type": "text", + "content": "3. Methodology" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 562, + 127, + 575 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 562, + 127, + 575 + ], + "spans": [ + { + "bbox": [ + 47, + 562, + 127, + 575 + ], + "type": "text", + "content": "3.1. Preliminary" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "text", + "content": "Class-incremental learning formulation: We first introduce the definition of CIL. Consider a neural network " + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{\\theta} = f_{\\theta_{cls}}(\\mathcal{F}_{\\theta_{bne}}(\\cdot))" + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "text", + "content": " with trainable parameters " + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\theta = \\{\\theta_{bne},\\theta_{cls}\\}" + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\theta_{bne}}" + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "text", + "content": " represents the feature extraction backbone which extracts features from input images and " + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "inline_equation", + "content": "f_{\\theta_{cls}}" + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "text", + "content": " stands for the classification layer that projects feature representations to class predictions. In CIL setting, " + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{\\theta}" + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "text", + "content": " needs to learn a series of sessions from training data " + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "inline_equation", + "content": "D_{t} = \\{(x_{1}^{t},y_{1}^{t}),(x_{2}^{t},y_{2}^{t}),\\ldots \\}, t = 1,\\ldots ,T" + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "text", + "content": " and satisfy the condition " + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "inline_equation", + "content": "Y(i)\\cap Y(j) = \\emptyset ,i\\neq j" + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "inline_equation", + "content": "Y(i)" + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "text", + "content": " represent the label set in session " + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "text", + "content": ". 
The goal of " + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{\\theta}" + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "text", + "content": " is to perform well" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 350, + 545, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 350, + 545, + 374 + ], + "spans": [ + { + "bbox": [ + 304, + 350, + 545, + 374 + ], + "type": "text", + "content": "on test sets that contain all the classes learned denoted as " + }, + { + "bbox": [ + 304, + 350, + 545, + 374 + ], + "type": "inline_equation", + "content": "\\mathcal{Y} = Y(1) \\cup \\ldots \\cup Y(t)" + }, + { + "bbox": [ + 304, + 350, + 545, + 374 + ], + "type": "text", + "content": " after " + }, + { + "bbox": [ + 304, + 350, + 545, + 374 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 350, + 545, + 374 + ], + "type": "text", + "content": "-th session." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 375, + 546, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 375, + 546, + 472 + ], + "spans": [ + { + "bbox": [ + 304, + 375, + 546, + 472 + ], + "type": "text", + "content": "Parameter-efficient tuning with Adapter: An adapter is a bottleneck structure [4] that can be incorporated into a pre-trained transformer-based network to facilitate transfer learning and enhance the performance of downstream tasks. 
An adapter typically consists of a downsampled MLP layer " + }, + { + "bbox": [ + 304, + 375, + 546, + 472 + ], + "type": "inline_equation", + "content": "W_{down} \\in \\mathbb{R}^{d \\times d}" + }, + { + "bbox": [ + 304, + 375, + 546, + 472 + ], + "type": "text", + "content": ", a non-linear activation function " + }, + { + "bbox": [ + 304, + 375, + 546, + 472 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 304, + 375, + 546, + 472 + ], + "type": "text", + "content": ", and an upsampled MLP layer " + }, + { + "bbox": [ + 304, + 375, + 546, + 472 + ], + "type": "inline_equation", + "content": "W_{up} \\in \\mathbb{R}^{d \\times d}" + }, + { + "bbox": [ + 304, + 375, + 546, + 472 + ], + "type": "text", + "content": ". Denote the input as " + }, + { + "bbox": [ + 304, + 375, + 546, + 472 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 304, + 375, + 546, + 472 + ], + "type": "text", + "content": ", we formalize the adapter as" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 347, + 479, + 545, + 493 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 479, + 545, + 493 + ], + "spans": [ + { + "bbox": [ + 347, + 479, + 545, + 493 + ], + "type": "interline_equation", + "content": "o u t = x _ {i} + s \\cdot \\sigma \\left(x _ {i} * W _ {\\text {d o w n}}\\right) * W _ {u p}, \\tag {1}", + "image_path": "2b291edd69939669e39d5e2adce922a278c675d055a5c5b2ec883c513503145e.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 494, + 545, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 494, + 545, + 517 + ], + "spans": [ + { + "bbox": [ + 304, + 494, + 545, + 517 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 494, + 545, + 517 + ], + "type": "inline_equation", + "content": "*" + }, + { + "bbox": [ + 304, + 494, + 545, + 517 + ], + "type": "text", + "content": " stands for the matrix multiplication, " + 
}, + { + "bbox": [ + 304, + 494, + 545, + 517 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 304, + 494, + 545, + 517 + ], + "type": "text", + "content": " denotes the activation function RELU, and " + }, + { + "bbox": [ + 304, + 494, + 545, + 517 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 304, + 494, + 545, + 517 + ], + "type": "text", + "content": " denotes the scale factor." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 518, + 545, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 518, + 545, + 578 + ], + "spans": [ + { + "bbox": [ + 304, + 518, + 545, + 578 + ], + "type": "text", + "content": "Parameter-efficient tuning with SSF: SSF [22] modulates pre-trained models using scale and shift factors to align the feature distribution of downstream tasks. SSF inserts its layers in each transformer operation. Suppose " + }, + { + "bbox": [ + 304, + 518, + 545, + 578 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 304, + 518, + 545, + 578 + ], + "type": "text", + "content": " is the output of one of the modules, SSF can be represented as" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 390, + 586, + 545, + 598 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 390, + 586, + 545, + 598 + ], + "spans": [ + { + "bbox": [ + 390, + 586, + 545, + 598 + ], + "type": "interline_equation", + "content": "y = \\gamma \\odot x _ {i} + \\beta , \\tag {2}", + "image_path": "856038103e351725e9305b51766315117694afb41f610b16c52cb898655625d7.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 605, + 545, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 545, + 630 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 545, + 630 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 605, + 545, + 630 + ], + "type": "inline_equation", + "content": 
"\\gamma \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 304, + 605, + 545, + 630 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 605, + 545, + 630 + ], + "type": "inline_equation", + "content": "\\beta \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 304, + 605, + 545, + 630 + ], + "type": "text", + "content": " denote the scale and shift factor, respectively. " + }, + { + "bbox": [ + 304, + 605, + 545, + 630 + ], + "type": "inline_equation", + "content": "\\odot" + }, + { + "bbox": [ + 304, + 605, + 545, + 630 + ], + "type": "text", + "content": " stands for Hadamard product." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 630, + 546, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 546, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 546, + 715 + ], + "type": "text", + "content": "Parameter-efficient tuning with VPT: Visual Prompt Tuning (VPT) inserts a small number of trainable parameters in the input space after the embedding layer [15]. It is called prompts and only these parameters will be updated in the fine-tuning process. Depending on the number of layers inserted, VPT can be categorized as VPT-shallow and VPT-deep. 
Suppose " + }, + { + "bbox": [ + 304, + 630, + 546, + 715 + ], + "type": "inline_equation", + "content": "P = \\{p^k \\in R^d | 1 \\leq k \\leq n\\}" + }, + { + "bbox": [ + 304, + 630, + 546, + 715 + ], + "type": "text", + "content": " and the input" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "23254" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 240, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 240, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 240, + 83 + ], + "type": "text", + "content": "embedding is " + }, + { + "bbox": [ + 47, + 72, + 240, + 83 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 47, + 72, + 240, + 83 + ], + "type": "text", + "content": ", VPT will combine " + }, + { + "bbox": [ + 47, + 72, + 240, + 83 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 47, + 72, + 240, + 83 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 47, + 72, + 240, + 83 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 47, + 72, + 240, + 83 + ], + "type": "text", + "content": " as" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 94, + 287, + 107 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 94, + 287, + 107 + ], + "spans": [ + { + "bbox": [ + 141, + 94, + 287, + 107 + ], + "type": "interline_equation", + "content": "x ^ {\\prime} = [ x, P ], \\tag {3}", + "image_path": "3f67b2b7ea7f021edac1bb522608a6b33f26ca5da5941e18ea2fbe16592774ed.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 111, + 287, + 136 + ], + 
"type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 111, + 287, + 136 + ], + "spans": [ + { + "bbox": [ + 47, + 111, + 287, + 136 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 111, + 287, + 136 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 47, + 111, + 287, + 136 + ], + "type": "text", + "content": " is the number of prompts and the " + }, + { + "bbox": [ + 47, + 111, + 287, + 136 + ], + "type": "inline_equation", + "content": "x'" + }, + { + "bbox": [ + 47, + 111, + 287, + 136 + ], + "type": "text", + "content": " will be passed into subsequent blocks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 144, + 287, + 157 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 144, + 287, + 157 + ], + "spans": [ + { + "bbox": [ + 47, + 144, + 287, + 157 + ], + "type": "text", + "content": "3.2. Adapter-tuning without parameter constraints" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 163, + 287, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 163, + 287, + 425 + ], + "spans": [ + { + "bbox": [ + 46, + 163, + 287, + 425 + ], + "type": "text", + "content": "Most of the work based on pre-trained models focuses on how to apply the prompt-tuning strategies to the CIL paradigm. However, tuning the same prompt parameters across each learning session will cause catastrophic forgetting. As shown in Fig. 1, when progressively training the shared extra module while keeping the pre-trained model fixed, the adapter demonstrates its superiority over other tuning methods such as prompt-tuning and SSF. Fine-tuning the shared adapter incrementally seems to well balance the learning of new classes and old-knowledge retaining. Based on this observation, we delve deeper into incremental adapter tuning and use it as our baseline. The whole framework of the proposed method is shown in Fig. 2. 
Some methods [25, 47] adopt the first-session adaption and then fix the backbone. In addition, previous methods often utilize knowledge distillation [12] (KD) loss to restrict parameter changes of the feature extractor to mitigate forgetting. Totally different from earlier methods [17, 21, 28], we propose that the shared adapter should be tuned incrementally without parameter constraints. Next, we will provide a detailed description of the proposed baseline and offer a reasonable explanation and analysis." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 426, + 287, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 426, + 287, + 521 + ], + "spans": [ + { + "bbox": [ + 46, + 426, + 287, + 521 + ], + "type": "text", + "content": "Implementation of adapter-based baselines: During incremental training sessions, only adapter and classifier layers are updated, and the pre-trained ViT model is frozen. As the cosine classifier has shown great success in CIL, we follow ALICE [26] to use the cosine classifier with a margin. The margin hyper-parameter could also be used as a balance factor to decide the learning and retaining. 
The training loss can be formulated as follows:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 523, + 287, + 567 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 523, + 287, + 567 + ], + "spans": [ + { + "bbox": [ + 55, + 523, + 287, + 567 + ], + "type": "interline_equation", + "content": "\\mathcal {L} ^ {t} = - \\frac {1}{N ^ {t}} \\sum_ {j = 1} ^ {N ^ {t}} \\log \\frac {e ^ {s \\left(\\cos \\theta_ {j} ^ {i} - m\\right)}}{e ^ {s \\left(\\cos \\theta_ {j} ^ {i} - m\\right)} + \\sum_ {c = 1} ^ {Y (t) - \\{i \\}} e ^ {s \\left(\\cos \\theta_ {j} ^ {c}\\right)}} \\tag {4}", + "image_path": "1b195f3c7789800da104069926318a700cd45507240051e40a060202bf649114.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 567, + 287, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 567, + 287, + 605 + ], + "spans": [ + { + "bbox": [ + 47, + 567, + 287, + 605 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 567, + 287, + 605 + ], + "type": "inline_equation", + "content": "\\cos\\theta_{j}^{i} = \\frac{w_{i}*f_{j}}{||w_{i}||*||f_{j}||}" + }, + { + "bbox": [ + 47, + 567, + 287, + 605 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 567, + 287, + 605 + ], + "type": "inline_equation", + "content": "N^t" + }, + { + "bbox": [ + 47, + 567, + 287, + 605 + ], + "type": "text", + "content": " denotes the number of training samples of the current session, " + }, + { + "bbox": [ + 47, + 567, + 287, + 605 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 47, + 567, + 287, + 605 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 567, + 287, + 605 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 47, + 567, + 287, + 605 + ], + "type": "text", + "content": " represent the scale factor and margin factor, respectively." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "text", + "content": "As we do not retain any image samples, the gradients computed during the optimization of current samples not only affect the newly trained classifiers but also have an impact on the previously learned classifiers. The forgetting of the classifier is significant when no samples are retained. Thus, we follow previous work [8, 36, 45] to adopt the local training loss where we only compute the loss between current logits and labels and hinder the gradient updates of the previous classifier which alleviates the classifier forgetting." + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 307, + 72, + 425, + 170 + ], + "blocks": [ + { + "bbox": [ + 307, + 72, + 425, + 170 + ], + "lines": [ + { + "bbox": [ + 307, + 72, + 425, + 170 + ], + "spans": [ + { + "bbox": [ + 307, + 72, + 425, + 170 + ], + "type": "image", + "image_path": "ac25e42f8d7773a6e5121ab88de386b47d286589750d72ecca9801b378168139.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 171, + 545, + 204 + ], + "lines": [ + { + "bbox": [ + 305, + 171, + 545, + 204 + ], + "spans": [ + { + "bbox": [ + 305, + 171, + 545, + 204 + ], + "type": "text", + "content": "Figure 3. Comparison of the performance on ImageNetR dataset with different extent of parameter constraints. Left: The overall accuracy of each session. Right: The accuracy of new classes." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 425, + 72, + 541, + 170 + ], + "blocks": [ + { + "bbox": [ + 425, + 72, + 541, + 170 + ], + "lines": [ + { + "bbox": [ + 425, + 72, + 541, + 170 + ], + "spans": [ + { + "bbox": [ + 425, + 72, + 541, + 170 + ], + "type": "image", + "image_path": "75f5180e732ccfbe142d9d19964860bfccf8aa09cca2f33bd8a160c3c39dedc4.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 307, + 206, + 427, + 308 + ], + "blocks": [ + { + "bbox": [ + 307, + 206, + 427, + 308 + ], + "lines": [ + { + "bbox": [ + 307, + 206, + 427, + 308 + ], + "spans": [ + { + "bbox": [ + 307, + 206, + 427, + 308 + ], + "type": "image", + "image_path": "29a1640ab22aa0138ffadfc5931fc33b54b611ac45c2e33a46449ded5a59439a.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 309, + 544, + 342 + ], + "lines": [ + { + "bbox": [ + 305, + 309, + 544, + 342 + ], + "spans": [ + { + "bbox": [ + 305, + 309, + 544, + 342 + ], + "type": "text", + "content": "Figure 4. Parameter sensitivity analysis on the ImageNetR dataset. Left: The parameter sensitiveness of two incremental tasks. Right: The sensitiveness of different parameters in one task." 
+ } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 427, + 207, + 542, + 307 + ], + "blocks": [ + { + "bbox": [ + 427, + 207, + 542, + 307 + ], + "lines": [ + { + "bbox": [ + 427, + 207, + 542, + 307 + ], + "spans": [ + { + "bbox": [ + 427, + 207, + 542, + 307 + ], + "type": "image", + "image_path": "9efc944cb5097dbecb40a0fc2adc8dc87415b8a5d0a488d8589e8ecbc9609644.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 346, + 545, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 346, + 545, + 393 + ], + "spans": [ + { + "bbox": [ + 305, + 346, + 545, + 393 + ], + "type": "text", + "content": "Analysis of the adapter-based baseline: We will analyze why the adapter shows its superiority in the CIL over other PET methods, and why we choose to incrementally tune the shared adapter without parameter constraints." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 394, + 546, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 394, + 546, + 620 + ], + "spans": [ + { + "bbox": [ + 304, + 394, + 546, + 620 + ], + "type": "text", + "content": "First, we elaborate on why incrementally tuning the adapter is better in the context of CIL. By utilizing the residual structure, the adapter can retain the generalization capabilities from the pre-trained model while adapting to new tasks. The incremental tuning of the adapter exhibits a cumulative learning capability, where the representational capacity of the adapter is further enhanced as the learning sessions progress. In contrast, both SSF and prompt tuning have limitations when it comes to handling CIL. These methods suffer from overfitting to the current distribution. 
When the shared parameters excessively overfit each current task, the model gradually loses its generalization ability which is harmful for training a unified model for CIL. Then, we try to utilize KD loss to implicitly limit parameter updates and adjust the weighting factor. As shown in Fig. 3, the results demonstrate that unconstrained training is more beneficial for new-classes learning and improving overall performance. Based on this observation, we propose our proposition from the perspective of parameter sensitivity." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 621, + 545, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 621, + 545, + 656 + ], + "spans": [ + { + "bbox": [ + 305, + 621, + 545, + 656 + ], + "type": "text", + "content": "Proposition 1: Confining the change of parameters of previous tasks hinders the plasticity of new classes due to the similarity of parameter sensitivity among tasks." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 657, + 545, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 657, + 545, + 693 + ], + "spans": [ + { + "bbox": [ + 305, + 657, + 545, + 693 + ], + "type": "text", + "content": "Proof: Given the parameter set " + }, + { + "bbox": [ + 305, + 657, + 545, + 693 + ], + "type": "inline_equation", + "content": "\\theta = \\{\\theta_1, \\theta_2, \\dots, \\theta_N\\}" + }, + { + "bbox": [ + 305, + 657, + 545, + 693 + ], + "type": "text", + "content": " and training set " + }, + { + "bbox": [ + 305, + 657, + 545, + 693 + ], + "type": "inline_equation", + "content": "D_t = (X_t, Y_t)" + }, + { + "bbox": [ + 305, + 657, + 545, + 693 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 305, + 657, + 545, + 693 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 305, + 657, + 545, + 693 + ], + "type": "text", + "content": "-th session, the definition of parameter sensitivity [9, 47] is defined as" + } + 
] + } + ], + "index": 18 + }, + { + "bbox": [ + 353, + 700, + 545, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 353, + 700, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 353, + 700, + 545, + 715 + ], + "type": "interline_equation", + "content": "s _ {i} ^ {t} = \\mathcal {L} \\left(X _ {t}, Y _ {t} \\mid \\theta_ {i}\\right) - \\mathcal {L} \\left(X _ {t}, Y _ {t} \\mid \\theta_ {i} ^ {*}\\right), \\tag {5}", + "image_path": "3a01ff1ae2a1bf88a1c1ffcbc786ab63c49c733947a04eae078f27e3682a1bfe.jpg" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "23255" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 107 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 107 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 107 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 72, + 287, + 107 + ], + "type": "inline_equation", + "content": "\\theta_{i}^{*} = \\theta_{i} + \\Delta \\theta_{i}" + }, + { + "bbox": [ + 47, + 72, + 287, + 107 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 72, + 287, + 107 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 47, + 72, + 287, + 107 + ], + "type": "text", + "content": " denotes the optimized loss in the classification task. 
We use the first-order Taylor expansion, and the parameter sensitivity can be rewritten as follows:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 110, + 287, + 135 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 110, + 287, + 135 + ], + "spans": [ + { + "bbox": [ + 106, + 110, + 287, + 135 + ], + "type": "interline_equation", + "content": "s _ {i} = - g _ {i} \\Delta \\theta_ {i} = - \\frac {\\delta \\mathcal {L}}{\\delta \\theta_ {i}} * \\Delta \\theta_ {i}, \\tag {6}", + "image_path": "b284f53b84b6bff0fd5b0fb5b3b3934109d5b5afe667a817988a2287d4dcba70.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 143, + 288, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 143, + 288, + 289 + ], + "spans": [ + { + "bbox": [ + 46, + 143, + 288, + 289 + ], + "type": "text", + "content": "as " + }, + { + "bbox": [ + 46, + 143, + 288, + 289 + ], + "type": "inline_equation", + "content": "\\Delta \\theta_{i}" + }, + { + "bbox": [ + 46, + 143, + 288, + 289 + ], + "type": "text", + "content": " denotes the update after the training process, we follow the work [9] to use the one-step update to approximate the " + }, + { + "bbox": [ + 46, + 143, + 288, + 289 + ], + "type": "inline_equation", + "content": "\\Delta \\theta_{i} = \\epsilon \\frac{\\delta\\mathcal{L}}{\\delta\\theta_{i}}" + }, + { + "bbox": [ + 46, + 143, + 288, + 289 + ], + "type": "text", + "content": ". Therefore, the parameter can be approximately computed as " + }, + { + "bbox": [ + 46, + 143, + 288, + 289 + ], + "type": "inline_equation", + "content": "s_i \\approx -\\epsilon \\left(\\frac{\\delta\\mathcal{L}}{\\delta\\theta_i}\\right)^2" + }, + { + "bbox": [ + 46, + 143, + 288, + 289 + ], + "type": "text", + "content": ". As shown in Fig. 4, the sensitivity values of tuning parameters for two different sessions are nearly equal and the most sensitive parameters are always up weights. 
This means that constraining the parameter update would hinder the learning of new classes and further impede the ability of the model for continual learning. Furthermore, in the experimental section, we demonstrate the representative capacity of the adapter continued to strengthen through incremental tuning." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 296, + 287, + 309 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 296, + 287, + 309 + ], + "spans": [ + { + "bbox": [ + 47, + 296, + 287, + 309 + ], + "type": "text", + "content": "3.3. Semantic shift estimation without past samples" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 314, + 287, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 314, + 287, + 493 + ], + "spans": [ + { + "bbox": [ + 46, + 314, + 287, + 493 + ], + "type": "text", + "content": "Due to the selective updating of classifiers corresponding to the current task during training, the classifiers across different learning sessions are not fully aligned in the same feature space. To further optimize classifiers, we store the prototypes after training the backbone and local classifier. However, as the backbone is trained incrementally with new classes, the feature distribution of old classes undergoes changes. Retraining the classifier with the previous prototypes is sub-optimal. Since the feature representability of the backbone updates over time, using outdated features may not effectively retrain a unified classifier. To solve this problem, we update the feature distribution of old classes by computing the semantic shift over the learning process. We follow SDC [42] to estimate the semantic shift of old prototypes without access to past samples." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 494, + 287, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 494, + 287, + 553 + ], + "spans": [ + { + "bbox": [ + 47, + 494, + 287, + 553 + ], + "type": "text", + "content": "Suppose " + }, + { + "bbox": [ + 47, + 494, + 287, + 553 + ], + "type": "inline_equation", + "content": "\\varphi_c^t" + }, + { + "bbox": [ + 47, + 494, + 287, + 553 + ], + "type": "text", + "content": " denotes the prototype of category " + }, + { + "bbox": [ + 47, + 494, + 287, + 553 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 47, + 494, + 287, + 553 + ], + "type": "text", + "content": " in session " + }, + { + "bbox": [ + 47, + 494, + 287, + 553 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 47, + 494, + 287, + 553 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 494, + 287, + 553 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 47, + 494, + 287, + 553 + ], + "type": "text", + "content": " is the learning session that the category belongs to. We have no access to the samples of category " + }, + { + "bbox": [ + 47, + 494, + 287, + 553 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 47, + 494, + 287, + 553 + ], + "type": "text", + "content": " to update the prototype in session " + }, + { + "bbox": [ + 47, + 494, + 287, + 553 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 47, + 494, + 287, + 553 + ], + "type": "text", + "content": " (when " + }, + { + "bbox": [ + 47, + 494, + 287, + 553 + ], + "type": "inline_equation", + "content": "t > r" + }, + { + "bbox": [ + 47, + 494, + 287, + 553 + ], + "type": "text", + "content": "). 
The semantic shift of class " + }, + { + "bbox": [ + 47, + 494, + 287, + 553 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 47, + 494, + 287, + 553 + ], + "type": "text", + "content": " between two sessions can be represented as" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 71, + 556, + 287, + 590 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 556, + 287, + 590 + ], + "spans": [ + { + "bbox": [ + 71, + 556, + 287, + 590 + ], + "type": "interline_equation", + "content": "\\Delta_ {c} ^ {r \\rightarrow t} = \\varphi_ {c} ^ {t} - \\varphi_ {c} ^ {r}, \\quad \\varphi_ {c} ^ {r} = \\frac {1}{N _ {r} ^ {c}} \\sum_ {n = 1} ^ {N _ {r} ^ {c}} \\mathcal {F} \\left(X _ {r} ^ {c}, \\theta_ {r}\\right). \\tag {7}", + "image_path": "3ac286a9a2502006827c97b0f67e61d3a69789720ea26ce157dce6267f08378f.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "spans": [ + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "text", + "content": "While we do not have access to data from the old class " + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "text", + "content": ", we can only estimate the shift of current task categories on old and new models. 
The semantic shift of current samples between two sessions can be represented as" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 123, + 654, + 287, + 669 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 654, + 287, + 669 + ], + "spans": [ + { + "bbox": [ + 123, + 654, + 287, + 669 + ], + "type": "interline_equation", + "content": "\\delta_ {i} ^ {t - 1 \\rightarrow t} = e _ {i} ^ {t} - e _ {i} ^ {t - 1}, \\tag {8}", + "image_path": "93d2391e131c0350c2fdcab609d325357667069e929b0e6ff0a1ddc2423d42a9.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "content": " denotes the embedding of one sample in the current task " + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "content": ". We can compute " + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "inline_equation", + "content": "e_i^{t - 1}" + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "content": " at the start of the current task with the model trained in task " + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "inline_equation", + "content": "t - 1" + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "content": ". 
After training" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 71, + 545, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 71, + 545, + 97 + ], + "spans": [ + { + "bbox": [ + 306, + 71, + 545, + 97 + ], + "type": "text", + "content": "on the new task, we compute " + }, + { + "bbox": [ + 306, + 71, + 545, + 97 + ], + "type": "inline_equation", + "content": "\\delta_i^{t - 1\\to t}" + }, + { + "bbox": [ + 306, + 71, + 545, + 97 + ], + "type": "text", + "content": " and use it to estimate " + }, + { + "bbox": [ + 306, + 71, + 545, + 97 + ], + "type": "inline_equation", + "content": "\\Delta_c^{t - 1\\to t}" + }, + { + "bbox": [ + 306, + 71, + 545, + 97 + ], + "type": "text", + "content": ". We compute the shift as" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 358, + 100, + 545, + 148 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 100, + 545, + 148 + ], + "spans": [ + { + "bbox": [ + 358, + 100, + 545, + 148 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\widetilde {\\Delta} _ {c} ^ {t - 1 \\rightarrow t} = \\frac {\\sum \\alpha_ {i} \\delta_ {i} ^ {t - 1 \\rightarrow t}}{\\sum \\alpha_ {i}}, c \\notin C ^ {t}, \\tag {9} \\\\ \\alpha_ {i} = \\mathbf {e} ^ {- \\frac {| | e _ {i} ^ {t - 1} - \\varphi_ {c} ^ {t - 1} | |}{2 \\sigma^ {2}}}, \\\\ \\end{array}", + "image_path": "e8df10ce30dab4dc5f0d3598a6aa418e5d185e568d4fcdef03411e6a5216820a.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 159, + 545, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 159, + 545, + 195 + ], + "spans": [ + { + "bbox": [ + 305, + 159, + 545, + 195 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 159, + 545, + 195 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 305, + 159, + 545, + 195 + ], + "type": "text", + "content": " is the standard deviation of the distribution of 
class " + }, + { + "bbox": [ + 305, + 159, + 545, + 195 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 305, + 159, + 545, + 195 + ], + "type": "text", + "content": "; " + }, + { + "bbox": [ + 305, + 159, + 545, + 195 + ], + "type": "inline_equation", + "content": "C^t" + }, + { + "bbox": [ + 305, + 159, + 545, + 195 + ], + "type": "text", + "content": " denotes classes learned in the current session. Before retraining the classifier, we update the prototypes with" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 348, + 202, + 545, + 231 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 348, + 202, + 545, + 231 + ], + "spans": [ + { + "bbox": [ + 348, + 202, + 545, + 231 + ], + "type": "interline_equation", + "content": "\\left\\{\\begin{array}{l l}\\varphi_ {c} = \\varphi_ {c} ^ {t - 1} + \\widetilde {\\Delta} _ {c} ^ {t - 1 \\rightarrow t}&, c \\notin C ^ {t}\\\\\\varphi_ {c} = \\frac {1}{N _ {c}} \\sum_ {i} e _ {c}&, c \\in C ^ {t},\\end{array}\\right. \\tag {10}", + "image_path": "826ec11dc54661d57d17dc4a76f07641a7c9aef45d270a68dead467a49de8a62.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 236, + 511, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 236, + 511, + 248 + ], + "spans": [ + { + "bbox": [ + 306, + 236, + 511, + 248 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 306, + 236, + 511, + 248 + ], + "type": "inline_equation", + "content": "N_{c}" + }, + { + "bbox": [ + 306, + 236, + 511, + 248 + ], + "type": "text", + "content": " denotes the number of images in class " + }, + { + "bbox": [ + 306, + 236, + 511, + 248 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 306, + 236, + 511, + 248 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 257, + 447, + 270 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 257, + 447, + 270 + ], + "spans": [ + { + "bbox": [ + 306, + 257, + 447, + 270 + ], + "type": "text", + "content": "3.4. Unified classifier training" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 276, + 545, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 276, + 545, + 491 + ], + "spans": [ + { + "bbox": [ + 305, + 276, + 545, + 491 + ], + "type": "text", + "content": "Previous work [32, 45, 54] has attempted to retrain a unified classifier by modeling each class as a Gaussian distribution and sampling features from the distribution. We refer to this method as classifier alignment (CA) and adopt a similar approach that incorporates semantic shift estimation, which we denote as SSCA. Specifically, we compute the class prototypes " + }, + { + "bbox": [ + 305, + 276, + 545, + 491 + ], + "type": "inline_equation", + "content": "P_{c} = \\{\\varphi_{1},\\dots,\\varphi_{C}\\}" + }, + { + "bbox": [ + 305, + 276, + 545, + 491 + ], + "type": "text", + "content": " and covariance " + }, + { + "bbox": [ + 305, + 276, + 545, + 491 + ], + "type": "inline_equation", + "content": "\\Sigma_{c} = \\{\\varsigma_{1},\\dots,\\varsigma_{C}\\}" + }, + { + "bbox": [ + 305, + 276, + 545, + 491 + ], + "type": "text", + "content": " for each class after training process in each learning session. The calculation of class prototypes is based on Eq. 10. Due to the capability of the trained backbone network to provide well-distributed representations, each class exhibits an unimodal distribution. 
Therefore, we form a normal distribution " + }, + { + "bbox": [ + 305, + 276, + 545, + 491 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(\\mu_c,\\Sigma_c)" + }, + { + "bbox": [ + 305, + 276, + 545, + 491 + ], + "type": "text", + "content": " for each class with class prototype and variance. We sample features " + }, + { + "bbox": [ + 305, + 276, + 545, + 491 + ], + "type": "inline_equation", + "content": "\\mathcal{V}_c = \\{v_{c,1},\\dots v_{c,S_n}\\}" + }, + { + "bbox": [ + 305, + 276, + 545, + 491 + ], + "type": "text", + "content": " from the distribution to obtain diverse samples, where " + }, + { + "bbox": [ + 305, + 276, + 545, + 491 + ], + "type": "inline_equation", + "content": "S_{n}" + }, + { + "bbox": [ + 305, + 276, + 545, + 491 + ], + "type": "text", + "content": " is the number of the sample features for each class. Then, we use these features to train classification layers " + }, + { + "bbox": [ + 305, + 276, + 545, + 491 + ], + "type": "inline_equation", + "content": "\\theta_{cls}" + }, + { + "bbox": [ + 305, + 276, + 545, + 491 + ], + "type": "text", + "content": " with a commonly used cross-entropy loss as" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 328, + 495, + 545, + 528 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 495, + 545, + 528 + ], + "spans": [ + { + "bbox": [ + 328, + 495, + 545, + 528 + ], + "type": "interline_equation", + "content": "\\mathcal {L} \\left(\\theta_ {c l s}, \\mathcal {V} _ {c}\\right) = - \\sum_ {i = 1} ^ {S _ {n} * C} \\log \\frac {\\mathbf {e} ^ {\\left(\\theta_ {c l s} ^ {j} \\left(v _ {i}\\right)\\right)}}{\\sum_ {k \\in C} \\mathbf {e} ^ {\\left(\\theta_ {c l s} ^ {k} \\left(v _ {i}\\right)\\right)}}, \\tag {11}", + "image_path": "358edfa2887d6e5e250bfc52f7f5072ee8bc0b06017bc072b576cbe17e687549.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 530, + 545, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 
305, + 530, + 545, + 555 + ], + "spans": [ + { + "bbox": [ + 305, + 530, + 545, + 555 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 530, + 545, + 555 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 305, + 530, + 545, + 555 + ], + "type": "text", + "content": " denotes all classes learned so far. We normalize the features and classifier the same as backbone training." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 567, + 387, + 580 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 567, + 387, + 580 + ], + "spans": [ + { + "bbox": [ + 306, + 567, + 387, + 580 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 586, + 488, + 598 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 586, + 488, + 598 + ], + "spans": [ + { + "bbox": [ + 306, + 586, + 488, + 598 + ], + "type": "text", + "content": "4.1. Datasets and Evaluation Protocols" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 305, + 605, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 605, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 605, + 545, + 713 + ], + "type": "text", + "content": "Dataset: We evaluate our method on four commonly-used CIL benchmarks and one cross-domain CIL dataset. We randomly split the dataset into 10 or 20 learning tasks. CIFAR100 [18] is a widely used dataset in CIL which consists of 60000 images, belonging to 100 different categories. CUB200 [33] is a dataset that contains approximately 11,788 images of 200 bird species with fine-grained class labels. 
Additionally, we also follow recent work [45, 50] to use the other three datasets which have a large" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "23256" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 56, + 70, + 539, + 220 + ], + "blocks": [ + { + "bbox": [ + 56, + 70, + 539, + 220 + ], + "lines": [ + { + "bbox": [ + 56, + 70, + 539, + 220 + ], + "spans": [ + { + "bbox": [ + 56, + 70, + 539, + 220 + ], + "type": "table", + "html": "
MethodParamsSplit-ImageNetRSplit-ImageNetACUB200CIFAR100
\\( A_{Last} \\uparrow \\)\\( A_{Avg} \\uparrow \\)\\( A_{Last} \\uparrow \\)\\( A_{Avg} \\uparrow \\)\\( A_{Last} \\uparrow \\)\\( A_{avg} \\uparrow \\)\\( A_{Last} \\uparrow \\)\\( A_{avg} \\uparrow \\)
Joint86M81.72±0.35-50.56±1.75-88.17±0.32-89.71±0.07-
FT86M20.93±0.8640.35±0.746.03±4.7416.57±5.822.05±1.6945.67±2.0422.17±1.0941.83±1.60
SLCA [45]86M79.35±0.2883.29±0.4661.05±0.6368.88±2.3184.68±0.0990.77±0.7991.26±0.3794.29±0.92
Adam-adapter [50]1.19M65.79±0.9872.42±1.4148.81±0.0858.84±1.3785.84±0.0891.33±0.4987.29±0.2791.21±1.33
Adam-ssf [50]0.2M66.61±0.0974.36±1.0048.94±0.1458.79±2.8285.67±0.1590.99±0.7685.27±0.2189.90±0.98
Adam-prompt [50]0.04M65.29±1.5272.97±0.5629.29±7.4239.14±7.5985.28±0.4790.89±0.8685.04±1.0489.49±0.58
LAE [8]0.19M72.29±0.1477.99±0.4647.18±1.1758.15±0.7380.97±0.5187.22±1.2185.25±0.4389.80±1.20
L2P [38]0.04M72.34±0.1777.36±0.6444.04±0.9351.24±2.2667.02±1.9079.62±1.6084.06±0.8888.26±1.34
ADA [6]1.19M73.76±0.2779.57±0.8450.16±0.2059.43±2.2076.13±0.9485.74±0.2688.25±0.2691.85±1.32
DualPrompt [37]0.25M69.10±0.6274.28±0.6653.19±0.7464.59±0.0868.48±0.4780.59±1.5086.93±0.2491.13±0.32
CODAPrompt [31]3.84M73.31±0.5078.47±0.5352.08±0.1263.92±0.1277.23±1.1281.90±0.8583.21±3.3987.71±3.17
SSIAT (Ours)1.19M79.38±0.5983.63±0.4362.43±1.6370.83±1.6388.75±0.3893.00±0.9091.35±0.2694.35±0.60
", + "image_path": "552694258e6782543edf674b329e17b72e22f00b6216319331081807893685cb.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 53, + 233, + 175, + 335 + ], + "blocks": [ + { + "bbox": [ + 53, + 233, + 175, + 335 + ], + "lines": [ + { + "bbox": [ + 53, + 233, + 175, + 335 + ], + "spans": [ + { + "bbox": [ + 53, + 233, + 175, + 335 + ], + "type": "image", + "image_path": "c65dbad5c97f094610f8f85bf7d9c4dcffc02992b9ce62a69a4f09322a0603c3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 336, + 546, + 358 + ], + "lines": [ + { + "bbox": [ + 46, + 336, + 546, + 358 + ], + "spans": [ + { + "bbox": [ + 46, + 336, + 546, + 358 + ], + "type": "text", + "content": "Figure 5. The performance of each learning session on four datasets. (a) ImageNetR; (b) ImageNetA; (c) CUB200; (d) CIFAR100. These curves are plotted by calculating the average performance across three different seeds for each incremental session." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 173, + 233, + 296, + 335 + ], + "blocks": [ + { + "bbox": [ + 173, + 233, + 296, + 335 + ], + "lines": [ + { + "bbox": [ + 173, + 233, + 296, + 335 + ], + "spans": [ + { + "bbox": [ + 173, + 233, + 296, + 335 + ], + "type": "image", + "image_path": "6f3089f711c6abdb9a01251408912e42063b6e283306d0c5749cd35e4a860cc0.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 296, + 233, + 417, + 335 + ], + "blocks": [ + { + "bbox": [ + 296, + 233, + 417, + 335 + ], + "lines": [ + { + "bbox": [ + 296, + 233, + 417, + 335 + ], + "spans": [ + { + "bbox": [ + 296, + 233, + 417, + 335 + ], + "type": "image", + "image_path": "da6c6bfa4fdbc1f75ccd7f2b7fc8fc253efa4bea5fd72daf44b1de72386be979.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 417, + 233, + 539, + 335 + ], + "blocks": [ + { + "bbox": [ + 417, + 233, + 539, + 335 + ], + "lines": [ + { + "bbox": [ + 417, + 233, + 539, + 335 + ], + "spans": [ + { + "bbox": [ + 417, + 233, + 539, + 335 + ], + "type": "image", + "image_path": "07aead82b1259ae870c4fcbfc8fe05b7051d0987697a8a32e0755c5215962f6c.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 50, + 369, + 288, + 470 + ], + "blocks": [ + { + "bbox": [ + 56, + 222, + 536, + 232 + ], + "lines": [ + { + "bbox": [ + 56, + 222, + 536, + 232 + ], + "spans": [ + { + "bbox": [ + 56, + 222, + 536, + 232 + ], + "type": "text", + "content": "Table 1. Experimental results on four CIL benchmarks. All other methods are reproduced using the same seeds for a fair comparison." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 369, + 288, + 470 + ], + "lines": [ + { + "bbox": [ + 50, + 369, + 288, + 470 + ], + "spans": [ + { + "bbox": [ + 50, + 369, + 288, + 470 + ], + "type": "table", + "html": "
MethodImageNetRImageNetA
\\( A_{Last} \\uparrow \\)\\( A_{Avg} \\uparrow \\)\\( A_{Last} \\uparrow \\)\\( A_{Avg} \\uparrow \\)
SLCA [45]74.63±1.5579.92±1.2936.69±21.3156.35±7.09
Adam-adapter[50]57.42±0.8464.75±0.7948.65±0.1259.55±1.07
Adam-ssf[50]64.30±0.9472.42±1.4747.27±4.3458.36±4.70
Adam-prompt[50]59.90±1.1368.02±1.0229.93±4.8839.13±4.19
LAE [8]69.86±0.4377.38±0.6139.52±0.7851.75±2.15
L2P [38]69.64±0.4275.28±0.5740.48±1.7849.62±1.46
DualPrompt [37]66.61±0.5872.45±0.3742.28±1.9453.39±1.64
CODAPrompt [31]69.96±0.5075.34±0.8544.62±1.9254.86±0.50
SSIAT (Ours)75.67±0.1482.30±0.3659.16±1.0368.45±1.92
", + "image_path": "8eb4e21dc97033e3636f804767fb91802d66bfb5a36f4d3b74e9aefb9f3b8eff.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 473, + 287, + 496 + ], + "lines": [ + { + "bbox": [ + 46, + 473, + 287, + 496 + ], + "spans": [ + { + "bbox": [ + 46, + 473, + 287, + 496 + ], + "type": "text", + "content": "Table 2. Experimental results for long-sequences (20 incremental sessions) on ImageNetR and ImageNetA dataset." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "spans": [ + { + "bbox": [ + 46, + 506, + 287, + 626 + ], + "type": "text", + "content": "domain gap with pre-training data. ImageNetR [10] consists of 30,000 images with 200 categories. Although its categories overlap with ImageNet-21K [29], the images belong to a different domain. ImageNetA [11] is a real-world dataset that consists of 200 categories. This dataset exhibits significant class imbalance, with some categories having only a few training samples. VTAB [44] is a complex dataset that consists of 19 tasks covering a broad spectrum of domains and semantics. We follow previous work [50] to select 5 tasks to construct a cross-domain CIL dataset." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "content": "Implementation details: We use ViT-B/16 [5] as the pre-trained model, which is pre-trained on ImageNet-21K [29]. The initial learning rate is set as 0.01 and we use the cosine Anneal scheduler. In our experiments, we train the first session for 20 epochs and 10 epochs for later sessions. Following previous papers [45, 50], we use common evaluation metrics in CIL. 
Specifically, we report the last session" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 371, + 546, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 371, + 546, + 431 + ], + "spans": [ + { + "bbox": [ + 304, + 371, + 546, + 431 + ], + "type": "text", + "content": "accuracy " + }, + { + "bbox": [ + 304, + 371, + 546, + 431 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{Last}" + }, + { + "bbox": [ + 304, + 371, + 546, + 431 + ], + "type": "text", + "content": " and average accuracy of the whole incremental sessions " + }, + { + "bbox": [ + 304, + 371, + 546, + 431 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{Avg} = \\frac{1}{T}\\sum_{i=1}^{T}\\mathcal{A}_i" + }, + { + "bbox": [ + 304, + 371, + 546, + 431 + ], + "type": "text", + "content": ". We utilize three different seeds to generate three different class orders for evaluating various methods. We report the mean and standard deviation based on the three experiments. See codes1." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 437, + 421, + 451 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 437, + 421, + 451 + ], + "spans": [ + { + "bbox": [ + 305, + 437, + 421, + 451 + ], + "type": "text", + "content": "4.2. Experiment Results" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 456, + 545, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 456, + 545, + 539 + ], + "spans": [ + { + "bbox": [ + 304, + 456, + 545, + 539 + ], + "type": "text", + "content": "For a fair comparison, we compare our methods with SOTA CIL methods based on the pre-trained vision transformer model. We compare our methods with prompt-based methods L2P [52], DualPrompt [37], CODAPrompt [31], finetuning methods SLCA [45], and adapter-based method [6, 8, 50]. Tab. 
1 shows " + }, + { + "bbox": [ + 304, + 456, + 545, + 539 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{Avg}" + }, + { + "bbox": [ + 304, + 456, + 545, + 539 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 456, + 545, + 539 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{Last}" + }, + { + "bbox": [ + 304, + 456, + 545, + 539 + ], + "type": "text", + "content": " with three different seeds on four CIL benchmarks." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 540, + 546, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 540, + 546, + 696 + ], + "spans": [ + { + "bbox": [ + 304, + 540, + 546, + 696 + ], + "type": "text", + "content": "CUB200 & CIFAR100: We first report the results of each method on the CUB200 and CIFAR100 datasets. Since these two datasets overlap with the pre-training data, methods based on a pre-trained model achieve a huge improvement in performance compared with methods that are trained from scratch. For example, as shown in Tab. 1, the average accuracy on L2P, DualPrompt, and CODAPrompt reached " + }, + { + "bbox": [ + 304, + 540, + 546, + 696 + ], + "type": "inline_equation", + "content": "88.26\\%" + }, + { + "bbox": [ + 304, + 540, + 546, + 696 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 540, + 546, + 696 + ], + "type": "inline_equation", + "content": "91.13\\%" + }, + { + "bbox": [ + 304, + 540, + 546, + 696 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 540, + 546, + 696 + ], + "type": "inline_equation", + "content": "87.71\\%" + }, + { + "bbox": [ + 304, + 540, + 546, + 696 + ], + "type": "text", + "content": " on CIFAR100, respectively. Nevertheless, our method still outperforms those prompt-based methods. Besides, our method does not require the construction of a prompt pool which allows each task to learn specific prompt parameters. 
The adapter is shared across tasks and our method avoids the parameter" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 317, + 702, + 482, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 702, + 482, + 712 + ], + "spans": [ + { + "bbox": [ + 317, + 702, + 482, + 712 + ], + "type": "text", + "content": "1https://github.com/HAIV-Lab/SSIAT" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "23257" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 70, + 286, + 144 + ], + "blocks": [ + { + "bbox": [ + 50, + 70, + 286, + 144 + ], + "lines": [ + { + "bbox": [ + 50, + 70, + 286, + 144 + ], + "spans": [ + { + "bbox": [ + 50, + 70, + 286, + 144 + ], + "type": "table", + "html": "
MethodSes.1Ses.2Ses.3Ses.4Ses.5Avg↑
Adam-adapter[50]87.6086.0789.1482.7284.3585.97
Adam-ssf[50]89.6088.2189.9480.5082.3886.13
Adam-vpt[50]90.2087.5789.6980.3982.1886.01
SLCA[45]94.8092.4393.5493.9894.3393.82
LAE [8]97.9985.2679.6878.7874.3683.21
SSIAT (Ours)96.1092.7194.0993.6894.5094.21
", + "image_path": "59651ade57516cd523197da551cad070f000ab3f9e0e2012a2ef8c178f2788da.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 148, + 286, + 169 + ], + "lines": [ + { + "bbox": [ + 47, + 148, + 286, + 169 + ], + "spans": [ + { + "bbox": [ + 47, + 148, + 286, + 169 + ], + "type": "text", + "content": "Table 3. Experimental results for different methods on VTAB dataset which contain 5 datasets from different domains." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 173, + 286, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 173, + 286, + 304 + ], + "spans": [ + { + "bbox": [ + 46, + 173, + 286, + 304 + ], + "type": "text", + "content": "expansion with tasks increasing. Even though the Adam-adapter/SSF/prompt only needs to train in the first stage which requires less training time, the performance of those methods is inferior to our proposed method. Although the performance of SLCA is comparable to our method in CIFAR100, the number of tuning parameters of our method is much smaller. Besides that, the average performance of our method on CUB200 is " + }, + { + "bbox": [ + 46, + 173, + 286, + 304 + ], + "type": "inline_equation", + "content": "93.00\\%" + }, + { + "bbox": [ + 46, + 173, + 286, + 304 + ], + "type": "text", + "content": ", nearly " + }, + { + "bbox": [ + 46, + 173, + 286, + 304 + ], + "type": "inline_equation", + "content": "2.3\\%" + }, + { + "bbox": [ + 46, + 173, + 286, + 304 + ], + "type": "text", + "content": " improvement over SLCA. Fig. 5 (c) (d) shows the incremental accuracy of each session on CUB200 and CIFAR100 and our method is always at the top of all lines in the incremental process." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 304, + 286, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 304, + 286, + 507 + ], + "spans": [ + { + "bbox": [ + 46, + 304, + 286, + 507 + ], + "type": "text", + "content": "ImageNetR & ImageNetA: We report the performance on ImageNetR and ImageNetA in Tab. 1. These two datasets are more difficult due to the domain gap with the pre-training data. It can be seen that the performance of each method on these two datasets is lower than CIFAR100 and CUB200. Besides, we can see that SLCA outperforms other previous methods significantly on these two datasets. Notably, SLCA achieves an impressive last accuracy on ImageNetR, surpassing the other methods. In contrast, our method achieves SOTA-level performance on both datasets with fewer tuning parameters. Based on Fig. 5, the performance of our method is slightly higher than SLCA in several learning sessions with fewer tuning parameters on the ImageNetR dataset. On the ImageNetA dataset, our method achieves the last accuracy of " + }, + { + "bbox": [ + 46, + 304, + 286, + 507 + ], + "type": "inline_equation", + "content": "62.43\\%" + }, + { + "bbox": [ + 46, + 304, + 286, + 507 + ], + "type": "text", + "content": ", surpassing SLCA by " + }, + { + "bbox": [ + 46, + 304, + 286, + 507 + ], + "type": "inline_equation", + "content": "1.39\\%" + }, + { + "bbox": [ + 46, + 304, + 286, + 507 + ], + "type": "text", + "content": ". The average accuracy across all sessions is " + }, + { + "bbox": [ + 46, + 304, + 286, + 507 + ], + "type": "inline_equation", + "content": "70.83\\%" + }, + { + "bbox": [ + 46, + 304, + 286, + 507 + ], + "type": "text", + "content": ", showing a " + }, + { + "bbox": [ + 46, + 304, + 286, + 507 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 46, + 304, + 286, + 507 + ], + "type": "text", + "content": " improvement." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 510, + 286, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 510, + 286, + 688 + ], + "spans": [ + { + "bbox": [ + 46, + 510, + 286, + 688 + ], + "type": "text", + "content": "Additionally, we evaluate the performance of each method under the condition of long sequences. In this setting, each session consists of only 10 classes, and the results are summarized in Tab. 2. Our method also maintains excellent performance in terms of " + }, + { + "bbox": [ + 46, + 510, + 286, + 688 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{Last}" + }, + { + "bbox": [ + 46, + 510, + 286, + 688 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 510, + 286, + 688 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{Avg}" + }, + { + "bbox": [ + 46, + 510, + 286, + 688 + ], + "type": "text", + "content": ". The performance of SLCA is highly dependent on the class order in which the training data appears, resulting in a substantial variance in " + }, + { + "bbox": [ + 46, + 510, + 286, + 688 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{Last}" + }, + { + "bbox": [ + 46, + 510, + 286, + 688 + ], + "type": "text", + "content": " on ImageNetA. In contrast, the Adam-based methods remain relatively stable in long-sequence settings. For Adam-SSF, the long sequence only leads to a nearly " + }, + { + "bbox": [ + 46, + 510, + 286, + 688 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 46, + 510, + 286, + 688 + ], + "type": "text", + "content": " performance drop in ImageNetR. 
However, for SLCA, its performance drops by " + }, + { + "bbox": [ + 46, + 510, + 286, + 688 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 46, + 510, + 286, + 688 + ], + "type": "text", + "content": " on ImageNetR and nearly " + }, + { + "bbox": [ + 46, + 510, + 286, + 688 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 46, + 510, + 286, + 688 + ], + "type": "text", + "content": " on ImageNetA. In comparison, our method demonstrates excellent stability on long sequences and outperforms other methods by a large margin." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 689, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 286, + 713 + ], + "type": "text", + "content": "VTAB: VTAB is a cross-domain CIL dataset where each task provides training data from a different domain. Based" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": "on the results presented in Tab. 3, it can be observed that both SLCA and our method perform well in cross-domain CIL. Specifically, in the last incremental stage, our method achieves an accuracy that is " + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "12\\%" + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": " higher than the Adam-based methods. Adam-based methods only perform finetuning in the first task and are not able to adapt well to subsequent tasks on the cross-domain dataset." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 166, + 399, + 178 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 166, + 399, + 178 + ], + "spans": [ + { + "bbox": [ + 306, + 166, + 399, + 178 + ], + "type": "text", + "content": "4.3. Ablation Study" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 185, + 545, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 185, + 545, + 353 + ], + "spans": [ + { + "bbox": [ + 304, + 185, + 545, + 353 + ], + "type": "text", + "content": "Baselines with different PET methods: Tab. 4 shows the results of baselines with three different parameter-efficient tuning methods in each incremental session. It can be observed that the pre-trained model with an adapter achieves the best performance in terms of both the last session accuracy and average accuracy. Fig. 1 demonstrates that tuning with an adapter achieves a better balance between learning new classes and retaining knowledge of old classes. Both VPT-deep and SSF methods tend to prioritize learning new categories, which leads to increased forgetting of previously learned categories. Although VPT-shallow performs well on CIFAR, its limited parameters hinder the model from incrementally learning new classes on ImageNetR. More results on the other datasets can be found in the Supp." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 354, + 545, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 354, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 304, + 354, + 545, + 533 + ], + "type": "text", + "content": "Unified classifier retraining vs. Separate local classifier: As we train separate task-specific classifiers in each incremental session, we propose to retrain the classifier to find the optimal decision boundary for all the classes. Tab. 5 displays the ablation experiments of the classifier re-trained on ImageNetA which is the most difficult benchmark. 
It can be observed that whether it is a linear or a cosine classifier, retraining the classifier leads to a significant performance improvement. Additionally, incorporating the computation of prototype semantic shifts further enhances the performance by an additional " + }, + { + "bbox": [ + 304, + 354, + 545, + 533 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 304, + 354, + 545, + 533 + ], + "type": "text", + "content": " in the cosine classifier. Compared to the classifier alignment methods that do not involve computing updated prototypes, our method demonstrates its superiority as the incremental stages progress. More results on the other datasets can be found in the Supp." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 534, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 534, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 534, + 545, + 713 + ], + "type": "text", + "content": "Progressively tuning vs. first session adaptation: Tab. 6 shows the linear probing results of different adaption ways. After finishing the training of the last session, we freeze the pre-trained backbone and only train the classifier using all the samples. It is evident that not performing tuning and solely freezing the pre-trained model leads to the worst performance, regardless of the dataset. First-session adaptation proves to be a good choice as it reduces training time and works well for datasets like CIFAR100 and CUB200. However, for datasets such as ImageNetA and ImageNetR, which have significant domain gaps from the pre-trained model, relying solely on first-session adaptation is suboptimal. By continuously fine-tuning the adapter, we observe that the backbone exhibits stronger representability compared to only tuning in the first session." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "23258" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 59, + 70, + 536, + 140 + ], + "blocks": [ + { + "bbox": [ + 59, + 70, + 536, + 140 + ], + "lines": [ + { + "bbox": [ + 59, + 70, + 536, + 140 + ], + "spans": [ + { + "bbox": [ + 59, + 70, + 536, + 140 + ], + "type": "table", + "html": "
PET MethodParamsSes.1Ses.2Ses.3Ses.4Ses.5Ses.6Ses.7Ses.8Ses.9Ses.10Avg↑
SSF [22]0.2M98.5091.9088.5785.0283.9278.7077.7977.8973.0274.9183.03
VPT-deep [15]0.046M97.6069.1568.7056.6055.5648.8755.9756.0553.4855.2161.72
VPT-shallow [15]0.004M98.4092.9588.8092.0687.2686.3785.6485.3185.3685.1088.72
Adapter [4]1.19M98.5095.3591.6091.0890.9290.0889.8089.6288.9889.2991.52
", + "image_path": "ae86d835ecbe60876852924b1f787e7c7ccc30db0aa84f8ab73b1ef6eaf01c9d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 67, + 171, + 526, + 266 + ], + "blocks": [ + { + "bbox": [ + 46, + 143, + 547, + 166 + ], + "lines": [ + { + "bbox": [ + 46, + 143, + 547, + 166 + ], + "spans": [ + { + "bbox": [ + 46, + 143, + 547, + 166 + ], + "type": "text", + "content": "Table 4. Experimental results for baselines with different efficient tuning methods on CIFAR100. We report the overall performance of each session and the average performance." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 67, + 171, + 526, + 266 + ], + "lines": [ + { + "bbox": [ + 67, + 171, + 526, + 266 + ], + "spans": [ + { + "bbox": [ + 67, + 171, + 526, + 266 + ], + "type": "table", + "html": "
ClassifierMethodSes.1Ses.2Ses.3Ses.4Ses.5Ses.6Ses.7Ses.8Ses.9Ses.10Avg↑
Linearw/o CA74.6568.3763.9058.8258.0255.4854.0352.8951.6252.1358.99
w/ CA74.6571.5967.9364.2462.0860.9059.0357.3256.4156.8563.10
w/ SSCA74.6570.9267.6463.9162.6560.9660.3858.5558.1357.7763.55
Cosinew/o CA82.6677.7872.2067.6366.0163.1859.9759.3558.9357.9166.56
w/ CA82.6679.7074.5670.4068.1965.6663.4061.7760.7059.7868.68
w/ SSCA82.6680.6075.9172.4171.5669.0166.1064.6063.0062.4370.83
", + "image_path": "572b37d67bb8fa993d47f43fe2f05dee017e19ccdd399be0bb8b3b0fd265e3ab.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 51, + 298, + 286, + 361 + ], + "blocks": [ + { + "bbox": [ + 46, + 269, + 547, + 292 + ], + "lines": [ + { + "bbox": [ + 46, + 269, + 547, + 292 + ], + "spans": [ + { + "bbox": [ + 46, + 269, + 547, + 292 + ], + "type": "text", + "content": "Table 5. Ablation results for unified classifier training and semantic shift estimation on ImageNetA. We report the overall performance of each session and the average performance. We run the experiments with three seeds and reported the average performance." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 51, + 298, + 286, + 361 + ], + "lines": [ + { + "bbox": [ + 51, + 298, + 286, + 361 + ], + "spans": [ + { + "bbox": [ + 51, + 298, + 286, + 361 + ], + "type": "table", + "html": "
MethodCIFARImageNetRImageNetACUB
No-Adapt.86.0868.4233.7186.77
First-Adapt.91.3378.0263.5389.27
All-Adapt.92.5782.0265.9689.86
Δ↑1.24%4.00%2.43%0.59%
", + "image_path": "28fa07fbb5cb9c98a60b3434de2ff437cccb88cc064985031ffaa3b03d178a18.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 50, + 400, + 287, + 456 + ], + "blocks": [ + { + "bbox": [ + 46, + 365, + 287, + 397 + ], + "lines": [ + { + "bbox": [ + 46, + 365, + 287, + 397 + ], + "spans": [ + { + "bbox": [ + 46, + 365, + 287, + 397 + ], + "type": "text", + "content": "Table 6. Linear probing results of different training ways on four datasets. We retrain the classifier using all the data on the fixed-trained backbone." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 400, + 287, + 456 + ], + "lines": [ + { + "bbox": [ + 50, + 400, + 287, + 456 + ], + "spans": [ + { + "bbox": [ + 50, + 400, + 287, + 456 + ], + "type": "table", + "html": "
StructureParamsCIFARImageNetRImageNetA
AdaptMLP-P [4]1.19M94.35±0.6083.63±0.4370.83±1.63
AdaptMLP-S [4]1.19M94.16±0.8883.19±0.4771.00±1.52
Convpass [16]1.63M94.08±0.9983.64±0.3569.96±1.09
Adapter [13]2.38M94.26±0.9183.65±0.5070.94±1.42
", + "image_path": "dcbb19d2b990fb42607f805ea0e5a7466ca0c37a3b7602354d2b1d57e6533a5f.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 486, + 287, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 486, + 287, + 605 + ], + "spans": [ + { + "bbox": [ + 46, + 486, + 287, + 605 + ], + "type": "text", + "content": "Different structures of the adapter: In this paper, we follow AdaptFormer [4] to use parallel adapterMLP as the adapter structure. We also delve deeper into different adapter structures such as Adapter [13] and Convpass [16]. Although these different tuning structures may exhibit performance differences under static settings, the performance differences among those adapter structures are minimal in the context of CIL shown in Tab. 7. This offers us the flexibility to employ various adapter structures within the context of the CIL paradigm." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "text", + "content": "Comparison to traditional CIL methods: We conduct evaluations by comparing our approach to SOTA traditional CIL methods shown in Tab. 8. We replace the Resnet backbone with the pre-trained ViT model for fair comparison. The results indicate that the performance of iCaRL tends to be inferior compared to SOTA model expansion methods and our proposed method, even when past samples are stored. 
It can be observed that methods such as Foster and Der, which dynamically expand feature extraction net" + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 314, + 298, + 536, + 366 + ], + "blocks": [ + { + "bbox": [ + 46, + 459, + 287, + 482 + ], + "lines": [ + { + "bbox": [ + 46, + 459, + 287, + 482 + ], + "spans": [ + { + "bbox": [ + 46, + 459, + 287, + 482 + ], + "type": "text", + "content": "Table 7. Experimental results of different adapter structures. We report the average performance and standard deviation." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 314, + 298, + 536, + 366 + ], + "lines": [ + { + "bbox": [ + 314, + 298, + 536, + 366 + ], + "spans": [ + { + "bbox": [ + 314, + 298, + 536, + 366 + ], + "type": "table", + "html": "
MethodImageNetRImageNetA
\\( A_{Last} \\uparrow \\)\\( A_{Avg} \\uparrow \\)\\( A_{Last} \\uparrow \\)\\( A_{Avg} \\uparrow \\)
iCaRL [28]61.70±0.5671.34±0.6729.32±2.3640.11±1.36
Foster [34]75.87±0.3881.54±0.8212.44±17.4517.01±20.44
Der [41]75.63±0.8681.13±0.1138.43±2.3946.43±3.29
Memo [48]65.38±0.9073.80±0.8628.45±2.3740.27±1.22
SSIAT (Ours)79.38±0.5983.63±0.4362.43±1.6370.83±1.63
", + "image_path": "e5ea17a09c5444667c1c6d81e7aab9d578b2d91ae99aaf1f87881bd55a6da305.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 369, + 545, + 392 + ], + "lines": [ + { + "bbox": [ + 305, + 369, + 545, + 392 + ], + "spans": [ + { + "bbox": [ + 305, + 369, + 545, + 392 + ], + "type": "text", + "content": "Table 8. Comparison to traditional CIL methods on ImageNetR and ImageNetA dataset." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 393, + 545, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 393, + 545, + 466 + ], + "spans": [ + { + "bbox": [ + 304, + 393, + 545, + 466 + ], + "type": "text", + "content": "works, achieve impressive results on ImageNetR. The average accuracy of these methods is only " + }, + { + "bbox": [ + 304, + 393, + 545, + 466 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 304, + 393, + 545, + 466 + ], + "type": "text", + "content": " lower than our method. However, on ImageNetA, where there are few-shot samples for many classes, these methods exhibit low performance. More ablation experiments related to hyperparameters can be found in the supp." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 475, + 379, + 487 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 475, + 379, + 487 + ], + "spans": [ + { + "bbox": [ + 306, + 475, + 379, + 487 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 496, + 545, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 496, + 545, + 652 + ], + "spans": [ + { + "bbox": [ + 304, + 496, + 545, + 652 + ], + "type": "text", + "content": "Class-incremental learning on a pre-trained model has received significant attention in recent years. 
In this paper, we first revisit different PET methods in the context of CIL. Then, we propose that incrementally tuning the shared adapter and local classifier without constraints exhibits less forgetting and gains plasticity for learning new classes. Moreover, to train a unified classifier, we calculate the semantic shift of old prototypes and retrain the classifier using updated prototypes in each session. The proposed method eliminates the need for constructing an adapter pool and avoids retaining any image samples. Experimental results on five benchmarks demonstrate the effectiveness of our method which achieves the SOTA performance." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 653, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 653, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 653, + 547, + 713 + ], + "type": "text", + "content": "Acknowledgement. This research was supported by Natural Science Fund of Hubei Province (Grant # 2022CFB823), Alibaba Innovation Research program under Grant Contract # CRAQ7WHZ11220001-20978282, and HUST Independent Innovation Research Fund (Grant # 2021XXJS096)." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "23259" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 289, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 91, + 289, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 289, + 145 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 289, + 145 + ], + "type": "text", + "content": "[1] Rahaf Aljundi, Francesca Babiloni, Mohamed Elhoseiny, Marcus Rohrbach, and Tinne Tuytelaars. Memory aware synapses: Learning what (not) to forget. In Proceedings of the European conference on computer vision (ECCV), pages 139-154, 2018. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 146, + 288, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 146, + 288, + 201 + ], + "spans": [ + { + "bbox": [ + 53, + 146, + 288, + 201 + ], + "type": "text", + "content": "[2] Jihwan Bang, Heesu Kim, YoungJoon Yoo, Jung-Woo Ha, and Jonghyun Choi. Rainbow memory: Continual learning with a memory of diverse samples. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8218-8227, 2021. 
1, 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 202, + 288, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 202, + 288, + 257 + ], + "spans": [ + { + "bbox": [ + 53, + 202, + 288, + 257 + ], + "type": "text", + "content": "[3] Arslan Chaudhry, Puneet K Dokania, Thalaiyasingam Ajthan, and Philip HS Torr. Riemannian walk for incremental learning: Understanding forgetting and intransigence. In Proceedings of the European conference on computer vision (ECCV), pages 532-547, 2018. 1, 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 258, + 288, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 258, + 288, + 312 + ], + "spans": [ + { + "bbox": [ + 53, + 258, + 288, + 312 + ], + "type": "text", + "content": "[4] Shoufa Chen, Chongjian Ge, Zhan Tong, Jiangliu Wang, Yibing Song, Jue Wang, and Ping Luo. Adaptformer: Adapting vision transformers for scalable visual recognition. Advances in Neural Information Processing Systems, 35:16664-16678, 2022. 1, 2, 3, 8" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 312, + 288, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 312, + 288, + 378 + ], + "spans": [ + { + "bbox": [ + 53, + 312, + 288, + 378 + ], + "type": "text", + "content": "[5] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 1, 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 380, + 288, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 380, + 288, + 423 + ], + "spans": [ + { + "bbox": [ + 53, + 380, + 288, + 423 + ], + "type": "text", + "content": "[6] Beyza Ermis, Giovanni Zappella, Martin Wistuba, Aditya Rawal, and Cedric Archambeau. 
Memory efficient continual learning with transformers. Advances in Neural Information Processing Systems, 35:10629-10642, 2022. 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 424, + 288, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 424, + 288, + 456 + ], + "spans": [ + { + "bbox": [ + 53, + 424, + 288, + 456 + ], + "type": "text", + "content": "[7] Robert M French. Catastrophic forgetting in connectionist networks. Trends in cognitive sciences, 3(4):128-135, 1999. 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 457, + 288, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 457, + 288, + 502 + ], + "spans": [ + { + "bbox": [ + 53, + 457, + 288, + 502 + ], + "type": "text", + "content": "[8] Qiankun Gao, Chen Zhao, Yifan Sun, Teng Xi, Gang Zhang, Bernard Ghanem, and Jian Zhang. A unified continual learning framework with general parameter-efficient tuning. arXiv preprint arXiv:2303.10070, 2023. 3, 4, 6, 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 502, + 288, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 502, + 288, + 535 + ], + "spans": [ + { + "bbox": [ + 53, + 502, + 288, + 535 + ], + "type": "text", + "content": "[9] Haoyu He, Jianfei Cai, Jing Zhang, Dacheng Tao, and Bohan Zhuang. Sensitivity-aware visual parameter-efficient tuning. arXiv preprint arXiv:2303.08566, 2023. 4, 5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 536, + 288, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 536, + 288, + 601 + ], + "spans": [ + { + "bbox": [ + 48, + 536, + 288, + 601 + ], + "type": "text", + "content": "[10] Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadavath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, et al. The many faces of robustness: A critical analysis of out-of-distribution generalization. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8340-8349, 2021. 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 602, + 288, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 602, + 288, + 646 + ], + "spans": [ + { + "bbox": [ + 48, + 602, + 288, + 646 + ], + "type": "text", + "content": "[11] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. Natural adversarial examples. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15262-15271, 2021. 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 647, + 288, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 288, + 679 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 288, + 679 + ], + "type": "text", + "content": "[12] Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531, 2015. 4" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 680, + 288, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 288, + 713 + ], + "type": "text", + "content": "[13] Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. Parameter-efficient transfer" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 712 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "learning for nlp. In International Conference on Machine Learning, pages 2790-2799. PMLR, 2019. 
2, 8" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 96, + 547, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 96, + 547, + 140 + ], + "spans": [ + { + "bbox": [ + 307, + 96, + 547, + 140 + ], + "type": "text", + "content": "[14] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685, 2021. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 141, + 547, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 141, + 547, + 185 + ], + "spans": [ + { + "bbox": [ + 307, + 141, + 547, + 185 + ], + "type": "text", + "content": "[15] Menglin Jia, Luming Tang, Bor-Chun Chen, Claire Cardie, Serge Belongie, Bharath Hariharan, and Ser-Nam Lim. Visual prompt tuning. In European Conference on Computer Vision, pages 709-727. Springer, 2022. 1, 2, 3, 8" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 186, + 547, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 186, + 547, + 218 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 547, + 218 + ], + "type": "text", + "content": "[16] Shibo Jie and Zhi-Hong Deng. Convolutional bypasses are better vision transformer adapters. arXiv preprint arXiv:2207.07039, 2022.8" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 220, + 545, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 220, + 545, + 285 + ], + "spans": [ + { + "bbox": [ + 307, + 220, + 545, + 285 + ], + "type": "text", + "content": "[17] James Kirkpatrick, Razvan Pascanu, Neil Rabinowitz, Joel Veness, Guillaume Desjardins, Andrei A Rusu, Kieran Milan, John Quan, Tiago Ramalho, Agnieszka Grabska-Barwinska, et al. Overcoming catastrophic forgetting in neural networks. Proceedings of the national academy of sciences, 114(13):3521-3526, 2017. 
1, 2, 4" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 286, + 545, + 309 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 286, + 545, + 309 + ], + "spans": [ + { + "bbox": [ + 307, + 286, + 545, + 309 + ], + "type": "text", + "content": "[18] Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009. 5" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 310, + 545, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 310, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 307, + 310, + 545, + 342 + ], + "type": "text", + "content": "[19] Brian Lester, Rami Al-Rfou, and Noah Constant. The power of scale for parameter-efficient prompt tuning. arXiv preprint arXiv:2104.08691, 2021. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 343, + 545, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 343, + 545, + 376 + ], + "spans": [ + { + "bbox": [ + 307, + 343, + 545, + 376 + ], + "type": "text", + "content": "[20] Xiang Lisa Li and Percy Liang. Prefix-tuning: Optimizing continuous prompts for generation. arXiv preprint arXiv:2101.00190, 2021. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 377, + 545, + 410 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 377, + 545, + 410 + ], + "spans": [ + { + "bbox": [ + 307, + 377, + 545, + 410 + ], + "type": "text", + "content": "[21] Zhizhong Li and Derek Hoiem. Learning without forgetting. IEEE transactions on pattern analysis and machine intelligence, 40(12):2935-2947, 2017. 4" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 411, + 545, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 411, + 545, + 456 + ], + "spans": [ + { + "bbox": [ + 307, + 411, + 545, + 456 + ], + "type": "text", + "content": "[22] Dongze Lian, Daquan Zhou, Jiashi Feng, and Xinchao Wang. 
Scaling & shifting your features: A new baseline for efficient model tuning. Advances in Neural Information Processing Systems, 35:109-123, 2022. 1, 2, 3, 8" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 456, + 545, + 500 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 456, + 545, + 500 + ], + "spans": [ + { + "bbox": [ + 307, + 456, + 545, + 500 + ], + "type": "text", + "content": "[23] Arun Mallya and Svetlana Lazebnik. Packet: Adding multiple tasks to a single network by iterative pruning. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pages 7765-7773, 2018. 1, 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 501, + 545, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 501, + 545, + 555 + ], + "spans": [ + { + "bbox": [ + 307, + 501, + 545, + 555 + ], + "type": "text", + "content": "[24] Arun Mallya, Dillon Davis, and Svetlana Lazebnik. Piggyback: Adapting a single network to multiple tasks by learning to mask weights. In Proceedings of the European conference on computer vision (ECCV), pages 67-82, 2018. 1, 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 557, + 545, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 557, + 545, + 601 + ], + "spans": [ + { + "bbox": [ + 307, + 557, + 545, + 601 + ], + "type": "text", + "content": "[25] Aristeidis Panos, Yuriko Kobe, Daniel Olmeda Reino, Rahaf Aljundi, and Richard E Turner. First session adaptation: A strong replay-free baseline for class-incremental learning. arXiv preprint arXiv:2303.13199, 2023. 
4" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 602, + 545, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 602, + 545, + 646 + ], + "spans": [ + { + "bbox": [ + 307, + 602, + 545, + 646 + ], + "type": "text", + "content": "[26] Can Peng, Kun Zhao, Tianren Wang, Meng Li, and Brian C Lovell. Few-shot class-incremental learning from an open-set perspective. In European Conference on Computer Vision, pages 382-397. Springer, 2022. 4" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 647, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 647, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 647, + 545, + 712 + ], + "type": "text", + "content": "[27] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 2" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "23260" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "text", + "content": "[28] Sylvestre-Alvise Rebuffi, Alexander Kolesnikov, Georg Sperl, and Christoph H Lampert. 
icarl: Incremental classifier and representation learning. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pages 2001-2010, 2017. 1, 2, 4, 8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 129, + 288, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 129, + 288, + 185 + ], + "spans": [ + { + "bbox": [ + 48, + 129, + 288, + 185 + ], + "type": "text", + "content": "[29] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 115:211-252, 2015. 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 186, + 288, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 186, + 288, + 229 + ], + "spans": [ + { + "bbox": [ + 48, + 186, + 288, + 229 + ], + "type": "text", + "content": "[30] Joan Serra, Didac Suris, Marius Miron, and Alexandros Karatzoglou. Overcoming catastrophic forgetting with hard attention to the task. In International conference on machine learning, pages 4548-4557. PMLR, 2018. 1, 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 231, + 288, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 231, + 288, + 306 + ], + "spans": [ + { + "bbox": [ + 48, + 231, + 288, + 306 + ], + "type": "text", + "content": "[31] James Seale Smith, Leonid Karlinsky, Vyshnavi Gutta, Paola Cascante-Bonilla, Donghyun Kim, Assaf Arbelle, Rameswar Panda, Rogerio Feris, and Zsolt Kira. Coda-prompt: Continual decomposed attention-based prompting for rehearsal-free continual learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11909-11919, 2023. 
1, 3, 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 308, + 288, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 308, + 288, + 361 + ], + "spans": [ + { + "bbox": [ + 48, + 308, + 288, + 361 + ], + "type": "text", + "content": "[32] Yu-Ming Tang, Yi-Xing Peng, and Wei-Shi Zheng. When prompt-based incremental learning does not meet strong pretraining. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1706-1716, 2023. 2, 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 365, + 287, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 365, + 287, + 397 + ], + "spans": [ + { + "bbox": [ + 48, + 365, + 287, + 397 + ], + "type": "text", + "content": "[33] Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The caltech-ucsd birds-200-2011 dataset. 2011. 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 399, + 287, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 399, + 287, + 443 + ], + "spans": [ + { + "bbox": [ + 48, + 399, + 287, + 443 + ], + "type": "text", + "content": "[34] Fu-Yun Wang, Da-Wei Zhou, Han-Jia Ye, and De-Chuan Zhan. Foster: Feature boosting and compression for class incremental learning. In European conference on computer vision, pages 398–414. Springer, 2022. 1, 2, 8" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 445, + 287, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 445, + 287, + 488 + ], + "spans": [ + { + "bbox": [ + 48, + 445, + 287, + 488 + ], + "type": "text", + "content": "[35] Yabin Wang, Zhiwu Huang, and Xiaopeng Hong. S-prompts learning with pre-trained transformers: An occam's razor for domain incremental learning. Advances in Neural Information Processing Systems, 35:5682-5695, 2022. 
2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 490, + 287, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 490, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 48, + 490, + 287, + 544 + ], + "type": "text", + "content": "[36] Yabin Wang, Zhiheng Ma, Zhiwu Huang, Yaowei Wang, Zhou Su, and Xiaopeng Hong. Isolation and impartial aggregation: A paradigm of incremental learning without interference. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 10209-10217, 2023. 4" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 546, + 287, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 546, + 287, + 610 + ], + "spans": [ + { + "bbox": [ + 48, + 546, + 287, + 610 + ], + "type": "text", + "content": "[37] Zifeng Wang, Zizhao Zhang, Sayna Ebrahimi, Ruoxi Sun, Han Zhang, Chen-Yu Lee, Xiaqi Ren, Guolong Su, Vincent Perot, Jennifer Dy, et al. Dualprompt: Complementary prompting for rehearsal-free continual learning. In European Conference on Computer Vision, pages 631-648. Springer, 2022. 1, 3, 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 613, + 287, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 287, + 677 + ], + "type": "text", + "content": "[38] Zifeng Wang, Zizhao Zhang, Chen-Yu Lee, Han Zhang, Ruoxi Sun, Xiaoqi Ren, Guolong Su, Vincent Perot, Jennifer Dy, and Tomas Pfister. Learning to prompt for continual learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 139-149, 2022. 
1, 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 680, + 287, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 287, + 714 + ], + "type": "text", + "content": "[39] Tz-Ying Wu, Gurumurthy Swaminathan, Zhizhong Li, Avinash Ravichandran, Nuno Vasconcelos, Rahul Bhotika, and Stefano Soatto. Class-incremental learning with strong" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "text", + "content": "pre-trained models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9601-9610, 2022. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 107, + 545, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 107, + 545, + 152 + ], + "spans": [ + { + "bbox": [ + 308, + 107, + 545, + 152 + ], + "type": "text", + "content": "[40] Xiang Xiang, Yuwen Tan, Qian Wan, Jing Ma, Alan Yuille, and Gregory D Hager. Coarse-to-fine incremental few-shot learning. In European Conference on Computer Vision, pages 205-222. Springer, 2022. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 153, + 545, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 153, + 545, + 206 + ], + "spans": [ + { + "bbox": [ + 307, + 153, + 545, + 206 + ], + "type": "text", + "content": "[41] Shipeng Yan, Jiangwei Xie, and Xuming He. Der: Dynamically expandable representation for class incremental learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3014-3023, 2021. 
2, 8" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 209, + 545, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 209, + 545, + 273 + ], + "spans": [ + { + "bbox": [ + 308, + 209, + 545, + 273 + ], + "type": "text", + "content": "[42] Lu Yu, Bartlomiej Twardowski, Xialei Liu, Luis Herranz, Kai Wang, Yongmei Cheng, Shangling Jui, and Joost van de Weijer. Semantic drift compensation for class-incremental learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6982-6991, 2020. 2, 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 276, + 545, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 276, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 308, + 276, + 545, + 319 + ], + "type": "text", + "content": "[43] Friedemann Zenke, Ben Poole, and Surya Ganguli. Continual learning through synaptic intelligence. In International conference on machine learning, pages 3987-3995. PMLR, 2017. 1, 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 321, + 545, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 321, + 545, + 385 + ], + "spans": [ + { + "bbox": [ + 308, + 321, + 545, + 385 + ], + "type": "text", + "content": "[44] Xiaohua Zhai, Joan Puigcerver, Alexander Kolesnikov, Pierre Ruyssen, Carlos Riquelme, Mario Lucic, Josip Djolonga, Andre Susano Pinto, Maxim Neumann, Alexey Dosovitskiy, et al. A large-scale study of representation learning with the visual task adaptation benchmark. arXiv preprint arXiv:1910.04867, 2019. 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 388, + 545, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 388, + 545, + 431 + ], + "spans": [ + { + "bbox": [ + 308, + 388, + 545, + 431 + ], + "type": "text", + "content": "[45] Gengwei Zhang, Liyuan Wang, Guoliang Kang, Ling Chen, and Yunchao Wei. 
Slca: Slow learner with classifier alignment for continual learning on a pre-trained model. arXiv preprint arXiv:2303.05118, 2023. 1, 2, 3, 4, 5, 6, 7" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 434, + 545, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 434, + 545, + 487 + ], + "spans": [ + { + "bbox": [ + 308, + 434, + 545, + 487 + ], + "type": "text", + "content": "[46] Renrui Zhang, Ziyu Guo, Wei Zhang, Kunchang Li, Xupeng Miao, Bin Cui, Yu Qiao, Peng Gao, and Hongsheng Li. Pointclip: Point cloud understanding by clip. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8552-8562, 2022. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 489, + 545, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 489, + 545, + 532 + ], + "spans": [ + { + "bbox": [ + 308, + 489, + 545, + 532 + ], + "type": "text", + "content": "[47] Hengyuan Zhao, Hao Luo, Yuyang Zhao, Pichao Wang, Fan Wang, and Mike Zheng Shou. Revisit parameter-efficient transfer learning: A two-stage paradigm. arXiv preprint arXiv:2303.07910, 2023. 4" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 534, + 545, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 534, + 545, + 577 + ], + "spans": [ + { + "bbox": [ + 308, + 534, + 545, + 577 + ], + "type": "text", + "content": "[48] Da-Wei Zhou, Qi-Wei Wang, Han-Jia Ye, and De-Chuan Zhan. A model or 603 exemplars: Towards memory-efficient class-incremental learning. arXiv preprint arXiv:2205.13218, 2022. 2, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 579, + 545, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 579, + 545, + 612 + ], + "spans": [ + { + "bbox": [ + 308, + 579, + 545, + 612 + ], + "type": "text", + "content": "[49] Da-Wei Zhou, Qi-Wei Wang, Zhi-Hong Qi, Han-Jia Ye, DeChuan Zhan, and Ziwei Liu. 
Deep class-incremental learning: A survey. arXiv preprint arXiv:2302.03648, 2023. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 613, + 545, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 613, + 545, + 657 + ], + "spans": [ + { + "bbox": [ + 308, + 613, + 545, + 657 + ], + "type": "text", + "content": "[50] Da-Wei Zhou, Han-Jia Ye, De-Chuan Zhan, and Ziwei Liu. Revisiting class-incremental learning with pre-trained models: Generalizability and adaptivity are all you need. arXiv preprint arXiv:2303.07338, 2023. 1, 2, 3, 5, 6, 7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "type": "text", + "content": "[51] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Conditional prompt learning for vision-language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16816-16825, 2022. 2" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "23261" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 217 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 115 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 115 + ], + "type": "text", + "content": "[52] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. 
Learning to prompt for vision-language models. International Journal of Computer Vision, 130(9):2337-2348, 2022. 1, 2, 3, 6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 118, + 287, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 118, + 287, + 160 + ], + "spans": [ + { + "bbox": [ + 48, + 118, + 287, + 160 + ], + "type": "text", + "content": "[53] Qinhao Zhou, Xiang Xiang, and Jing Ma. Hierarchical task-incremental learning with feature-space initialization inspired by neural collapse. Neural Processing Letters, pages 1-17, 2023. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 163, + 287, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 163, + 287, + 217 + ], + "spans": [ + { + "bbox": [ + 48, + 163, + 287, + 217 + ], + "type": "text", + "content": "[54] Fei Zhu, Xu-Yao Zhang, Chuang Wang, Fei Yin, and Cheng-Lin Liu. Prototype augmentation and self-supervision for incremental learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5871-5880, 2021. 
1, 2, 5" + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "23262" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/Semantics Distortion and Style Matter_ Towards Source-free UDA for Panoramic Segmentation/fc7b011a-8b94-4431-a0e3-dcc09545c286_content_list.json b/2024/Semantics Distortion and Style Matter_ Towards Source-free UDA for Panoramic Segmentation/fc7b011a-8b94-4431-a0e3-dcc09545c286_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..1b3c2c4baec44c5a1870605e1a7ca2f807a9bee5 --- /dev/null +++ b/2024/Semantics Distortion and Style Matter_ Towards Source-free UDA for Panoramic Segmentation/fc7b011a-8b94-4431-a0e3-dcc09545c286_content_list.json @@ -0,0 +1,1539 @@ +[ + { + "type": "text", + "text": "Semantics, Distortion, and Style Matter: Towards Source-free UDA for Panoramic Segmentation", + "text_level": 1, + "bbox": [ + 76, + 128, + 893, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xu Zheng $^{1}$ Pengyuan Zhou $^{3}$ Athanasios V. Vasilakos $^{4}$ Lin Wang $^{1,2*}$ $^{1}$ AI Thrust, HKUST(GZ) $^{2}$ Dept. 
of CSE, HKUST $^{3}$ Aarhus University $^{4}$ University of Agder zhengxu128@gmail.com, pengyuan.zhou@ece.au.dk, th.vasilakos@gmail.com, linwang@ust.hk Project Page: https://vlislab22.github.io/360SFUDA/", + "bbox": [ + 89, + 202, + 880, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 308, + 313, + 325 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This paper addresses an interesting yet challenging problem—source-free unsupervised domain adaptation (SFUDA) for pinhole-to-panoramic semantic segmentation—given only a pinhole image-trained model (i.e., source) and unlabeled panoramic images (i.e., target). Tackling this problem is nontrivial due to the semantic mismatches, style discrepancies, and inevitable distortion of panoramic images. To this end, we propose a novel method that utilizes Tangent Projection (TP) as it has less distortion and meanwhile slits the equirectangular projection (ERP) with a fixed FoV to mimic the pinhole images. Both projections are shown effective in extracting knowledge from the source model. However, the distinct projection discrepancies between source and target domains impede the direct knowledge transfer; thus, we propose a panoramic prototype adaptation module (PPAM) to integrate panoramic prototypes from the extracted knowledge for adaptation. We then impose the loss constraints on both predictions and prototypes and propose a cross-dual attention module (CDAM) at the feature level to better align the spatial and channel characteristics across the domains and projections. Both knowledge extraction and transfer processes are synchronously updated to reach the best performance. 
Extensive experiments on the synthetic and real-world benchmarks, including outdoor and indoor scenarios, demonstrate that our method achieves significantly better performance than prior SFUDA methods for pinhole-to-panoramic adaptation.", + "bbox": [ + 75, + 340, + 473, + 750 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 777, + 209, + 792 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The comprehensive scene perception abilities of $360^{\\circ}$ cameras have made them highly popular for applications, such as autonomous driving [1]. In contrast to pinhole cameras that capture 2D planer images with a limited field-of-view (FoV), $360^{\\circ}$ cameras offer a much wider FoV of", + "bbox": [ + 75, + 801, + 470, + 878 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/e7dab01ecd680f1cef9b78e4cc3b1aeab906a9ee8fdb9476ed363efce7353ec9.jpg", + "image_caption": [ + "Figure 1. We address a new problem of achieving source-free pinhole-to-panoramic adaptation for segmentation." + ], + "image_footnote": [], + "bbox": [ + 506, + 309, + 885, + 439 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$360^{\\circ} \\times 180^{\\circ}$ . As a result, research on panoramic semantic segmentation [42, 43, 46, 48, 49] has been actively explored to achieve dense scene understanding for intelligent systems.", + "bbox": [ + 496, + 513, + 893, + 559 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Generally, the spherical data captured by the $360^{\\circ}$ cameras is always projected into 2D planar representations, e.g., Equirectangular Projection (ERP), to be aligned with the existing imaging pipeline [1] while preserving the omnidirectional information1. However, ERP suffers from the inevitable distortion and object deformation due to the nonuniformly distributed pixels [59]. 
Meanwhile, learning effective panoramic segmentation models is often impeded by the lack of large precisely labeled datasets due to the difficulty of annotation. For these reasons, some unsupervised domain adaptation (UDA) methods [49, 50, 59] have been proposed to transfer the knowledge from the pinhole image domain to the panoramic image domain. In some crucial application scenarios, e.g., autonomous driving, source datasets are not always accessible due to privacy and commercial issues, such as data portability and transmission costs. One typical example is the recent large model, SAM [19], which brings significant progress in instance segmentation for pinhole images; however, the source datasets are too large (10TB) to be reused in end-tasks, such as [20].", + "bbox": [ + 496, + 560, + 895, + 864 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1In this paper, omnidirectional and panoramic images are interchangeably used, and ERP images often indicate panoramic images.", + "bbox": [ + 500, + 875, + 893, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding author.", + "bbox": [ + 94, + 886, + 223, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "27885", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Motivation: In this paper, we probe an interesting yet challenging problem: source-free UDA (SFUDA) for panoramic segmentation, in which only the source model (pretrained with pinhole images) and unlabeled panoramic images are available. As shown in Fig. 1 (a), different from existing SFUDA methods, e.g., [25, 41, 44] for the pinhole-to-pinhole image adaptation, transferring knowledge from the pinhole-to-panoramic image domain is hampered by: 1) semantic mismatch caused by the different FoV between the pinhole and $360^{\\circ}$ cameras, i.e., $70^{\\circ}$ vs. $360^{\\circ}$ ; 2) inevitable distortion of the ERP; 3) style discrepancies caused by the distinct camera sensors and captured scenes. In Tab. 2, we show that naively adapting existing SFUDA methods to our problem leads to a limited performance boost.", + "bbox": [ + 75, + 90, + 472, + 303 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Contributions: To this end, we propose a novel SFUDA method that effectively extracts knowledge from the source model with only panoramic images and transfers the knowledge to the target panoramic domain. Our key idea is to leverage the multi-projection versatility of $360^{\\circ}$ data for efficient domain knowledge transfer. 
Our method enjoys two key technical contributions. Specifically, we use Tangent Projection (TP) and divide the ERP images into patches with a fixed FoV, dubbed Fixed FoV Projection (FFP), to extract knowledge from the source model with less distortion and similar FoV to the pinhole images. Both projections make it possible to effectively extract knowledge from the source model. However, directly transferring the extracted knowledge to the target model is hardly approachable due to the distinct projection gaps. Thus, we propose a panoramic prototype adaptation module (PPAM) to obtain class-wise semantic prototypes from the features and predictions of the source model with TP and FFP images (Sec. 3.2). Then, these prototypes are integrated together to obtain the global panoramic prototypes for knowledge adaptation, which is updated across the adaptation procedure. Moreover, our proposed PPAM also fine-tunes the source model to promote better knowledge extraction using prototypes extracted from FFP images. Aligning the prototypes from each FFP image enables the source model to become more aware of distortion and semantics across the FoV.", + "bbox": [ + 75, + 306, + 472, + 699 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We initially apply both prediction-level and prototype-level loss constraints to facilitate knowledge transfer to the unlabeled target panoramic domain. Concretely, the FFP predictions of the source model are rebuilt together to provide a pseudo-supervision signal for the target model. The prototype-level loss constraint is performed between the panoramic prototypes from PPAM and the prototypes from the target model's features and predictions on the ERP images. Moreover, knowledge from the source model is not limited to predictions and prototypes, high-level features also contain crucial image characteristics that can enhance the performance of the target model. 
Consequently, we propose a Cross-Dual Attention Module (CDAM) that aligns", + "bbox": [ + 75, + 704, + 472, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "spatial and channel characteristics between domains to fully utilize the knowledge from the source model and address the style discrepancy problem (Sec. 3.3). Specifically, CDAM reconstructs the source model features from FFP images to provide a panoramic perception of the surrounding environment and aligns them with the ERP features from the target model for effective knowledge transfer.", + "bbox": [ + 496, + 90, + 893, + 196 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We conduct extensive experiments on both synthetic and real-world benchmarks, including outdoor and indoor scenarios. As no directly comparable works exist, we adapt the state-of-the-art (SoTA) SFUDA methods [14, 18, 21, 25, 41, 51] – designed for pinhole-to-pinhole image adaptation – to our problem in addressing the panoramic semantic segmentation. The results show that our framework significantly outperforms these methods by large margins of $+6.37\\%$ , $+11.47\\%$ , and $+10.99\\%$ on three benchmarks. We also evaluate our method against UDA methods [49, 50, 58, 59], using the source pinhole image, the results demonstrate its comparable performance.", + "bbox": [ + 496, + 198, + 895, + 378 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 393, + 640, + 409 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Source-free UDA for Segmentation", + "text_level": 1, + "bbox": [ + 500, + 419, + 803, + 435 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "UDA aims to mitigate the impact of domain shift caused by data distribution discrepancies in downstream computer vision tasks, such as semantic segmentation [2, 6-9, 13, 17, 30, 32, 33, 36, 37, 40, 52, 55-57, 60, 61]. 
However, the source domain data may not always be accessible due to the privacy protection and data storage concerns. Intuitively, source-free UDA (SFUDA) [18, 21, 45] methods are proposed to adapt source models to a target domain without access to the source data. Existing SFUDA methods for semantic segmentation primarily focus on source data estimation [41, 44] or self-training [4, 21, 25, 54] for pinhole images. In this paper, we make the first attempt at achieving SFUDA from the pinhole image domain to the panoramic domain. This task is nontrivial to be tackled due to the semantic mismatches, style discrepancies, and inevitable distortion of panoramic images. Unlike these methods that focus on the source domain data estimation [25, 44], we propose a novel SFUDA method that effectively extracts knowledge from the source model with only panoramic images and transfers the knowledge to the target panoramic image domain. Experiments also show that naively applying these methods leads to less optimal performance (See Tab. 2).", + "bbox": [ + 496, + 441, + 895, + 776 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. UDA for Panoramic Semantic Segmentation", + "text_level": 1, + "bbox": [ + 500, + 786, + 874, + 801 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "It can be classified into three types, including adversarial training [10, 16, 31, 34, 59], pseudo labeling [24, 38, 47, 53] and prototypical adaptation methods [49, 50]. Specifically, the first line of research applies alignment approaches to capture the domain invariant characteristics of images [16, 22, 29], feature [5, 15, 16, 59] and predictions [26, 28].", + "bbox": [ + 496, + 809, + 895, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "27886", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/82ebea8a067ecf92d4b0722afe7352152e23bb62c5d22bd703561ee158a75ed7.jpg", + "image_caption": [ + "Figure 2. 
Overall framework of our proposed SFUDA for panoramic semantic segmentation." + ], + "image_footnote": [], + "bbox": [ + 122, + 93, + 848, + 366 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The second type of methods generates pseudo labels for the target domain training. The last line of research, e.g., Mutual Prototype Adaption (MPA) [49], mutually aligns the high-level features with the prototypes between domain. However, these methods treat panoramic images as pinhole images when extracting prototypes, ignoring the intricate semantic, object correspondence, and distortion information brought by the panoramic FoV. We are the first to address the SFUDA problem for panoramic segmentation. Considering the distinct projection discrepancies between source and target domains, we propose a PPAM to integrate the global panoramic prototypes from the extracted knowledge for adaptation.", + "bbox": [ + 73, + 421, + 473, + 619 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Methodology", + "text_level": 1, + "bbox": [ + 75, + 636, + 212, + 655 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Overview", + "text_level": 1, + "bbox": [ + 75, + 662, + 187, + 678 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The overall framework for panoramic segmentation is shown in Fig. 2. With only the source model $F_{S}$ available and given the unlabeled panoramic image data $D_{T}$ , we aim to train a target model $F_{T}$ that adapts knowledge from $F_{S}$ to the common $K$ categories across both domains.", + "bbox": [ + 75, + 686, + 468, + 763 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Unlike the pinhole image-to-image adaptation [25, 41, 44], pinhole-to-panoramic image domain adaptation is hampered by three key factors, specifically: semantic mismatch due to FoV variations $(70^{\\circ}$ vs. 
$360^{\\circ})$ , inevitable distortion in ERP, and ubiquitous style discrepancies in unsupervised domain adaptation (UDA) (refer to Fig.1 (a)). Therefore, naively applying existing SFUDA methods exhibits suboptimal segmentation performance (See Tab. 2), while UDA methods with source data, e.g., [25] for panoramic segmen", + "bbox": [ + 75, + 763, + 473, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "tation do not account for the semantic mismatch between the pinhole and panoramic images. Intuitively, the key challenges are : 1) how to extract knowledge from the source model with only panoramic images and 2) how to transfer knowledge to the target panoramic image domain.", + "bbox": [ + 496, + 421, + 893, + 497 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our key idea is to leverage the multi-projection versatility of $360^{\\circ}$ data for efficient domain knowledge transfer.", + "bbox": [ + 496, + 498, + 890, + 527 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Concretely, to address the first challenge (Sec. 3.2), we use the Tangent Projection (TP) which is characterized by a reduced distortion issue compared to the ERP images [12] to extract knowledge from the source model. Concurrently, ERP images are segmented into discrete patches, each possessing a constant FoV to mimic the pinhole images, dubbed Fixed FoV Projection (FFP). Both projections make it possible to effectively extract knowledge from the source model. The distinct projection formats make it impossible to directly transfer knowledge between domains, thus we propose a Panoramic Prototype Adaptation Module (PPAM) to obtain panoramic prototypes for adaptation. To address the second challenge (Sec. 
3.3), we first impose prediction and prototype level loss constraints, and propose a Cross-Dual Attention Module (CDAM) at the feature level to transfer knowledge and further address the style discrepancies.", + "bbox": [ + 496, + 529, + 895, + 772 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Knowledge Extraction", + "text_level": 1, + "bbox": [ + 500, + 784, + 709, + 800 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As depicted in Fig. 2, given the target domain (i.e., panoramic domain) ERP images $D_{T} = \\{x_{T}|x_{T}\\in \\mathbf{R}^{H\\times W\\times 3}\\}$ , we first project them into TP images $D_{T}^{t} = \\{x_{T}^{t}|x_{T}^{t}\\in \\mathbf{R}^{h\\times w\\times 3}\\}$ and FFP images $D_{T}^{f} = \\{x_{T}^{f}|x_{T}^{f}\\in \\mathbf{R}^{H\\times W / 4\\times 3}\\}$ for effectively extracting knowledge from the source model. Note that one ERP image corresponds to 18", + "bbox": [ + 496, + 808, + 893, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "27887", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/4e199840d916a08eeb2b3c2fd773d5f1503f1882600aa7a5c1b2cfea96baa60c.jpg", + "image_caption": [ + "Figure 3. Illustration of the prototype extraction (PE) in the panoramic prototype adaptation module (PPAM)." + ], + "image_footnote": [], + "bbox": [ + 93, + 103, + 444, + 271 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "TP images as [23, 59] and 4 FFP images with a fixed FoV of $90^{\\circ}$ (See Sec. 5). 
To obtain the features and predictions from the source model for knowledge adaptation, the two types of projected images are first fed into the source model with batch sampling:", + "bbox": [ + 75, + 351, + 468, + 426 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nP ^ {p}, f ^ {p} = F _ {S} \\left(x _ {T} ^ {t}\\right), \\quad P ^ {f}, f ^ {f} = F _ {S} \\left(x _ {T} ^ {f}\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 129, + 428, + 468, + 446 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $f^p, f^f, P^p$ , and $P^f$ are the source model features and predictions of the input TP and FFP images, respectively. For the target panoramic images, $x_T$ is fed into $F_T$ to obtain the target model features $f$ and predictions $P$ of the input batch of ERP images as $P, f = F_T(x_T)$ . However, the distinct projection formats of the input data in the source and target models make it difficult to align their features directly, thus we propose a Panoramic Prototype Adaptation Module (PPAM) to obtain panoramic prototypes for adaptation.", + "bbox": [ + 75, + 449, + 470, + 585 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Panoramic Prototype Adaptation Module (PPAM) Compared to prior UDA methods using prototypical adaptation, e.g., MPA [49, 50], our PPAM possesses three distinct characteristics: (a) class-wise prototypes are obtained from TP and FFP images to alleviate distortion and semantic mismatch problems; (b) global prototypes are iteratively updated with prototypes from two projections during the whole training procedure; (c) hard pseudo-labels are softened in the high-level feature space to obtain prototypes with different projection of panoramic images, indicating that the knowledge from the source model is fully utilized.", + "bbox": [ + 75, + 585, + 470, + 752 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Specifically, we project the source model predictions $P^p$ , $P^f$ into 
pseudo labels:", + "bbox": [ + 76, + 753, + 470, + 782 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {y} _ {(h, w, k)} ^ {p} = 1 _ {k \\dot {=} a r g m a x (P _ {h, w,:} ^ {p})},\n$$\n", + "text_format": "latex", + "bbox": [ + 153, + 796, + 352, + 816 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {y} _ {(H, W / 4, k)} ^ {f} = 1 _ {k \\div a r g m a x \\left(P _ {H, W / 4,:} ^ {f}\\right)}. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 153, + 816, + 468, + 843 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Here, $k$ denotes the semantic category. Subsequently, we obtain the class-specific masked features by integrating the up-sampled features with the corresponding pseudo", + "bbox": [ + 75, + 854, + 468, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "labels $\\hat{y}_{(h,w,k)}^p$ and $\\hat{y}_{(H,W/4,k)}^f$ . Notably, the prototypes $\\sum_{a=1}^{18} (\\tau_p^k)_a$ and $\\sum_{b=1}^{4} (\\tau_f^k)_b$ for TP and FFP images are obtained by masked average pooling (MAP) operation, as shown in Fig. 3. Within each projection, PPAM first integrates the prototypes:", + "bbox": [ + 496, + 89, + 893, + 172 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\tau_ {p} ^ {k} = a v g \\left(\\sum_ {a = 1} ^ {1 8} \\left(\\tau_ {p} ^ {k}\\right) _ {a}\\right), \\quad \\tau_ {f} ^ {k} = a v g \\left(\\sum_ {b = 1} ^ {4} \\left(\\tau_ {f} ^ {k}\\right) _ {b}\\right). \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 524, + 174, + 892, + 214 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As shown in Fig. 2, $\\tau_{p}^{k}$ and $\\tau_{f}^{k}$ are integrated together as $\\tau_{pf}^{k}$ to preserve the less distortion characteristics of $\\tau_{p}^{k}$ and the similar scale semantics of $\\tau_{f}^{k}$ . 
The $\\tau_{pf}^{k}$ is then used to update the panoramic global prototype $\\tau_{g}^{k}$ , which is iteratively updated with $\\tau_{pf}^{k}$ . To obtain more accurate and reliable prototypes, we update $\\tau_{g}^{k}$ and $\\tau_{pf}^{k}$ as follows:", + "bbox": [ + 496, + 217, + 893, + 321 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\tau_ {g} ^ {i} = \\frac {1}{i} \\left(\\tau_ {p f} ^ {k}\\right) ^ {i} + \\left(1 - \\frac {1}{i}\\right) \\left(\\tau_ {g} ^ {k}\\right) ^ {i - 1}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 583, + 323, + 892, + 352 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $(\\tau_{g}^{k})^{i}$ and $(\\tau_{pf}^{k})^{i}$ are the prototypes for category $k$ in the $i$ -th training epoch, $(\\tau_{g}^{k})^{i-1}$ is the panoramic global prototype saved in the last training epoch, $i$ is the current epoch number. The panoramic global prototype $\\tau_{g}^{k}$ is then used to give supervision for the target prototype $\\tau_{t}^{k}$ obtained from $P$ and $f$ with the same operations.", + "bbox": [ + 496, + 354, + 890, + 450 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Besides extracting prototype knowledge from the source model, PPAM also fine-tunes the source model to improve the effectiveness of knowledge extraction. Specifically, since each ERP image can be projected to 4 FFP images, the source model's extracted features $f_{f}$ have 4 pieces of FFP features. As the content of all the features is within the same ERP image, we propose to align the class-wise prototypes from each piece of the features in PPAM to enhance the model's performance. Concretely, the prototypes $\\sum_{\\alpha = 1}^{4}\\tau_{\\alpha}$ of the four FFP features are obtained through the same operations with $\\tau_g^t$ . Each FFP image captures a non-overlapping $90^\\circ$ FoV, resulting in distinct distortions, and similar content in each FFP image. 
Aligning the prototypes from each FFP image enhances distortion-awareness ability in the source model and helps to explore complementary semantic content in each FFP image. The MSE loss is imposed between each two of the prototypes as follows:", + "bbox": [ + 496, + 452, + 892, + 708 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {s f t} = \\sum_ {\\alpha \\neq \\beta} ^ {4} \\left\\{\\frac {1}{K} \\sum_ {k \\in K} \\left(\\left(\\tau_ {f} ^ {k}\\right) _ {\\alpha} - \\left(\\tau_ {f} ^ {k}\\right) _ {\\beta}\\right) ^ {2} \\right\\}. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 558, + 710, + 892, + 752 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Note that $\\mathcal{L}_{sft}$ is used to fine-tune the source model $F_{S}$ .", + "bbox": [ + 500, + 755, + 870, + 771 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Knowledge Adaptation", + "text_level": 1, + "bbox": [ + 500, + 784, + 712, + 801 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To adapt knowledge to the target domain, we impose the loss constraints on both predictions and prototypes and propose a cross-dual attention module (CDAM) at the feature level to better align the spatial and channel characteristics across the domains and projections. Specifically, the predictions of the FFP patch images are stitched to reconstruct an ERP image.", + "bbox": [ + 496, + 810, + 893, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "27888", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/56972b681a2a3f0809411118061365a58e005c524a84ce04fc09b0d630aea13c.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodSFmIoURoadS.W.Build.WallFencePoleTr.L.Tr.S.Veget.Terr.SkyPers.CarΔ
PVT [39] SSL38.7455.3936.8780.8419.7215.188.045.392.1772.9132.0190.8126.7657.40-
PVT [39] MPA40.9070.7842.4782.1322.7910.7413.541.270.3071.1533.0389.6929.0764.73-
Source w/ seg-b135.8163.3624.0980.1315.6813.3916.267.420.0962.4520.2086.0523.0253.37-
SFDA w/ seg-b1 [25]38.2168.7830.7180.375.2618.9520.905.252.3670.1923.3090.2022.5557.90+2.40
ProDA w/ seg-b1 [51]37.3768.9330.8880.074.1718.6019.721.771.5670.0522.7390.6019.7157.04+2.73
GTA w/ seg-b1 [21]36.0064.6120.0479.048.0615.3619.866.022.1365.7717.7584.5626.7158.13+0.19
HCL w/ seg-b1 [18]38.3868.8230.4180.375.8820.1820.104.232.1170.5024.7489.8922.6559.04+2.57
DATC w/ seg-b1 [41]38.5469.4826.9680.6811.6415.2420.109.330.5566.1124.3185.1630.9060.58+2.73
Simt w/ seg-b1 [14]37.9468.4729.5179.626.7819.2019.482.311.3368.8526.5589.3022.3559.49+2.13
Ours w/ seg-b141.7870.1733.2481.6613.0623.4023.377.633.5971.0425.4689.3336.6064.60+5.97
Ours w/ seg-b242.1869.9932.2881.3410.6224.3524.299.193.6371.2830.0488.7537.4965.05+6.37
", + "bbox": [ + 81, + 88, + 883, + 287 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 1. Experimental results on the S-to-D scenario, the overlapped 13 classes of two datasets are used to test the UDA performance. The bold and underline denote the best and the second-best performance in source-free UDA methods, respectively.", + "bbox": [ + 75, + 297, + 892, + 325 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The ERP image is then passed to the source model $F_{S}$ to predict a pseudo label, which serves as the supervision for the ERP predictions of the target model $F_{T}$ . For simplicity, we use the Cross-Entropy (CE) loss, which is formulated as:", + "bbox": [ + 75, + 351, + 470, + 412 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {s u p} = C E (P, 1 _ {\\dot {k} = a r g m a x (\\{R e b u i l d (P _ {H, W / 4,:} ^ {f}) \\})}). \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 102, + 426, + 470, + 448 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "And the prototype-level knowledge transfer loss is achieved by Mean Squared Error (MSE) loss between the panoramic global prototype $\\tau_g^k$ and the target prototype $\\tau_t^k$ :", + "bbox": [ + 75, + 459, + 468, + 507 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {p p a} = \\frac {1}{K} \\sum_ {k \\in K} \\left(\\tau_ {g} ^ {k} - \\tau_ {t} ^ {k}\\right) ^ {2}. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 178, + 518, + 468, + 555 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "With loss $\\mathcal{L}_{ppa}$ , the prototypes are pushed together to transfer the source-extracted knowledge to the target domain. 
In summary, with the proposed PPAM, we effectively address the distortion and semantic mismatch problems at the prediction and prototype level, we now tackle the style discrepancy problem at the feature level.", + "bbox": [ + 75, + 566, + 470, + 657 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Cross Dual Attention Module (CDAM). Inspired by the dual attention, focusing on spatial and channel characteristics [25], our CDAM imitates the spatial and channel-wise distributions of features to alleviate the style discrepancies. Different from [25] suggesting to minimize the distribution distance of the dual attention maps between the fake source (FFP images) and target data (ERP images), our CDAM focuses on aligning the distribution between FFP and ERP of the panoramic images rather than introducing additional parameters and computation cost in estimating source data. As shown in Fig. 2, we reconstruct the FFP features $F^f$ to ensure that the rebuilt feature $F'$ has the same spatial size as $F$ . Before the cross dual attention operation, we apply a Batch Normalization Statics (BNS) guided constraint on $F$ and $F'$ . 
Since the BNS of the source model should satisfy the feature distribution of the source data, we align $F$ and", + "bbox": [ + 75, + 659, + 470, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "$F^{\\prime}$ with BNS to alleviate the domain gaps as follows:", + "bbox": [ + 500, + 351, + 852, + 367 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {b n s} = \\left\\| \\mu (F) - \\bar {\\mu} \\right\\| _ {2} ^ {2} + \\left\\| \\sigma^ {2} (F) - \\bar {\\sigma} ^ {2} \\right\\| _ {2} ^ {2} \\\\ + \\left\\| \\mu \\left(F ^ {\\prime}\\right) - \\bar {\\mu} \\right\\| _ {2} ^ {2} + \\left\\| \\sigma^ {2} \\left(F ^ {\\prime}\\right) - \\bar {\\sigma} ^ {2} \\right\\| _ {2} ^ {2}, \\tag {8} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 542, + 377, + 890, + 421 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\bar{\\mu}$ and $\\bar{\\sigma}^2$ are the mean and variance parameters of the last BN layer in the source model $S$ .", + "bbox": [ + 496, + 431, + 890, + 460 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As shown in Fig. 2 (a), after aligned with BNS, the ERP feature $f$ and the rebuilt feature $f'$ are first reshaped to be $f \\in \\mathbb{R}^{N \\times C}$ and $f' \\in \\mathbb{R}^{N \\times C}$ , where $N$ is the number of pixels and $C$ is the channel number. 
Then we calculate the spatial-wise attention maps $M_{sp} \\in \\mathbb{R}^{N \\times C}$ and $M_{sp}' \\in \\mathbb{R}^{N \\times C}$ for $f$ and $f'$ by:", + "bbox": [ + 498, + 460, + 892, + 551 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\{M _ {s p} \\} _ {j i} = \\frac {\\exp (f _ {[ i : ]} ^ {\\prime} \\cdot f _ {[ : j ]} ^ {T})}{\\sum_ {i} ^ {N} \\exp (f _ {[ i : ]} ^ {\\prime} \\cdot f _ {[ : j ]} ^ {T})},\n$$\n", + "text_format": "latex", + "bbox": [ + 563, + 561, + 794, + 604 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\{M _ {s p} ^ {\\prime} \\right\\} _ {j i} = \\frac {\\exp \\left(f _ {[ i : ]} \\cdot f _ {[ : j ]} ^ {T}\\right)}{\\sum_ {i} ^ {N} \\exp \\left(f _ {[ i : ]} \\cdot f _ {[ : j ]} ^ {T}\\right)}, \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 563, + 607, + 890, + 650 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $f^T$ is the transpose of $f$ and $\\{M\\}_{ij}$ measures the impact of the $i$ -th position on the $j$ -th position. Similarly, the channel-wise attention maps $M_{ch} \\in \\mathbb{R}^{C \\times C}$ and $M_{ch}' \\in \\mathbb{R}^{C \\times C}$ can be obtained through:", + "bbox": [ + 500, + 659, + 893, + 720 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\{M _ {c h} \\right\\} _ {j i} = \\frac {\\exp \\left(f _ {[ i : ]} ^ {T} \\cdot f _ {[ : j ]}\\right)}{\\sum_ {i} ^ {C} \\exp \\left(f _ {[ i : ]} ^ {\\prime} \\cdot f _ {[ : j ]} ^ {T}\\right)},\n$$\n", + "text_format": "latex", + "bbox": [ + 563, + 729, + 794, + 773 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\{M _ {c h} ^ {\\prime} \\right\\} _ {j i} = \\frac {\\exp \\left(f _ {[ i : ]} ^ {T} \\cdot f _ {[ : j ]} ^ {\\prime}\\right)}{\\sum_ {i} ^ {C} \\exp \\left(f _ {[ i : ]} ^ {T} \\cdot f _ {[ : j ]} ^ {\\prime}\\right)}. 
\\tag {10}\n$$\n",
        "text_format": "latex",
        "bbox": [
            565,
            776,
            890,
            819
        ],
        "page_idx": 4
    },
    {
        "type": "text",
        "text": "After obtaining the spatial and channel attention maps, the CDAM loss can be calculated with the Kullback-Leibler divergence (KL divergence) as follows:",
        "bbox": [
            500,
            828,
            890,
            873
        ],
        "page_idx": 4
    },
    {
        "type": "equation",
        "text": "\n$$\n\\mathcal {L} _ {c d a} = K L \\left(M _ {s p}, M _ {s p} ^ {\\prime}\\right) + K L \\left(M _ {c h}, M _ {c h} ^ {\\prime}\\right) \\tag {11}\n$$\n",
        "text_format": "latex",
        "bbox": [
            537,
            883,
            890,
            902
        ],
        "page_idx": 4
    },
    {
        "type": "page_number",
        "text": "27889",
        "bbox": [
            478,
            944,
            517,
            955
        ],
        "page_idx": 4
    },
    {
        "type": "image",
        "img_path": "images/56372506da771c02d126a5a0b84cc0c960978ea26b98df2281949a2bc7b8b3a3.jpg",
        "image_caption": [
            "Figure 4. Example visualization results. (a) source, (b) SFDA [25], (c) DATC [41], (d) Ours, (e) Ground Truth (GT)."
        ],
        "image_footnote": [],
        "bbox": [
            83,
            88,
            890,
            292
        ],
        "page_idx": 5
    },
    {
        "type": "table",
        "img_path": "images/bf1d67e171db265b4f64b2da4f105428130491007ab0bf138c6e9aeb9b88fee9.jpg",
        "table_caption": [],
        "table_footnote": [],
        "table_body": "
MethodSFmIoUPersonRiderCarTruckBusTrainMotorBikeΔ
Trans4PASS-T [49]53.1848.5416.9179.5865.3355.7684.6359.0537.61-
Trans4PASS-S [49]55.2248.8523.3681.0267.3169.5386.1360.8539.09-
DAFormer [17]54.6749.6925.1577.7063.0665.6186.6865.1248.13-
DPPASS [59]55.3052.0929.4079.1958.7347.2486.4866.6038.11-
DATR [58]56.8154.6229.5080.0367.3563.7587.6767.5737.10-
Source w/ seg-b138.6540.9310.8967.6736.8615.5626.4342.6827.16-
SFDA w/ seg-b1 [25]42.7041.658.4669.9747.4833.2472.0147.6132.77+4.05
DTAC w/ seg-b1 [41]43.0643.518.3570.1035.7940.7370.5249.4932.94+4.41
Ours w/ seg-b148.7845.3615.8375.7049.1655.6882.0754.8233.76+10.13
Ours w/ seg-b250.1249.9227.2276.2247.8164.1379.4756.8335.76+11.47
", + "bbox": [ + 84, + 335, + 890, + 520 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2. Experimental results of 8 selected categories in panoramic semantic segmentation on C-to-D. SF: Source-free UDA. The bold and underline denote the best and the second-best performance in source-free UDA methods, respectively.", + "bbox": [ + 75, + 530, + 892, + 560 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.4. Optimization", + "text_level": 1, + "bbox": [ + 76, + 585, + 217, + 601 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The training objective for learning the target model containing three losses is defined as:", + "bbox": [ + 75, + 611, + 470, + 642 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\lambda \\cdot \\mathcal {L} _ {p p a} + \\gamma \\cdot \\mathcal {L} _ {c d a} + \\mathcal {L} _ {b n s} + \\mathcal {L} _ {s u p} \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 137, + 661, + 468, + 679 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\mathcal{L}_{ppa}$ is the MSE loss from PPAM, $\\mathcal{L}_{cda}$ refers to the KL loss from CDAM, $\\mathcal{L}_{sup}$ denotes the CE loss for the prediction pseudo label supervision loss, $\\mathcal{L}_{bns}$ refers to the BNS guided feature loss, and $\\lambda$ and $\\gamma$ are the trade-off weights of the proposed loss terms.", + "bbox": [ + 75, + 696, + 468, + 772 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experiments and Analysis", + "text_level": 1, + "bbox": [ + 76, + 796, + 318, + 813 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As the first SFUDA method for panoramic image segmentation, there is no prior method for direct comparison. 
We thus empirically validate our method by comparing it with the existing UDA and panoramic segmentation methods on three widely used benchmarks.", + "bbox": [ + 75, + 824, + 470, + 898 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Datasets and Implementation Details.", + "text_level": 1, + "bbox": [ + 498, + 585, + 823, + 601 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Cityscapes [11] is a real-world dataset collected for autonomous driving that contains street scenes. DensePASS [27] is a panoramic dataset designed for capturing diverse street scenes. SynPASS [50] is a synthetic dataset consisting of 9080 synthetic panoramic images. Stanford2D3D [3] is an indoor panoramic dataset which has 1413 panoramic images. Overall, the experiments are conducted on both real-world (Cityscapes-to-DensePASS, C-to-D, and Stanford2D3D-pinhole-to-Stanford2D3D-panoramic, SPinto-SPan) and synthetic-to-real (SynPASS-to-DensePASS, S-to-D) scenarios.", + "bbox": [ + 496, + 608, + 893, + 773 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Experimental Results.", + "text_level": 1, + "bbox": [ + 500, + 786, + 705, + 801 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We first evaluate our proposed framework under the S-to-D scenario. The experimental results are shown in Tab. 1. Our proposed method consistently outperforms source-free UDA methods [25] and [41] and even achieves panoramic semantic segmentation performance closer to that of the UDA method Trans4PASS [50] which utilizes the source data", + "bbox": [ + 496, + 809, + 893, + 898 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "27890", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/197af54072faa422c4fff6e530f2010d61c8000877057bc61004858b5a7a3eb7.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodSFmIoUCeilingChairDoorFloorSofaTableWallWindowΔ
PVT-S w/ MPA [49]X57.9585.8551.7618.3990.7835.9365.4375.0040.43-
Trans4PASS w/ MPA [49]X64.5285.0858.7234.9791.1246.2571.7277.5850.75-
Trans4PASS+ [50]X63.7390.6362.3024.7992.6235.7373.1678.7451.78-
Trans4PASS+ w/ MPA [50]X67.1690.0464.0442.8991.7438.3471.4581.2457.54-
SFDA [25]54.7679.4433.2052.0967.3622.5453.6469.3860.46-
Ours w/ b157.6373.8129.9863.6573.4931.7649.2572.8966.22+2.87
Ours w/ b265.7582.8838.0065.8186.7136.3266.1080.2969.88+10.99
", + "bbox": [ + 84, + 88, + 888, + 224 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/d17ce85e1176d2c29240ac5087d1ac952b6305d7ed62ffd1d5641f3b79d30395.jpg", + "table_caption": [ + "Table 3. Experimental results on indoor Stanford2D3D [3]. The bold denotes the best performance among UDA and SFUDA methods." + ], + "table_footnote": [], + "table_body": "
Loss Function CombinationsC-to-DS-to-D
\\( \\mathcal{L}_{sup} \\)\\( \\mathcal{L}_{ppa} \\)\\( \\mathcal{L}_{sft} \\)\\( \\mathcal{L}_{cda} \\)\\( \\mathcal{L}_{bns} \\)mIoUΔmIoUΔ
38.65-35.81-
45.42+6.7738.37+2.56
46.23+7.5838.49+2.68
44.24+5.5938.38+2.57
44.79+6.1438.52+2.71
48.78+10.1341.78+5.97
", + "bbox": [ + 81, + 273, + 470, + 412 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "in the adaptation procedure. Our proposed method brings significant performance gain of $+3.57\\%$ and $+3.54\\%$ with SegFormer-B1 backbone then SFDA [25] and DATC [41], respectively. We also provide the TSNE visualization in Fig. 5 (b) and qualitative results in Fig. 4. Apparently, our method gains a significant improvement in distinguishing the pixels in panoramic images in both prediction and high-level feature space. As shown in Tab. 2, we then evaluate our proposed framework under the C-to-D scenario. Our proposed method significantly outperforms source-free methods [25, 41] and some panoramic semantic segmentation methods [43, 46, 48]. Specifically, our method achieves a significant performance gain over SFDA [25] and DTAC [41] by $+6.08\\%$ and $+5.72\\%$ , respectively. This demonstrates that our proposed method endowed by PPAM and CDAM is more suitable for panoramic semantic segmentation tasks. Furthermore, as shown in the qualitative results in Fig. 4, our method achieves better segmentation in driving-related categories, such as rider and car.", + "bbox": [ + 75, + 458, + 470, + 744 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We also provide TSNE visualizations [35] in Fig. 5 (a), showing that our proposed method brings significant improvements in distinguishing pixels from different categories in high-level feature space. Additionally, we evaluated our proposed method on the Stanford2D3D [3] dataset and compared it with the SFDA [25] and MPA [50] methods. As shown in the following table, our proposed method significantly outperforms the SFDA by $+7.09\\%$ mIoU and is on par with the MPA method using source data $(61.85\\%$ vs. $67.16\\%)$ . 
Notably, for some categories, such as door $(57.90\\%$", + "bbox": [ + 75, + 750, + 470, + 901 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/cbb698b08d6ba9c23e795092f41df038f18af5013d33e193185423d1d7a81479.jpg", + "table_caption": [ + "Table 4. Ablation study of different module combinations." + ], + "table_footnote": [], + "table_body": "
Combinationsτg+τpτg+τfτg+τp+τf
mIoU44.1444.2845.42
", + "bbox": [ + 504, + 273, + 890, + 318 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 5. Ablation study of different prototype combinations.", + "bbox": [ + 514, + 328, + 874, + 342 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "vs. $42.89\\%$ ) and window $(68.06\\%$ vs. $57.54\\%)$ , our method event outperforms the MPA [50].", + "bbox": [ + 498, + 363, + 890, + 393 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5. Ablation Study", + "text_level": 1, + "bbox": [ + 500, + 407, + 650, + 422 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Different Loss Function Combinations. To assess the effectiveness of the proposed modules, we conduct ablation experiments on both real-world and synthetic-to-real scenarios with various loss combinations. All of the proposed modules and loss functions have a positive impact on improving segmentation performance. Notably, our PPAM yields a significant performance gain of $+6.77\\%$ . This indicates that PPAM alleviates the intricate semantics and distortion problem with the tangent, and our proposed FFP projection is valid. This is further supported by the qualitative results presented in Fig. 4. Additionally, our proposed CDAM achieves a performance gain of $+5.59\\%$ compared to the source baseline, which means that CDAM imitates the spatial and channel-wise distributions of ERP and FFP features and further addresses the style discrepancy problems.", + "bbox": [ + 496, + 431, + 893, + 657 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Ablation of Different Prototype Combinations. To validate the effectiveness of all the prototypes in PPAM, we conduct experiments on C-to-D using SegFormer-B1 and only $\\mathcal{L}_{sup}$ and $\\mathcal{L}_{ppa}$ . The results of the performance with different prototype combinations are presented in Tab. 5. 
Both prototypes from TP and FFP have a positive effect on PPAM, with $\\tau_{p}$ and $\\tau_{f}$ resulting in mIoU improvements of $+5.49\\%$ and $+5.63\\%$ , respectively, compared to the source baseline. When both prototypes are combined together, there is a mIoU gain of $+6.77\\%$ , indicating that their combination is better for prototype-level adaptation.", + "bbox": [ + 496, + 659, + 893, + 824 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Dual Attention vs. Cross Dual Attention. The dual attention (DA) approach proposed in SFDA [25] aligns the spatial and channel characteristics of features between the fake source and target data. In contrast, our cross dual attention (CDA) approach aligns the distribution between different", + "bbox": [ + 496, + 825, + 893, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "27891", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/58fb62606424e11248d1022b69f80d5a25d367c9d1832cfb58073bd549911574.jpg", + "image_caption": [ + "Figure 5. TSNE visualization of (a) Cityscapes-to-DensePASS and (b) SynPASS-to-DensePASS." + ], + "image_footnote": [], + "bbox": [ + 84, + 89, + 467, + 340 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/77660571ebb2e84204d58a906915307d2b3bc4e03ad009015b58f6d796d9988e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
FoVw/o60°72°90°120°180°360°
mIoU38.6544.0344.1644.2844.0241.6540.31
Δ-+5.38+5.51+5.63+5.37+3.00+1.66
", + "bbox": [ + 81, + 395, + 480, + 449 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "projections of the same spherical data, specifically ERP and FFP, resulting in more robust and stable knowledge transfer. Moreover, in our SFDA, we obtain spatial and channel characteristics across features, whereas DA operates within features. We also evaluate DA on the C-to-D scenario, and our CDA achieves $44.24\\%$ mIoU, while DA only reaches $41.53\\%$ mIoU. This indicates the proposed CDA is better for SFUDA in panoramic semantic segmentation.", + "bbox": [ + 75, + 502, + 468, + 625 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Field-of-view of FFP. Most existing approaches for panoramic semantic segmentation, such as those proposed in [49, 50, 59], primarily focus on alleviating distortion by introducing distortion-aware components and distinct projection strategies. However, as discussed in Sec. 3.2, $360^{\\circ}$ images contain more intricate semantic information and object correspondence than the pinhole images, resulting in an obvious semantic mismatch between domains. Therefore, we propose the Fixed FoV Pooling (FFP) strategy to address the semantic mismatch. Experimental results show that the fixed FoV is the most influential factor in FFP, with an FoV of $90^{\\circ}$ achieving the best segmentation performance, as shown in Tab. 6, with a mIoU of $44.28\\%$ .", + "bbox": [ + 75, + 625, + 470, + 821 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ablation of Hyper-parameters. We now show the influence of hyperparameters $\\gamma$ and $\\lambda$ , which are the weights for the KL loss in CDAM and the MSE loss in PPAM, respectively. The experimental results are provided in Tab. 7.", + "bbox": [ + 75, + 823, + 470, + 883 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Fine-tuning the Source Model. 
As the pre-trained model", + "bbox": [ + 76, + 885, + 470, + 901 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/7aa3ea0d82e66d82bb6e14d28ba461098541c56cd1cc1353e51034ea2d92a5e8.jpg", + "table_caption": [ + "Table 6. Ablation study of the FoV of our proposed FFP." + ], + "table_footnote": [], + "table_body": "
γ00.010.020.050.10.2
mIoU38.6542.0543.2443.2844.2443.07
Δ-+3.40+4.59+4.63+5.59+4.42
λ0506080100120
mIoU38.6543.1343.2245.3645.4245.34
Δ-+4.48+4.57+6.71+6.77+6.69
", + "bbox": [ + 508, + 88, + 903, + 195 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 7. Ablation study of $\\gamma$ and $\\lambda$ .", + "bbox": [ + 588, + 205, + 800, + 219 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "in the source (pinhole) domain is not an ideal model for the target (panoramic) image domain, we propose to fine-tune the source model with the loss function $\\mathcal{L}_{sft}$ , as described in Sec. 3.2. Tab. 4 demonstrates the effectiveness of the proposed $\\mathcal{L}_{sft}$ . When combined with the prototypical adaptation loss $\\mathcal{L}_{ppa}$ , adding $\\mathcal{L}_{sft}$ results in a $6.77\\%$ mIoU gain compared with the source baseline of $38.65\\%$ . We present the performance metrics derived solely from the loss $\\mathcal{L}_{sft}$ of PPAM: C-2-D registers at $44.94\\%$ while S-2-D records $36.74\\%$ . These results underscore the efficacy of $\\mathcal{L}_{sft}$ integrated within our PPAM module. Concerning transfer-ability, our $\\mathcal{L}_{sft}$ exhibits compatibility with various projection methods, e.g., cube map. At its core, our fine-tuning loss seeks to align all projection images originating from the same panoramic source, irrespective of the employed projection technique. This intrinsic adaptability facilitates the application of $\\mathcal{L}_{sft}$ across diverse projections. More results refer to the supplementary material.", + "bbox": [ + 496, + 238, + 893, + 511 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 523, + 619, + 540 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we investigated a new problem of achieving SFUDA for panoramic semantic segmentation. To this end, we proposed an end-to-end SFUDA framework to address the domain shifts, including semantic mismatch, distortion, and style discrepancies, between pinhole and panoramic domains. 
Experiments on both real-world and synthetic benchmarks show that our proposed framework outperforms prior approaches and is on par with the methods using source data.", + "bbox": [ + 496, + 549, + 893, + 671 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitation and future work. One limitation of our proposed framework is the computational cost brought by the tangent projection during training, and there is still room for improvements in segmentation performance. However, components in our approach such as panoramic prototypes and fixed FoV projection have significant implications for the $360^{\\circ}$ vision, especially for the panoramic semantic segmentation. In the future, we plan to utilize the large language models (LLMs) and Multi-modal large language models (MLLMs) to alleviate the domain gaps, such as the semantic mismatches between pinhole and panoramic images.", + "bbox": [ + 496, + 670, + 893, + 835 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgement This paper is supported by the National Natural Science Foundation of China (NSF) under Grant No. NSFC22FYT45 and the Guangzhou City, University and Enterprise Joint Fund under Grant No.SL2022A03J01278.", + "bbox": [ + 496, + 835, + 893, + 897 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "27892", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Hao Ai, Zidong Cao, Jinjing Zhu, Haotian Bai, Yucheng Chen, and Ling Wang. Deep learning for omnidirectional vision: A survey and new perspectives. arXiv preprint arXiv:2205.10468, 2022. 1", + "[2] Nikita Araslanov and Stefan Roth. Self-supervised augmentation consistency for adapting semantic segmentation. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15384-15394, 2021. 2", + "[3] Iro Armeni, Sasha Sax, Amir R Zamir, and Silvio Savarese. Joint 2d-3d-semantic data for indoor scene understanding. arXiv preprint arXiv:1702.01105, 2017. 6, 7", + "[4] Mathilde Bateson, Hoel Kervadec, Jose Dolz, Herve Lombaert, and Ismail Ben Ayed. Source-free domain adaptation for image segmentation. Medical Image Analysis, 82:102617, 2022. 2", + "[5] Chaoqi Chen, Weiping Xie, Tingyang Xu, Wenbing Huang, Yu Rong, Xinghao Ding, Yue Huang, and Junzhou Huang. Progressive feature alignment for unsupervised domain adaptation. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 627-636, 2019. 2", + "[6] Jialei Chen, Daisuke Deguchi, Chenkai Zhang, Xu Zheng, and Hiroshi Murase. Frozen is better than learning: A new design of prototype-based classifier for semantic segmentation. Available at SSRN 4617170. 2", + "[7] Jialei Chen, Chong Fu, Haoyu Xie, Xu Zheng, Rong Geng, and Chiu-Wing Sham. Uncertainty teacher with dense focal loss for semi-supervised medical image segmentation. Computers in Biology and Medicine, 149:106034, 2022.", + "[8] Jialei Chen, Daisuke Deguchi, Chenkai Zhang, Xu Zheng, and Hiroshi Murase. Clip is also a good teacher: A new learning framework for inductive zero-shot semantic segmentation. arXiv preprint arXiv:2310.02296, 2023.", + "[9] Minghao Chen, Hongyang Xue, and Deng Cai. Domain adaptation for semantic segmentation with maximum squares loss. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2090-2099, 2019. 2", + "[10] Jaehoon Choi, Taekyung Kim, and Changick Kim. Self-ensembling with gan-based data augmentation for domain adaptation in semantic segmentation. 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pages 6829–6839, 2019. 
2", + "[11] Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In Proc. of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 6", + "[12] Marc Eder, Mykhailo Shvets, John Lim, and Jan-Michael Frahm. Tangent images for mitigating spherical distortion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12426-12434, 2020. 3", + "[13] Francois Fleuret et al. Uncertainty reduction for model adaptation in semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9613-9623, 2021. 2" + ], + "bbox": [ + 78, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[14] Xiaoqing Guo, Jie Liu, Tongliang Liu, and Yixuan Yuan. Simt: Handling open-set noise for domain adaptive semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7032-7041, 2022. 2, 5", + "[15] Judy Hoffman, Dequan Wang, Fisher Yu, and Trevor Darrell. Fcts in the wild: Pixel-level adversarial and constraint-based adaptation. ArXiv, abs/1612.02649, 2016. 2", + "[16] Judy Hoffman, Eric Tzeng, Taesung Park, Jun-Yan Zhu, Phillip Isola, Kate Saenko, Alexei A. Efros, and Trevor Darrell. Cycada: Cycle-consistent adversarial domain adaptation. In ICML, 2018. 2", + "[17] Lukas Hoyer, Dengxin Dai, and Luc Van Gool. Daformer: Improving network architectures and training strategies for domain-adaptive semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9924-9935, 2022. 2, 6", + "[18] Jiaxing Huang, Dayan Guan, Aoran Xiao, and Shijian Lu. Model adaptation: Historical contrastive learning for unsupervised domain adaptation without source data. 
Advances in Neural Information Processing Systems, 34:3635-3649, 2021. 2, 5", + "[19] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 1", + "[20] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 1", + "[21] Jogendra Nath Kundu, Akshay Kulkarni, Amit Singh, Varun Jampani, and R Venkatesh Babu. Generalize then adapt: Source-free domain adaptive semantic segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7046-7056, 2021. 2, 5", + "[22] Yunsheng Li, Lu Yuan, and Nuno Vasconcelos. Bidirectional learning for domain adaptation of semantic segmentation. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6929-6938, 2019. 2", + "[23] Yuyan Li, Yuliang Guo, Zhixin Yan, Xinyu Huang, Ye Duan, and Liu Ren. Omnifusion: 360 monocular depth estimation via geometry-aware fusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2801-2810, 2022. 4", + "[24] Mengyi Liu, Shuhui Wang, Yulan Guo, Yuan He, and Hui Xue. Pano-sfmlearner: Self-supervised multi-task learning of depth and semantics in panoramic videos. IEEE Signal Processing Letters, 28:832-836, 2021. 2", + "[25] Yang Liu, Wei Zhang, and Jun Wang. Source-free domain adaptation for semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1215-1224, 2021. 2, 3, 5, 6, 7", + "[26] Yawei Luo, Liang Zheng, Tao Guan, Junqing Yu, and Yi Yang. Taking a closer look at domain shift: Category-level adversaries for semantics consistent domain adaptation. 
2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2502-2511, 2019. 2" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "27893", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[27] Chaoxiang Ma, Jiaming Zhang, Kailun Yang, Alina Roitberg, and Rainer Stiefelhagen. Densepass: Dense panoramic semantic segmentation via unsupervised domain adaptation with attention-augmented context exchange. In 2021 IEEE International Intelligent Transportation Systems Conference (ITSC), pages 2766-2772. IEEE, 2021. 6", + "[28] Luke Melas-Kyriazi and Arjun K. Manrai. Pixmatch: Unsupervised domain adaptation via pixelwise consistency training. 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 12430-12440, 2021. 2", + "[29] Zak Murez, Soheil Kolouri, David J. Kriegman, Ravi Ramamoorthi, and Kyungnam Kim. Image to image translation for domain adaptation. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4500-4509, 2018. 2", + "[30] Fei Pan, Inkyu Shin, Francois Rameau, Seokju Lee, and In So Kweon. Unsupervised intra-domain adaptation for semantic segmentation through self-supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3764-3773, 2020. 2", + "[31] Swami Sankaranarayanan, Yogesh Balaji, Arpit Jain, Ser-Nam Lim, and Rama Chellappa. Learning from synthetic data: Addressing domain shift for semantic segmentation. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3752-3761, 2018. 2", + "[32] Weifa Shen, Qixiong Wang, Hongxiang Jiang, Sen Li, and Jihao Yin. Unsupervised domain adaptation for semantic segmentation via self-supervision. In 2021 IEEE International Geoscience and Remote Sensing Symposium IGARSS, pages 2747-2750. IEEE, 2021. 2", + "[33] Serban Stan and Mohammad Rostami. 
Unsupervised model adaptation for continual semantic segmentation. In Proceedings of the AAAI conference on artificial intelligence, pages 2593-2601, 2021. 2", + "[34] Yi-Hsuan Tsai, Wei-Chih Hung, Samuel Schulter, Kihyuk Sohn, Ming-Hsuan Yang, and Manmohan Chandraker. Learning to adapt structured output space for semantic segmentation. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7472-7481, 2018. 2", + "[35] Laurens Van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. Journal of machine learning research, 9(11), 2008. 7", + "[36] Tuan-Hung Vu, Himalaya Jain, Maxime Bucher, Matthieu Cord, and Patrick Pérez. Advent: Adversarial entropy minimization for domain adaptation in semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2517-2526, 2019. 2", + "[37] Tuan-Hung Vu, Himalaya Jain, Maxime Bucher, Matthieu Cord, and Patrick Pérez. Dada: Depth-aware domain adaptation in semantic segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7364-7373, 2019. 2", + "[38] Qin Wang, Dengxin Dai, Lukas Hoyer, Olga Fink, and Luc Van Gool. Domain adaptive semantic segmentation with self-supervised depth estimation. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 8495-8505, 2021. 2" + ], + "bbox": [ + 78, + 90, + 470, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[39] Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, Kaitao Song, Ding Liang, Tong Lu, Ping Luo, and Ling Shao. Pyramid vision transformer: A versatile backbone for dense prediction without convolutions. In Proceedings of the IEEE/CVF international conference on computer vision, pages 568-578, 2021. 5", + "[40] Haoyu Xie, Chong Fu, Xu Zheng, Yu Zheng, Chiu-Wing Sham, and Xingwei Wang. Adversarial co-training for semantic segmentation over medical images. 
Computers in biology and medicine, 157:106736, 2023. 2", + "[41] Cheng-Yu Yang, Yuan-Jhe Kuo, and Chiou-Ting Hsu. Source free domain adaptation for semantic segmentation via distribution transfer and adaptive class-balanced self-training. In 2022 IEEE International Conference on Multimedia and Expo (ICME), pages 1-6. IEEE, 2022. 2, 3, 5, 6, 7", + "[42] Kailun Yang, Xinxin Hu, Luis M Bergasa, Eduardo Romero, and Kaiwei Wang. Pass: Panoramic annular semantic segmentation. IEEE Transactions on Intelligent Transportation Systems, 21(10):4171-4185, 2019. 1", + "[43] Kailun Yang, Xinxin Hu, Yicheng Fang, Kaiwei Wang, and Rainer Stiefelhagen. Omnisupervised omnidirectional semantic segmentation. IEEE Transactions on Intelligent Transportation Systems, 2020. 1, 7", + "[44] Mucong Ye, Jing Zhang, Jinpeng Ouyang, and Ding Yuan. Source data-free unsupervised domain adaptation for semantic segmentation. In Proceedings of the 29th ACM International Conference on Multimedia, pages 2233-2242, 2021. 2, 3", + "[45] Hao-Wei Yeh, Baoyao Yang, Pong C Yuen, and Tatsuya Harada. Sofa: Source-data-free feature alignment for unsupervised domain adaptation. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 474–483, 2021. 2", + "[46] Xiangyu Yue, Zangwei Zheng, Shanghang Zhang, Yang Gao, Trevor Darrell, Kurt Keutzer, and Alberto Sangiovanni Vincentelli. Prototypical cross-domain self-supervised learning for few-shot unsupervised domain adaptation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13834-13844, 2021. 1, 7", + "[47] Cheng Zhang, Zhaopeng Cui, Cai Chen, Shuaicheng Liu, Bing Zeng, Hujun Bao, and Yinda Zhang. Deeppanoocontext: Panoramic 3d scene understanding with holistic scene context graph and relation-based optimization. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 12612-12621, 2021. 
2", + "[48] Jiaming Zhang, Chaoxiang Ma, Kailun Yang, Alina Roitberg, Kunyu Peng, and Rainer Stiefelhagen. Transfer beyond the field of view: Dense panoramic semantic segmentation via unsupervised domain adaptation. IEEE Transactions on Intelligent Transportation Systems, 2021. 1, 7", + "[49] Jiaming Zhang, Kailun Yang, Chaoxiang Ma, Simon Reiβ, Kunyu Peng, and Rainer Stiefelhagen. Bending reality: Distortion-aware transformers for adapting to panoramic semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16917-16927, 2022. 1, 2, 3, 4, 6, 7, 8", + "[50] Jiaming Zhang, Kailun Yang, Hao Shi, Simon Reiβ, Kunyu Peng, Chaoxiang Ma, Haodong Fu, Kaiwei Wang, and Rainer" + ], + "bbox": [ + 501, + 92, + 893, + 901 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "27894", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Stiefelhagen. Behind every domain there is a shift: Adapting distortion-aware vision transformers for panoramic semantic segmentation. arXiv preprint arXiv:2207.11860, 2022. 1, 2, 4, 6, 7, 8", + "[51] Pan Zhang, Bo Zhang, Ting Zhang, Dong Chen, Yong Wang, and Fang Wen. Prototypical pseudo label denoising and target structure learning for domain adaptive semantic segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12414-12424, 2021. 2, 5", + "[52] Qiming Zhang, Jing Zhang, Wei Liu, and Dacheng Tao. Category anchor-guided unsupervised domain adaptation for semantic segmentation. Advances in neural information processing systems, 32, 2019. 2", + "[53] Yang Zhang, Philip David, and Boqing Gong. Curriculum domain adaptation for semantic segmentation of urban scenes. 2017 IEEE International Conference on Computer Vision (ICCV), pages 2039-2049, 2017. 2", + "[54] Yuyang Zhao, Zhun Zhong, Zhiming Luo, Gim Hee Lee, and Nicu Sebe. 
Source-free open compound domain adaptation in semantic segmentation. IEEE Transactions on Circuits and Systems for Video Technology, 32(10):7019-7032, 2022. 2", + "[55] Xu Zheng, Chong Fu, Haoyu Xie, Jialei Chen, Xingwei Wang, and Chiu-Wing Sham. Uncertainty-aware deep co-training for semi-supervised medical image segmentation. Computers in Biology and Medicine, 149:106051, 2022. 2", + "[56] Xu Zheng, Yunhao Luo, Hao Wang, Chong Fu, and Lin Wang. Transformer-cnn cohort: Semi-supervised semantic segmentation by the best of both students. arXiv preprint arXiv:2209.02178, 2022.", + "[57] Xu Zheng, Yunhao Luo, Pengyuan Zhou, and Lin Wang. Distilling efficient vision transformers from cnns for semantic segmentation. arXiv preprint arXiv:2310.07265, 2023. 2", + "[58] Xu Zheng, Tianbo Pan, Yunhao Luo, and Lin Wang. Look at the neighbor: Distortion-aware unsupervised domain adaptation for panoramic semantic segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 18687-18698, 2023. 2, 6", + "[59] Xu Zheng, Jinjing Zhu, Yexin Liu, Zidong Cao, Chong Fu, and Lin Wang. Both style and distortion matter: Dual-path unsupervised domain adaptation for panoramic semantic segmentation. arXiv preprint arXiv:2303.14360, 2023. 1, 2, 4, 6, 8", + "[60] Jinjing Zhu, Yunhao Luo, Xu Zheng, Hao Wang, and Lin Wang. A good student is cooperative and reliable: Cnn-transformer collaborative learning for semantic segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11720-11730, 2023. 2", + "[61] Yang Zou, Zhiding Yu, BVK Kumar, and Jinsong Wang. Unsupervised domain adaptation for semantic segmentation via class-balanced self-training. In Proceedings of the European conference on computer vision (ECCV), pages 289-305, 2018. 
2" + ], + "bbox": [ + 78, + 90, + 470, + 837 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "27895", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2024/Semantics Distortion and Style Matter_ Towards Source-free UDA for Panoramic Segmentation/fc7b011a-8b94-4431-a0e3-dcc09545c286_model.json b/2024/Semantics Distortion and Style Matter_ Towards Source-free UDA for Panoramic Segmentation/fc7b011a-8b94-4431-a0e3-dcc09545c286_model.json new file mode 100644 index 0000000000000000000000000000000000000000..137649897901cf7321be0231ab271de64b676fe7 --- /dev/null +++ b/2024/Semantics Distortion and Style Matter_ Towards Source-free UDA for Panoramic Segmentation/fc7b011a-8b94-4431-a0e3-dcc09545c286_model.json @@ -0,0 +1,2180 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.13, + 0.895, + 0.178 + ], + "angle": 0, + "content": "Semantics, Distortion, and Style Matter: Towards Source-free UDA for Panoramic Segmentation" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.203, + 0.882, + 0.275 + ], + "angle": 0, + "content": "Xu Zheng\\(^{1}\\) Pengyuan Zhou\\(^{3}\\) Athanasios V. Vasilakos\\(^{4}\\) Lin Wang\\(^{1,2*}\\) \n\\(^{1}\\)AI Thrust, HKUST(GZ) \\(^{2}\\)Dept. 
of CSE, HKUST \\(^{3}\\)Aarhus University \\(^{4}\\)University of Agder zhengxu128@gmail.com, pengyuan.zhou@ece.au.dk, th.vasilakos@gmail.com, linwang@ust.hk Project Page: https://vlislab22.github.io/360SFUDA/" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.309, + 0.314, + 0.327 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.342, + 0.474, + 0.751 + ], + "angle": 0, + "content": "This paper addresses an interesting yet challenging problem—source-free unsupervised domain adaptation (SFUDA) for pinhole-to-panoramic semantic segmentation—given only a pinhole image-trained model (i.e., source) and unlabeled panoramic images (i.e., target). Tackling this problem is nontrivial due to the semantic mismatches, style discrepancies, and inevitable distortion of panoramic images. To this end, we propose a novel method that utilizes Tangent Projection (TP) as it has less distortion and meanwhile slits the equirectangular projection (ERP) with a fixed FoV to mimic the pinhole images. Both projections are shown effective in extracting knowledge from the source model. However, the distinct projection discrepancies between source and target domains impede the direct knowledge transfer; thus, we propose a panoramic prototype adaptation module (PPAM) to integrate panoramic prototypes from the extracted knowledge for adaptation. We then impose the loss constraints on both predictions and prototypes and propose a cross-dual attention module (CDAM) at the feature level to better align the spatial and channel characteristics across the domains and projections. Both knowledge extraction and transfer processes are synchronously updated to reach the best performance. Extensive experiments on the synthetic and real-world benchmarks, including outdoor and indoor scenarios, demonstrate that our method achieves significantly better performance than prior SFUDA methods for pinhole-to-panoramic adaptation." 
+ }, + { + "type": "title", + "bbox": [ + 0.078, + 0.778, + 0.21, + 0.793 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.803, + 0.472, + 0.879 + ], + "angle": 0, + "content": "The comprehensive scene perception abilities of \\(360^{\\circ}\\) cameras have made them highly popular for applications, such as autonomous driving [1]. In contrast to pinhole cameras that capture 2D planer images with a limited field-of-view (FoV), \\(360^{\\circ}\\) cameras offer a much wider FoV of" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.31, + 0.887, + 0.44 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.458, + 0.893, + 0.487 + ], + "angle": 0, + "content": "Figure 1. We address a new problem of achieving source-free pinhole-to-panoramic adaptation for segmentation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.515, + 0.895, + 0.56 + ], + "angle": 0, + "content": "\\(360^{\\circ} \\times 180^{\\circ}\\). As a result, research on panoramic semantic segmentation [42, 43, 46, 48, 49] has been actively explored to achieve dense scene understanding for intelligent systems." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.561, + 0.896, + 0.865 + ], + "angle": 0, + "content": "Generally, the spherical data captured by the \\(360^{\\circ}\\) cameras is always projected into 2D planar representations, e.g., Equirectangular Projection (ERP), to be aligned with the existing imaging pipeline [1] while preserving the omnidirectional information1. However, ERP suffers from the inevitable distortion and object deformation due to the nonuniformly distributed pixels [59]. Meanwhile, learning effective panoramic segmentation models is often impeded by the lack of large precisely labeled datasets due to the difficulty of annotation. 
For these reasons, some unsupervised domain adaptation (UDA) methods [49, 50, 59] have been proposed to transfer the knowledge from the pinhole image domain to the panoramic image domain. In some crucial application scenarios, e.g., autonomous driving, source datasets are not always accessible due to privacy and commercial issues, such as data portability and transmission costs. One typical example is the recent large model, SAM [19], which brings significant progress in instance segmentation for pinhole images; however, the source datasets are too large (10TB) to be reused in end-tasks, such as [20]." + }, + { + "type": "page_footnote", + "bbox": [ + 0.5, + 0.875, + 0.895, + 0.901 + ], + "angle": 0, + "content": "1In this paper, omnidirectional and panoramic images are interchangeably used, and ERP images often indicate panoramic images." + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.224, + 0.9 + ], + "angle": 0, + "content": "*Corresponding author." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "27885" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.304 + ], + "angle": 0, + "content": "Motivation: In this paper, we probe an interesting yet challenging problem: source-free UDA (SFUDA) for panoramic segmentation, in which only the source model (pretrained with pinhole images) and unlabeled panoramic images are available. As shown in Fig. 1 (a), different from existing SFUDA methods, e.g., [25, 41, 44] for the pinhole-to-pinhole image adaptation, transferring knowledge from the pinhole-to-panoramic image domain is hampered by: 1) semantic mismatch caused by the different FoV between the pinhole and \\(360^{\\circ}\\) cameras, i.e., \\(70^{\\circ}\\) vs. \\(360^{\\circ}\\); 2) inevitable distortion of the ERP; 3) style discrepancies caused by the distinct camera sensors and captured scenes. In Tab. 
2, we show that naively adapting existing SFUDA methods to our problem leads to a limited performance boost." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.308, + 0.473, + 0.7 + ], + "angle": 0, + "content": "Contributions: To this end, we propose a novel SFUDA method that effectively extracts knowledge from the source model with only panoramic images and transfers the knowledge to the target panoramic domain. Our key idea is to leverage the multi-projection versatility of \\(360^{\\circ}\\) data for efficient domain knowledge transfer. Our method enjoys two key technical contributions. Specifically, we use Tangent Projection (TP) and divide the ERP images into patches with a fixed FoV, dubbed Fixed FoV Projection (FFP), to extract knowledge from the source model with less distortion and similar FoV to the pinhole images. Both projections make it possible to effectively extract knowledge from the source model. However, directly transferring the extracted knowledge to the target model is hardly approachable due to the distinct projection gaps. Thus, we propose a panoramic prototype adaptation module (PPAM) to obtain class-wise semantic prototypes from the features and predictions of the source model with TP and FFP images (Sec. 3.2). Then, these prototypes are integrated together to obtain the global panoramic prototypes for knowledge adaptation, which is updated across the adaptation procedure. Moreover, our proposed PPAM also fine-tunes the source model to promote better knowledge extraction using prototypes extracted from FFP images. Aligning the prototypes from each FFP image enables the source model to become more aware of distortion and semantics across the FoV." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.705, + 0.473, + 0.903 + ], + "angle": 0, + "content": "We initially apply both prediction-level and prototype-level loss constraints to facilitate knowledge transfer to the unlabeled target panoramic domain. 
Concretely, the FFP predictions of the source model are rebuilt together to provide a pseudo-supervision signal for the target model. The prototype-level loss constraint is performed between the panoramic prototypes from PPAM and the prototypes from the target model's features and predictions on the ERP images. Moreover, knowledge from the source model is not limited to predictions and prototypes, high-level features also contain crucial image characteristics that can enhance the performance of the target model. Consequently, we propose a Cross-Dual Attention Module (CDAM) that aligns" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.198 + ], + "angle": 0, + "content": "spatial and channel characteristics between domains to fully utilize the knowledge from the source model and address the style discrepancy problem (Sec. 3.3). Specifically, CDAM reconstructs the source model features from FFP images to provide a panoramic perception of the surrounding environment and aligns them with the ERP features from the target model for effective knowledge transfer." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.199, + 0.897, + 0.38 + ], + "angle": 0, + "content": "We conduct extensive experiments on both synthetic and real-world benchmarks, including outdoor and indoor scenarios. As no directly comparable works exist, we adapt the state-of-the-art (SoTA) SFUDA methods [14, 18, 21, 25, 41, 51] – designed for pinhole-to-pinhole image adaptation – to our problem in addressing the panoramic semantic segmentation. The results show that our framework significantly outperforms these methods by large margins of \\(+6.37\\%\\), \\(+11.47\\%\\), and \\(+10.99\\%\\) on three benchmarks. We also evaluate our method against UDA methods [49, 50, 58, 59], using the source pinhole image, the results demonstrate its comparable performance." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.395, + 0.642, + 0.41 + ], + "angle": 0, + "content": "2. 
Related Work" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.42, + 0.805, + 0.436 + ], + "angle": 0, + "content": "2.1. Source-free UDA for Segmentation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.443, + 0.897, + 0.777 + ], + "angle": 0, + "content": "UDA aims to mitigate the impact of domain shift caused by data distribution discrepancies in downstream computer vision tasks, such as semantic segmentation [2, 6-9, 13, 17, 30, 32, 33, 36, 37, 40, 52, 55-57, 60, 61]. However, the source domain data may not always be accessible due to the privacy protection and data storage concerns. Intuitively, source-free UDA (SFUDA) [18, 21, 45] methods are proposed to adapt source models to a target domain without access to the source data. Existing SFUDA methods for semantic segmentation primarily focus on source data estimation [41, 44] or self-training [4, 21, 25, 54] for pinhole images. In this paper, we make the first attempt at achieving SFUDA from the pinhole image domain to the panoramic domain. This task is nontrivial to be tackled due to the semantic mismatches, style discrepancies, and inevitable distortion of panoramic images. Unlike these methods that focus on the source domain data estimation [25, 44], we propose a novel SFUDA method that effectively extracts knowledge from the source model with only panoramic images and transfers the knowledge to the target panoramic image domain. Experiments also show that naively applying these methods leads to less optimal performance (See Tab. 2)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.787, + 0.875, + 0.803 + ], + "angle": 0, + "content": "2.2. UDA for Panoramic Semantic Segmentation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.897, + 0.903 + ], + "angle": 0, + "content": "It can be classified into three types, including adversarial training [10, 16, 31, 34, 59], pseudo labeling [24, 38, 47, 53] and prototypical adaptation methods [49, 50]. 
Specifically, the first line of research applies alignment approaches to capture the domain invariant characteristics of images [16, 22, 29], feature [5, 15, 16, 59] and predictions [26, 28]." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "27886" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.123, + 0.094, + 0.849, + 0.367 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.209, + 0.381, + 0.761, + 0.397 + ], + "angle": 0, + "content": "Figure 2. Overall framework of our proposed SFUDA for panoramic semantic segmentation." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.422, + 0.474, + 0.62 + ], + "angle": 0, + "content": "The second type of methods generates pseudo labels for the target domain training. The last line of research, e.g., Mutual Prototype Adaption (MPA) [49], mutually aligns the high-level features with the prototypes between domain. However, these methods treat panoramic images as pinhole images when extracting prototypes, ignoring the intricate semantic, object correspondence, and distortion information brought by the panoramic FoV. We are the first to address the SFUDA problem for panoramic segmentation. Considering the distinct projection discrepancies between source and target domains, we propose a PPAM to integrate the global panoramic prototypes from the extracted knowledge for adaptation." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.637, + 0.214, + 0.656 + ], + "angle": 0, + "content": "3. Methodology" + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.663, + 0.188, + 0.679 + ], + "angle": 0, + "content": "3.1. Overview" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.688, + 0.47, + 0.764 + ], + "angle": 0, + "content": "The overall framework for panoramic segmentation is shown in Fig. 2. 
With only the source model \\( F_{S} \\) available and given the unlabeled panoramic image data \\( D_{T} \\), we aim to train a target model \\( F_{T} \\) that adapts knowledge from \\( F_{S} \\) to the common \\( K \\) categories across both domains." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.474, + 0.903 + ], + "angle": 0, + "content": "Unlike the pinhole image-to-image adaptation [25, 41, 44], pinhole-to-panoramic image domain adaptation is hampered by three key factors, specifically: semantic mismatch due to FoV variations \\((70^{\\circ}\\) vs. \\(360^{\\circ})\\), inevitable distortion in ERP, and ubiquitous style discrepancies in unsupervised domain adaptation (UDA) (refer to Fig.1 (a)). Therefore, naively applying existing SFUDA methods exhibits suboptimal segmentation performance (See Tab. 2), while UDA methods with source data, e.g., [25] for panoramic segmen" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.422, + 0.895, + 0.498 + ], + "angle": 0, + "content": "tation do not account for the semantic mismatch between the pinhole and panoramic images. Intuitively, the key challenges are : 1) how to extract knowledge from the source model with only panoramic images and 2) how to transfer knowledge to the target panoramic image domain." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.499, + 0.892, + 0.529 + ], + "angle": 0, + "content": "Our key idea is to leverage the multi-projection versatility of \\(360^{\\circ}\\) data for efficient domain knowledge transfer." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.53, + 0.897, + 0.773 + ], + "angle": 0, + "content": "Concretely, to address the first challenge (Sec. 3.2), we use the Tangent Projection (TP) which is characterized by a reduced distortion issue compared to the ERP images [12] to extract knowledge from the source model. 
Concurrently, ERP images are segmented into discrete patches, each possessing a constant FoV to mimic the pinhole images, dubbed Fixed FoV Projection (FFP). Both projections make it possible to effectively extract knowledge from the source model. The distinct projection formats make it impossible to directly transfer knowledge between domains, thus we propose a Panoramic Prototype Adaptation Module (PPAM) to obtain panoramic prototypes for adaptation. To address the second challenge (Sec. 3.3), we first impose prediction and prototype level loss constraints, and propose a Cross-Dual Attention Module (CDAM) at the feature level to transfer knowledge and further address the style discrepancies." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.785, + 0.71, + 0.801 + ], + "angle": 0, + "content": "3.2. Knowledge Extraction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.809, + 0.895, + 0.903 + ], + "angle": 0, + "content": "As depicted in Fig. 2, given the target domain (i.e., panoramic domain) ERP images \\( D_{T} = \\{x_{T}|x_{T}\\in \\mathbf{R}^{H\\times W\\times 3}\\} \\), we first project them into TP images \\( D_{T}^{t} = \\{x_{T}^{t}|x_{T}^{t}\\in \\mathbf{R}^{h\\times w\\times 3}\\} \\) and FFP images \\( D_{T}^{f} = \\{x_{T}^{f}|x_{T}^{f}\\in \\mathbf{R}^{H\\times W / 4\\times 3}\\} \\) for effectively extracting knowledge from the source model. Note that one ERP image corresponds to 18" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "27887" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.094, + 0.104, + 0.445, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.296, + 0.47, + 0.325 + ], + "angle": 0, + "content": "Figure 3. Illustration of the prototype extraction (PE) in the panoramic prototype adaptation module (PPAM)." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.352, + 0.47, + 0.428 + ], + "angle": 0, + "content": "TP images as [23, 59] and 4 FFP images with a fixed FoV of \\(90^{\\circ}\\) (See Sec. 5). To obtain the features and predictions from the source model for knowledge adaptation, the two types of projected images are first fed into the source model with batch sampling:" + }, + { + "type": "equation", + "bbox": [ + 0.13, + 0.429, + 0.47, + 0.448 + ], + "angle": 0, + "content": "\\[\nP ^ {p}, f ^ {p} = F _ {S} \\left(x _ {T} ^ {t}\\right), \\quad P ^ {f}, f ^ {f} = F _ {S} \\left(x _ {T} ^ {f}\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.45, + 0.472, + 0.587 + ], + "angle": 0, + "content": "where \\( f^p, f^f, P^p \\), and \\( P^f \\) are the source model features and predictions of the input TP and FFP images, respectively. For the target panoramic images, \\( x_T \\) is fed into \\( F_T \\) to obtain the target model features \\( f \\) and predictions \\( P \\) of the input batch of ERP images as \\( P, f = F_T(x_T) \\). However, the distinct projection formats of the input data in the source and target models make it difficult to align their features directly, thus we propose a Panoramic Prototype Adaptation Module (PPAM) to obtain panoramic prototypes for adaptation." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.587, + 0.472, + 0.753 + ], + "angle": 0, + "content": "Panoramic Prototype Adaptation Module (PPAM) Compared to prior UDA methods using prototypical adaptation, e.g., MPA [49, 50], our PPAM possesses three distinct characteristics: (a) class-wise prototypes are obtained from TP and FFP images to alleviate distortion and semantic mismatch problems; (b) global prototypes are iteratively updated with prototypes from two projections during the whole training procedure; (c) hard pseudo-labels are softened in the high-level feature space to obtain prototypes with different projection of panoramic images, indicating that the knowledge from the source model is fully utilized." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.754, + 0.471, + 0.784 + ], + "angle": 0, + "content": "Specifically, we project the source model predictions \\( P^p \\), \\( P^f \\) into pseudo labels:" + }, + { + "type": "equation", + "bbox": [ + 0.154, + 0.797, + 0.354, + 0.817 + ], + "angle": 0, + "content": "\\[\n\\hat {y} _ {(h, w, k)} ^ {p} = 1 _ {k \\dot {=} a r g m a x (P _ {h, w,:} ^ {p})},\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.154, + 0.818, + 0.47, + 0.844 + ], + "angle": 0, + "content": "\\[\n\\hat {y} _ {(H, W / 4, k)} ^ {f} = 1 _ {k \\div a r g m a x \\left(P _ {H, W / 4,:} ^ {f}\\right)}. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Here, \\( k \\) denotes the semantic category. Subsequently, we obtain the class-specific masked features by integrating the up-sampled features with the corresponding pseudo" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.09, + 0.895, + 0.173 + ], + "angle": 0, + "content": "labels \\(\\hat{y}_{(h,w,k)}^p\\) and \\(\\hat{y}_{(H,W/4,k)}^f\\). 
Notably, the prototypes \\(\\sum_{a=1}^{18} (\\tau_p^k)_a\\) and \\(\\sum_{b=1}^{4} (\\tau_f^k)_b\\) for TP and FFP images are obtained by masked average pooling (MAP) operation, as shown in Fig. 3. Within each projection, PPAM first integrates the prototypes:" + }, + { + "type": "equation", + "bbox": [ + 0.525, + 0.175, + 0.893, + 0.215 + ], + "angle": 0, + "content": "\\[\n\\tau_ {p} ^ {k} = a v g \\left(\\sum_ {a = 1} ^ {1 8} \\left(\\tau_ {p} ^ {k}\\right) _ {a}\\right), \\quad \\tau_ {f} ^ {k} = a v g \\left(\\sum_ {b = 1} ^ {4} \\left(\\tau_ {f} ^ {k}\\right) _ {b}\\right). \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.218, + 0.895, + 0.322 + ], + "angle": 0, + "content": "As shown in Fig. 2, \\(\\tau_{p}^{k}\\) and \\(\\tau_{f}^{k}\\) are integrated together as \\(\\tau_{pf}^{k}\\) to preserve the less distortion characteristics of \\(\\tau_{p}^{k}\\) and the similar scale semantics of \\(\\tau_{f}^{k}\\). The \\(\\tau_{pf}^{k}\\) is then used to update the panoramic global prototype \\(\\tau_{g}^{k}\\), which is iteratively updated with \\(\\tau_{pf}^{k}\\). To obtain more accurate and reliable prototypes, we update \\(\\tau_{g}^{k}\\) and \\(\\tau_{pf}^{k}\\) as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.584, + 0.324, + 0.893, + 0.353 + ], + "angle": 0, + "content": "\\[\n\\tau_ {g} ^ {i} = \\frac {1}{i} \\left(\\tau_ {p f} ^ {k}\\right) ^ {i} + \\left(1 - \\frac {1}{i}\\right) \\left(\\tau_ {g} ^ {k}\\right) ^ {i - 1}, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.355, + 0.892, + 0.451 + ], + "angle": 0, + "content": "where \\((\\tau_{g}^{k})^{i}\\) and \\((\\tau_{pf}^{k})^{i}\\) are the prototypes for category \\(k\\) in the \\(i\\)-th training epoch, \\((\\tau_{g}^{k})^{i-1}\\) is the panoramic global prototype saved in the last training epoch, \\(i\\) is the current epoch number. 
The panoramic global prototype \\(\\tau_{g}^{k}\\) is then used to give supervision for the target prototype \\(\\tau_{t}^{k}\\) obtained from \\(P\\) and \\(f\\) with the same operations." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.453, + 0.893, + 0.709 + ], + "angle": 0, + "content": "Besides extracting prototype knowledge from the source model, PPAM also fine-tunes the source model to improve the effectiveness of knowledge extraction. Specifically, since each ERP image can be projected to 4 FFP images, the source model's extracted features \\( f_{f} \\) have 4 pieces of FFP features. As the content of all the features is within the same ERP image, we propose to align the class-wise prototypes from each piece of the features in PPAM to enhance the model's performance. Concretely, the prototypes \\( \\sum_{\\alpha = 1}^{4}\\tau_{\\alpha} \\) of the four FFP features are obtained through the same operations with \\( \\tau_g^t \\). Each FFP image captures a non-overlapping \\( 90^\\circ \\) FoV, resulting in distinct distortions, and similar content in each FFP image. Aligning the prototypes from each FFP image enhances distortion-awareness ability in the source model and helps to explore complementary semantic content in each FFP image. The MSE loss is imposed between each two of the prototypes as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.56, + 0.712, + 0.893, + 0.753 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {s f t} = \\sum_ {\\alpha \\neq \\beta} ^ {4} \\left\\{\\frac {1}{K} \\sum_ {k \\in K} \\left(\\left(\\tau_ {f} ^ {k}\\right) _ {\\alpha} - \\left(\\tau_ {f} ^ {k}\\right) _ {\\beta}\\right) ^ {2} \\right\\}. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.756, + 0.871, + 0.772 + ], + "angle": 0, + "content": "Note that \\(\\mathcal{L}_{sft}\\) is used to fine-tune the source model \\(F_{S}\\)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.785, + 0.714, + 0.802 + ], + "angle": 0, + "content": "3.3. 
Knowledge Adaptation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.894, + 0.901 + ], + "angle": 0, + "content": "To adapt knowledge to the target domain, we impose the loss constraints on both predictions and prototypes and propose a cross-dual attention module (CDAM) at the feature level to better align the spatial and channel characteristics across the domains and projections. Specifically, the predictions of the FFP patch images are stitched to reconstruct an ERP image." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "27888" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.082, + 0.089, + 0.885, + 0.288 + ], + "angle": 0, + "content": "
MethodSFmIoURoadS.W.Build.WallFencePoleTr.L.Tr.S.Veget.Terr.SkyPers.CarΔ
PVT [39] SSL38.7455.3936.8780.8419.7215.188.045.392.1772.9132.0190.8126.7657.40-
PVT [39] MPA40.9070.7842.4782.1322.7910.7413.541.270.3071.1533.0389.6929.0764.73-
Source w/ seg-b135.8163.3624.0980.1315.6813.3916.267.420.0962.4520.2086.0523.0253.37-
SFDA w/ seg-b1 [25]38.2168.7830.7180.375.2618.9520.905.252.3670.1923.3090.2022.5557.90+2.40
ProDA w/ seg-b1 [51]37.3768.9330.8880.074.1718.6019.721.771.5670.0522.7390.6019.7157.04+2.73
GTA w/ seg-b1 [21]36.0064.6120.0479.048.0615.3619.866.022.1365.7717.7584.5626.7158.13+0.19
HCL w/ seg-b1 [18]38.3868.8230.4180.375.8820.1820.104.232.1170.5024.7489.8922.6559.04+2.57
DATC w/ seg-b1 [41]38.5469.4826.9680.6811.6415.2420.109.330.5566.1124.3185.1630.9060.58+2.73
Simt w/ seg-b1 [14]37.9468.4729.5179.626.7819.2019.482.311.3368.8526.5589.3022.3559.49+2.13
Ours w/ seg-b141.7870.1733.2481.6613.0623.4023.377.633.5971.0425.4689.3336.6064.60+5.97
Ours w/ seg-b242.1869.9932.2881.3410.6224.3524.299.193.6371.2830.0488.7537.4965.05+6.37
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.298, + 0.893, + 0.327 + ], + "angle": 0, + "content": "Table 1. Experimental results on the S-to-D scenario, the overlapped 13 classes of two datasets are used to test the UDA performance. The bold and underline denote the best and the second-best performance in source-free UDA methods, respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.352, + 0.471, + 0.414 + ], + "angle": 0, + "content": "The ERP image is then passed to the source model \\( F_{S} \\) to predict a pseudo label, which serves as the supervision for the ERP predictions of the target model \\( F_{T} \\). For simplicity, we use the Cross-Entropy (CE) loss, which is formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.103, + 0.427, + 0.471, + 0.449 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {s u p} = C E (P, 1 _ {\\dot {k} = a r g m a x (\\{R e b u i l d (P _ {H, W / 4,:} ^ {f}) \\})}). \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.46, + 0.47, + 0.508 + ], + "angle": 0, + "content": "And the prototype-level knowledge transfer loss is achieved by Mean Squared Error (MSE) loss between the panoramic global prototype \\(\\tau_g^k\\) and the target prototype \\(\\tau_t^k\\):" + }, + { + "type": "equation", + "bbox": [ + 0.179, + 0.52, + 0.47, + 0.556 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {p p a} = \\frac {1}{K} \\sum_ {k \\in K} \\left(\\tau_ {g} ^ {k} - \\tau_ {t} ^ {k}\\right) ^ {2}. \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.568, + 0.471, + 0.658 + ], + "angle": 0, + "content": "With loss \\(\\mathcal{L}_{ppa}\\), the prototypes are pushed together to transfer the source-extracted knowledge to the target domain. In summary, with the proposed PPAM, we effectively address the distortion and semantic mismatch problems at the prediction and prototype level, we now tackle the style discrepancy problem at the feature level." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.66, + 0.472, + 0.901 + ], + "angle": 0, + "content": "Cross Dual Attention Module (CDAM). Inspired by the dual attention, focusing on spatial and channel characteristics [25], our CDAM imitates the spatial and channel-wise distributions of features to alleviate the style discrepancies. Different from [25] suggesting to minimize the distribution distance of the dual attention maps between the fake source (FFP images) and target data (ERP images), our CDAM focuses on aligning the distribution between FFP and ERP of the panoramic images rather than introducing additional parameters and computation cost in estimating source data. As shown in Fig. 2, we reconstruct the FFP features \\(F^f\\) to ensure that the rebuilt feature \\(F'\\) has the same spatial size as \\(F\\). Before the cross dual attention operation, we apply a Batch Normalization Statics (BNS) guided constraint on \\(F\\) and \\(F'\\). Since the BNS of the source model should satisfy the feature distribution of the source data, we align \\(F\\) and" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.352, + 0.853, + 0.368 + ], + "angle": 0, + "content": "\\(F^{\\prime}\\) with BNS to alleviate the domain gaps as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.543, + 0.378, + 0.892, + 0.422 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {b n s} = \\left\\| \\mu (F) - \\bar {\\mu} \\right\\| _ {2} ^ {2} + \\left\\| \\sigma^ {2} (F) - \\bar {\\sigma} ^ {2} \\right\\| _ {2} ^ {2} \\\\ + \\left\\| \\mu \\left(F ^ {\\prime}\\right) - \\bar {\\mu} \\right\\| _ {2} ^ {2} + \\left\\| \\sigma^ {2} \\left(F ^ {\\prime}\\right) - \\bar {\\sigma} ^ {2} \\right\\| _ {2} ^ {2}, \\tag {8} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.432, + 0.892, + 0.461 + ], + "angle": 0, + "content": "where \\(\\bar{\\mu}\\) and \\(\\bar{\\sigma}^2\\) are the mean and variance parameters of the last BN layer in the source 
model \\(S\\)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.462, + 0.893, + 0.553 + ], + "angle": 0, + "content": "As shown in Fig. 2 (a), after aligned with BNS, the ERP feature \\( f \\) and the rebuilt feature \\( f' \\) are first reshaped to be \\( f \\in \\mathbb{R}^{N \\times C} \\) and \\( f' \\in \\mathbb{R}^{N \\times C} \\), where \\( N \\) is the number of pixels and \\( C \\) is the channel number. Then we calculate the spatial-wise attention maps \\( M_{sp} \\in \\mathbb{R}^{N \\times C} \\) and \\( M_{sp}' \\in \\mathbb{R}^{N \\times C} \\) for \\( f \\) and \\( f' \\) by:" + }, + { + "type": "equation", + "bbox": [ + 0.565, + 0.563, + 0.795, + 0.606 + ], + "angle": 0, + "content": "\\[\n\\{M _ {s p} \\} _ {j i} = \\frac {\\exp (f _ {[ i : ]} ^ {\\prime} \\cdot f _ {[ : j ]} ^ {T})}{\\sum_ {i} ^ {N} \\exp (f _ {[ i : ]} ^ {\\prime} \\cdot f _ {[ : j ]} ^ {T})},\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.565, + 0.608, + 0.892, + 0.651 + ], + "angle": 0, + "content": "\\[\n\\left\\{M _ {s p} ^ {\\prime} \\right\\} _ {j i} = \\frac {\\exp \\left(f _ {[ i : ]} \\cdot f _ {[ : j ]} ^ {T}\\right)}{\\sum_ {i} ^ {N} \\exp \\left(f _ {[ i : ]} \\cdot f _ {[ : j ]} ^ {T}\\right)}, \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.66, + 0.894, + 0.722 + ], + "angle": 0, + "content": "where \\(f^T\\) is the transpose of \\(f\\) and \\(\\{M\\}_{ij}\\) measures the impact of the \\(i\\)-th position on the \\(j\\)-th position. 
Similarly, the channel-wise attention maps \\(M_{ch} \\in \\mathbb{R}^{C \\times C}\\) and \\(M_{ch}' \\in \\mathbb{R}^{C \\times C}\\) can be obtained through:" + }, + { + "type": "equation", + "bbox": [ + 0.565, + 0.731, + 0.795, + 0.775 + ], + "angle": 0, + "content": "\\[\n\\left\\{M _ {c h} \\right\\} _ {j i} = \\frac {\\exp \\left(f _ {[ i : ]} ^ {T} \\cdot f _ {[ : j ]}\\right)}{\\sum_ {i} ^ {C} \\exp \\left(f _ {[ i : ]} ^ {\\prime} \\cdot f _ {[ : j ]} ^ {T}\\right)},\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.566, + 0.777, + 0.892, + 0.82 + ], + "angle": 0, + "content": "\\[\n\\left\\{M _ {c h} ^ {\\prime} \\right\\} _ {j i} = \\frac {\\exp \\left(f _ {[ i : ]} ^ {T} \\cdot f _ {[ : j ]} ^ {\\prime}\\right)}{\\sum_ {i} ^ {C} \\exp \\left(f _ {[ i : ]} ^ {T} \\cdot f _ {[ : j ]} ^ {\\prime}\\right)}. \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.829, + 0.892, + 0.874 + ], + "angle": 0, + "content": "After obtaining the spatial and channel attention maps, the CDAM loss can be calculated with the Kullback-Leibler divergence (KL divergence) as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.538, + 0.885, + 0.892, + 0.904 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {c d a} = K L \\left(M _ {s p}, M _ {s p} ^ {\\prime}\\right) + K L \\left(M _ {c h}, M _ {c h} ^ {\\prime}\\right) \\tag {11}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "27889" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.084, + 0.089, + 0.891, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.14, + 0.309, + 0.829, + 0.325 + ], + "angle": 0, + "content": "Figure 4. Example visualization results. (a) source, (b) SFDA [25], (c) DATC [41], (d) Ours, (e) Ground Truth (GT)." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.337, + 0.891, + 0.521 + ], + "angle": 0, + "content": "<table><tr><td>
MethodSFmIoUPersonRiderCarTruckBusTrainMotorBikeΔ
Trans4PASS-T [49]53.1848.5416.9179.5865.3355.7684.6359.0537.61-
Trans4PASS-S [49]55.2248.8523.3681.0267.3169.5386.1360.8539.09-
DAFormer [17]54.6749.6925.1577.7063.0665.6186.6865.1248.13-
DPPASS [59]55.3052.0929.4079.1958.7347.2486.4866.6038.11-
DATR [58]56.8154.6229.5080.0367.3563.7587.6767.5737.10-
Source w/ seg-b138.6540.9310.8967.6736.8615.5626.4342.6827.16-
SFDA w/ seg-b1 [25]42.7041.658.4669.9747.4833.2472.0147.6132.77+4.05
DTAC w/ seg-b1 [41]43.0643.518.3570.1035.7940.7370.5249.4932.94+4.41
Ours w/ seg-b148.7845.3615.8375.7049.1655.6882.0754.8233.76+10.13
Ours w/ seg-b250.1249.9227.2276.2247.8164.1379.4756.8335.76+11.47
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.531, + 0.893, + 0.561 + ], + "angle": 0, + "content": "Table 2. Experimental results of 8 selected categories in panoramic semantic segmentation on C-to-D. SF: Source-free UDA. The bold and underline denote the best and the second-best performance in source-free UDA methods, respectively." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.586, + 0.218, + 0.602 + ], + "angle": 0, + "content": "3.4. Optimization" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.612, + 0.471, + 0.643 + ], + "angle": 0, + "content": "The training objective for learning the target model containing three losses is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.138, + 0.662, + 0.47, + 0.68 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\lambda \\cdot \\mathcal {L} _ {p p a} + \\gamma \\cdot \\mathcal {L} _ {c d a} + \\mathcal {L} _ {b n s} + \\mathcal {L} _ {s u p} \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.697, + 0.47, + 0.773 + ], + "angle": 0, + "content": "where \\(\\mathcal{L}_{ppa}\\) is the MSE loss from PPAM, \\(\\mathcal{L}_{cda}\\) refers to the KL loss from CDAM, \\(\\mathcal{L}_{sup}\\) denotes the CE loss for the prediction pseudo label supervision loss, \\(\\mathcal{L}_{bns}\\) refers to the BNS guided feature loss, and \\(\\lambda\\) and \\(\\gamma\\) are the trade-off weights of the proposed loss terms." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.797, + 0.32, + 0.814 + ], + "angle": 0, + "content": "4. Experiments and Analysis" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.471, + 0.9 + ], + "angle": 0, + "content": "As the first SFUDA method for panoramic image segmentation, there is no prior method for direct comparison. We thus empirically validate our method by comparing it with the existing UDA and panoramic segmentation methods on three widely used benchmarks." 
+ }, + { + "type": "title", + "bbox": [ + 0.499, + 0.586, + 0.824, + 0.602 + ], + "angle": 0, + "content": "4.1. Datasets and Implementation Details." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.609, + 0.895, + 0.775 + ], + "angle": 0, + "content": "Cityscapes [11] is a real-world dataset collected for autonomous driving that contains street scenes. DensePASS [27] is a panoramic dataset designed for capturing diverse street scenes. SynPASS [50] is a synthetic dataset consisting of 9080 synthetic panoramic images. Stanford2D3D [3] is an indoor panoramic dataset which has 1413 panoramic images. Overall, the experiments are conducted on both real-world (Cityscapes-to-DensePASS, C-to-D, and Stanford2D3D-pinhole-to-Stanford2D3D-panoramic, SPinto-SPan) and synthetic-to-real (SynPASS-to-DensePASS, S-to-D) scenarios." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.787, + 0.707, + 0.803 + ], + "angle": 0, + "content": "4.2. Experimental Results." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.895, + 0.9 + ], + "angle": 0, + "content": "We first evaluate our proposed framework under the S-to-D scenario. The experimental results are shown in Tab. 1. Our proposed method consistently outperforms source-free UDA methods [25] and [41] and even achieves panoramic semantic segmentation performance closer to that of the UDA method Trans4PASS [50] which utilizes the source data" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "27890" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.086, + 0.089, + 0.89, + 0.226 + ], + "angle": 0, + "content": "
MethodSFmIoUCeilingChairDoorFloorSofaTableWallWindowΔ
PVT-S w/ MPA [49]X57.9585.8551.7618.3990.7835.9365.4375.0040.43-
Trans4PASS w/ MPA [49]X64.5285.0858.7234.9791.1246.2571.7277.5850.75-
Trans4PASS+ [50]X63.7390.6362.3024.7992.6235.7373.1678.7451.78-
Trans4PASS+ w/ MPA [50]X67.1690.0464.0442.8991.7438.3471.4581.2457.54-
SFDA [25]54.7679.4433.2052.0967.3622.5453.6469.3860.46-
Ours w/ b157.6373.8129.9863.6573.4931.7649.2572.8966.22+2.87
Ours w/ b265.7582.8838.0065.8186.7136.3266.1080.2969.88+10.99
" + }, + { + "type": "table_caption", + "bbox": [ + 0.086, + 0.236, + 0.882, + 0.251 + ], + "angle": 0, + "content": "Table 3. Experimental results on indoor Stanford2D3D [3]. The bold denotes the best performance among UDA and SFUDA methods." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.274, + 0.472, + 0.413 + ], + "angle": 0, + "content": "
Loss Function CombinationsC-to-DS-to-D
\\( \\mathcal{L}_{sup} \\)\\( \\mathcal{L}_{ppa} \\)\\( \\mathcal{L}_{sft} \\)\\( \\mathcal{L}_{cda} \\)\\( \\mathcal{L}_{bns} \\)mIoUΔmIoUΔ
38.65-35.81-
45.42+6.7738.37+2.56
46.23+7.5838.49+2.68
44.24+5.5938.38+2.57
44.79+6.1438.52+2.71
48.78+10.1341.78+5.97
" + }, + { + "type": "table_caption", + "bbox": [ + 0.1, + 0.423, + 0.446, + 0.437 + ], + "angle": 0, + "content": "Table 4. Ablation study of different module combinations." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.459, + 0.471, + 0.746 + ], + "angle": 0, + "content": "in the adaptation procedure. Our proposed method brings significant performance gain of \\(+3.57\\%\\) and \\(+3.54\\%\\) with SegFormer-B1 backbone then SFDA [25] and DATC [41], respectively. We also provide the TSNE visualization in Fig. 5 (b) and qualitative results in Fig. 4. Apparently, our method gains a significant improvement in distinguishing the pixels in panoramic images in both prediction and high-level feature space. As shown in Tab. 2, we then evaluate our proposed framework under the C-to-D scenario. Our proposed method significantly outperforms source-free methods [25, 41] and some panoramic semantic segmentation methods [43, 46, 48]. Specifically, our method achieves a significant performance gain over SFDA [25] and DTAC [41] by \\(+6.08\\%\\) and \\(+5.72\\%\\), respectively. This demonstrates that our proposed method endowed by PPAM and CDAM is more suitable for panoramic semantic segmentation tasks. Furthermore, as shown in the qualitative results in Fig. 4, our method achieves better segmentation in driving-related categories, such as rider and car." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.472, + 0.902 + ], + "angle": 0, + "content": "We also provide TSNE visualizations [35] in Fig. 5 (a), showing that our proposed method brings significant improvements in distinguishing pixels from different categories in high-level feature space. Additionally, we evaluated our proposed method on the Stanford2D3D [3] dataset and compared it with the SFDA [25] and MPA [50] methods. As shown in the following table, our proposed method significantly outperforms the SFDA by \\(+7.09\\%\\) mIoU and is on par with the MPA method using source data \\((61.85\\%\\) vs. 
\\(67.16\\%)\\). Notably, for some categories, such as door \\((57.90\\%\\)" + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.275, + 0.892, + 0.319 + ], + "angle": 0, + "content": "
Combinationsτg+τpτg+τfτg+τp+τf
mIoU44.1444.2845.42
" + }, + { + "type": "table_caption", + "bbox": [ + 0.515, + 0.329, + 0.875, + 0.343 + ], + "angle": 0, + "content": "Table 5. Ablation study of different prototype combinations." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.364, + 0.892, + 0.395 + ], + "angle": 0, + "content": "vs. \\(42.89\\%\\) ) and window \\((68.06\\%\\) vs. \\(57.54\\%)\\), our method event outperforms the MPA [50]." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.408, + 0.651, + 0.424 + ], + "angle": 0, + "content": "5. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.433, + 0.895, + 0.659 + ], + "angle": 0, + "content": "Different Loss Function Combinations. To assess the effectiveness of the proposed modules, we conduct ablation experiments on both real-world and synthetic-to-real scenarios with various loss combinations. All of the proposed modules and loss functions have a positive impact on improving segmentation performance. Notably, our PPAM yields a significant performance gain of \\(+6.77\\%\\). This indicates that PPAM alleviates the intricate semantics and distortion problem with the tangent, and our proposed FFP projection is valid. This is further supported by the qualitative results presented in Fig. 4. Additionally, our proposed CDAM achieves a performance gain of \\(+5.59\\%\\) compared to the source baseline, which means that CDAM imitates the spatial and channel-wise distributions of ERP and FFP features and further addresses the style discrepancy problems." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.66, + 0.895, + 0.825 + ], + "angle": 0, + "content": "Ablation of Different Prototype Combinations. To validate the effectiveness of all the prototypes in PPAM, we conduct experiments on C-to-D using SegFormer-B1 and only \\(\\mathcal{L}_{sup}\\) and \\(\\mathcal{L}_{ppa}\\). The results of the performance with different prototype combinations are presented in Tab. 5. 
Both prototypes from TP and FFP have a positive effect on PPAM, with \\(\\tau_{p}\\) and \\(\\tau_{f}\\) resulting in mIoU improvements of \\(+5.49\\%\\) and \\(+5.63\\%\\), respectively, compared to the source baseline. When both prototypes are combined together, there is a mIoU gain of \\(+6.77\\%\\), indicating that their combination is better for prototype-level adaptation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.895, + 0.901 + ], + "angle": 0, + "content": "Dual Attention vs. Cross Dual Attention. The dual attention (DA) approach proposed in SFDA [25] aligns the spatial and channel characteristics of features between the fake source and target data. In contrast, our cross dual attention (CDA) approach aligns the distribution between different" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "27891" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.086, + 0.09, + 0.468, + 0.341 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.35, + 0.472, + 0.379 + ], + "angle": 0, + "content": "Figure 5. TSNE visualization of (a) Cityscapes-to-DensePASS and (b) SynPASS-to-DensePASS." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.396, + 0.481, + 0.45 + ], + "angle": 0, + "content": "
FoVw/o60°72°90°120°180°360°
mIoU38.6544.0344.1644.2844.0241.6540.31
Δ-+5.38+5.51+5.63+5.37+3.00+1.66
" + }, + { + "type": "table_caption", + "bbox": [ + 0.104, + 0.459, + 0.442, + 0.475 + ], + "angle": 0, + "content": "Table 6. Ablation study of the FoV of our proposed FFP." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.503, + 0.47, + 0.625 + ], + "angle": 0, + "content": "projections of the same spherical data, specifically ERP and FFP, resulting in more robust and stable knowledge transfer. Moreover, in our SFDA, we obtain spatial and channel characteristics across features, whereas DA operates within features. We also evaluate DA on the C-to-D scenario, and our CDA achieves \\(44.24\\%\\) mIoU, while DA only reaches \\(41.53\\%\\) mIoU. This indicates the proposed CDA is better for SFUDA in panoramic semantic segmentation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.626, + 0.471, + 0.822 + ], + "angle": 0, + "content": "Field-of-view of FFP. Most existing approaches for panoramic semantic segmentation, such as those proposed in [49, 50, 59], primarily focus on alleviating distortion by introducing distortion-aware components and distinct projection strategies. However, as discussed in Sec. 3.2, \\(360^{\\circ}\\) images contain more intricate semantic information and object correspondence than the pinhole images, resulting in an obvious semantic mismatch between domains. Therefore, we propose the Fixed FoV Pooling (FFP) strategy to address the semantic mismatch. Experimental results show that the fixed FoV is the most influential factor in FFP, with an FoV of \\(90^{\\circ}\\) achieving the best segmentation performance, as shown in Tab. 6, with a mIoU of \\(44.28\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.824, + 0.472, + 0.884 + ], + "angle": 0, + "content": "Ablation of Hyper-parameters. We now show the influence of hyperparameters \\(\\gamma\\) and \\(\\lambda\\), which are the weights for the KL loss in CDAM and the MSE loss in PPAM, respectively. The experimental results are provided in Tab. 7." 
+ }, + { + "type": "text", + "bbox": [ + 0.077, + 0.886, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Fine-tuning the Source Model. As the pre-trained model" + }, + { + "type": "table", + "bbox": [ + 0.509, + 0.089, + 0.905, + 0.196 + ], + "angle": 0, + "content": "
γ00.010.020.050.10.2
mIoU38.6542.0543.2443.2844.2443.07
Δ-+3.40+4.59+4.63+5.59+4.42
λ0506080100120
mIoU38.6543.1343.2245.3645.4245.34
Δ-+4.48+4.57+6.71+6.77+6.69
" + }, + { + "type": "table_caption", + "bbox": [ + 0.589, + 0.206, + 0.802, + 0.22 + ], + "angle": 0, + "content": "Table 7. Ablation study of \\( \\gamma \\) and \\( \\lambda \\) ." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.239, + 0.895, + 0.512 + ], + "angle": 0, + "content": "in the source (pinhole) domain is not an ideal model for the target (panoramic) image domain, we propose to fine-tune the source model with the loss function \\(\\mathcal{L}_{sft}\\), as described in Sec. 3.2. Tab. 4 demonstrates the effectiveness of the proposed \\(\\mathcal{L}_{sft}\\). When combined with the prototypical adaptation loss \\(\\mathcal{L}_{ppa}\\), adding \\(\\mathcal{L}_{sft}\\) results in a \\(6.77\\%\\) mIoU gain compared with the source baseline of \\(38.65\\%\\). We present the performance metrics derived solely from the loss \\(\\mathcal{L}_{sft}\\) of PPAM: C-2-D registers at \\(44.94\\%\\) while S-2-D records \\(36.74\\%\\). These results underscore the efficacy of \\(\\mathcal{L}_{sft}\\) integrated within our PPAM module. Concerning transfer-ability, our \\(\\mathcal{L}_{sft}\\) exhibits compatibility with various projection methods, e.g., cube map. At its core, our fine-tuning loss seeks to align all projection images originating from the same panoramic source, irrespective of the employed projection technique. This intrinsic adaptability facilitates the application of \\(\\mathcal{L}_{sft}\\) across diverse projections. More results refer to the supplementary material." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.525, + 0.62, + 0.541 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.55, + 0.895, + 0.672 + ], + "angle": 0, + "content": "In this paper, we investigated a new problem of achieving SFUDA for panoramic semantic segmentation. 
To this end, we proposed an end-to-end SFUDA framework to address the domain shifts, including semantic mismatch, distortion, and style discrepancies, between pinhole and panoramic domains. Experiments on both real-world and synthetic benchmarks show that our proposed framework outperforms prior approaches and is on par with the methods using source data." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.671, + 0.895, + 0.837 + ], + "angle": 0, + "content": "Limitation and future work. One limitation of our proposed framework is the computational cost brought by the tangent projection during training, and there is still room for improvements in segmentation performance. However, components in our approach such as panoramic prototypes and fixed FoV projection have significant implications for the \\(360^{\\circ}\\) vision, especially for the panoramic semantic segmentation. In the future, we plan to utilize the large language models (LLMs) and Multi-modal large language models (MLLMs) to alleviate the domain gaps, such as the semantic mismatches between pinhole and panoramic images." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.837, + 0.895, + 0.898 + ], + "angle": 0, + "content": "Acknowledgement This paper is supported by the National Natural Science Foundation of China (NSF) under Grant No. NSFC22FYT45 and the Guangzhou City, University and Enterprise Joint Fund under Grant No.SL2022A03J01278." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "27892" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.17 + ], + "angle": 0, + "content": "[1] Hao Ai, Zidong Cao, Jinjing Zhu, Haotian Bai, Yucheng Chen, and Ling Wang. Deep learning for omnidirectional vision: A survey and new perspectives. arXiv preprint arXiv:2205.10468, 2022. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.173, + 0.472, + 0.228 + ], + "angle": 0, + "content": "[2] Nikita Araslanov and Stefan Roth. Self-supervised augmentation consistency for adapting semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15384-15394, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.231, + 0.472, + 0.272 + ], + "angle": 0, + "content": "[3] Iro Armeni, Sasha Sax, Amir R Zamir, and Silvio Savarese. Joint 2d-3d-semantic data for indoor scene understanding. arXiv preprint arXiv:1702.01105, 2017. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.274, + 0.472, + 0.327 + ], + "angle": 0, + "content": "[4] Mathilde Bateson, Hoel Kervadec, Jose Dolz, Herve Lombaert, and Ismail Ben Ayed. Source-free domain adaptation for image segmentation. Medical Image Analysis, 82:102617, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.331, + 0.472, + 0.401 + ], + "angle": 0, + "content": "[5] Chaoqi Chen, Weiping Xie, Tingyang Xu, Wenbing Huang, Yu Rong, Xinghao Ding, Yue Huang, and Junzhou Huang. Progressive feature alignment for unsupervised domain adaptation. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 627-636, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.403, + 0.472, + 0.457 + ], + "angle": 0, + "content": "[6] Jialei Chen, Daisuke Deguchi, Chenkai Zhang, Xu Zheng, and Hiroshi Murase. Frozen is better than learning: A new design of prototype-based classifier for semantic segmentation. Available at SSRN 4617170. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.46, + 0.472, + 0.515 + ], + "angle": 0, + "content": "[7] Jialei Chen, Chong Fu, Haoyu Xie, Xu Zheng, Rong Geng, and Chiu-Wing Sham. Uncertainty teacher with dense focal loss for semi-supervised medical image segmentation. Computers in Biology and Medicine, 149:106034, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.517, + 0.472, + 0.572 + ], + "angle": 0, + "content": "[8] Jialei Chen, Daisuke Deguchi, Chenkai Zhang, Xu Zheng, and Hiroshi Murase. Clip is also a good teacher: A new learning framework for inductive zero-shot semantic segmentation. arXiv preprint arXiv:2310.02296, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.575, + 0.472, + 0.63 + ], + "angle": 0, + "content": "[9] Minghao Chen, Hongyang Xue, and Deng Cai. Domain adaptation for semantic segmentation with maximum squares loss. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2090-2099, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.633, + 0.472, + 0.7 + ], + "angle": 0, + "content": "[10] Jaehoon Choi, Taekyung Kim, and Changick Kim. Self-ensembling with gan-based data augmentation for domain adaptation in semantic segmentation. 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pages 6829–6839, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.703, + 0.472, + 0.785 + ], + "angle": 0, + "content": "[11] Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In Proc. of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.788, + 0.472, + 0.844 + ], + "angle": 0, + "content": "[12] Marc Eder, Mykhailo Shvets, John Lim, and Jan-Michael Frahm. Tangent images for mitigating spherical distortion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12426-12434, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.472, + 0.901 + ], + "angle": 0, + "content": "[13] Francois Fleuret et al. Uncertainty reduction for model adaptation in semantic segmentation. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9613-9623, 2021. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.895, + 0.162 + ], + "angle": 0, + "content": "[14] Xiaoqing Guo, Jie Liu, Tongliang Liu, and Yixuan Yuan. Simt: Handling open-set noise for domain adaptive semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7032-7041, 2022. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.164, + 0.895, + 0.205 + ], + "angle": 0, + "content": "[15] Judy Hoffman, Dequan Wang, Fisher Yu, and Trevor Darrell. Fcns in the wild: Pixel-level adversarial and constraint-based adaptation. ArXiv, abs/1612.02649, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.207, + 0.895, + 0.261 + ], + "angle": 0, + "content": "[16] Judy Hoffman, Eric Tzeng, Taesung Park, Jun-Yan Zhu, Phillip Isola, Kate Saenko, Alexei A. Efros, and Trevor Darrell. Cycada: Cycle-consistent adversarial domain adaptation. In ICML, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.264, + 0.895, + 0.332 + ], + "angle": 0, + "content": "[17] Lukas Hoyer, Dengxin Dai, and Luc Van Gool. Daformer: Improving network architectures and training strategies for domain-adaptive semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9924-9935, 2022. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.334, + 0.895, + 0.403 + ], + "angle": 0, + "content": "[18] Jiaxing Huang, Dayan Guan, Aoran Xiao, and Shijian Lu. Model adaptation: Historical contrastive learning for unsupervised domain adaptation without source data. Advances in Neural Information Processing Systems, 34:3635-3649, 2021. 
2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.405, + 0.895, + 0.461 + ], + "angle": 0, + "content": "[19] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.462, + 0.895, + 0.518 + ], + "angle": 0, + "content": "[20] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.52, + 0.895, + 0.588 + ], + "angle": 0, + "content": "[21] Jogendra Nath Kundu, Akshay Kulkarni, Amit Singh, Varun Jampani, and R Venkatesh Babu. Generalize then adapt: Source-free domain adaptive semantic segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7046-7056, 2021. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.591, + 0.895, + 0.646 + ], + "angle": 0, + "content": "[22] Yunsheng Li, Lu Yuan, and Nuno Vasconcelos. Bidirectional learning for domain adaptation of semantic segmentation. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6929-6938, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.648, + 0.895, + 0.716 + ], + "angle": 0, + "content": "[23] Yuyan Li, Yuliang Guo, Zhixin Yan, Xinyu Huang, Ye Duan, and Liu Ren. Omnifusion: 360 monocular depth estimation via geometry-aware fusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2801-2810, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.718, + 0.895, + 0.773 + ], + "angle": 0, + "content": "[24] Mengyi Liu, Shuhui Wang, Yulan Guo, Yuan He, and Hui Xue. 
Pano-sfmlearner: Self-supervised multi-task learning of depth and semantics in panoramic videos. IEEE Signal Processing Letters, 28:832-836, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.775, + 0.895, + 0.83 + ], + "angle": 0, + "content": "[25] Yang Liu, Wei Zhang, and Jun Wang. Source-free domain adaptation for semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1215-1224, 2021. 2, 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.832, + 0.895, + 0.901 + ], + "angle": 0, + "content": "[26] Yawei Luo, Liang Zheng, Tao Guan, Junqing Yu, and Yi Yang. Taking a closer look at domain shift: Category-level adversaries for semantics consistent domain adaptation. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2502-2511, 2019. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.895, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "27893" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.176 + ], + "angle": 0, + "content": "[27] Chaoxiang Ma, Jiaming Zhang, Kailun Yang, Alina Roitberg, and Rainer Stiefelhagen. Densepass: Dense panoramic semantic segmentation via unsupervised domain adaptation with attention-augmented context exchange. In 2021 IEEE International Intelligent Transportation Systems Conference (ITSC), pages 2766-2772. IEEE, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.178, + 0.472, + 0.233 + ], + "angle": 0, + "content": "[28] Luke Melas-Kyriazi and Arjun K. Manrai. Pixmatch: Unsupervised domain adaptation via pixelwise consistency training. 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 12430-12440, 2021. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.235, + 0.472, + 0.303 + ], + "angle": 0, + "content": "[29] Zak Murez, Soheil Kolouri, David J. Kriegman, Ravi Ramamoorthi, and Kyungnam Kim. Image to image translation for domain adaptation. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4500-4509, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.305, + 0.472, + 0.375 + ], + "angle": 0, + "content": "[30] Fei Pan, Inkyu Shin, Francois Rameau, Seokju Lee, and In So Kweon. Unsupervised intra-domain adaptation for semantic segmentation through self-supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3764-3773, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.377, + 0.472, + 0.446 + ], + "angle": 0, + "content": "[31] Swami Sankaranarayanan, Yogesh Balaji, Arpit Jain, Ser-Nam Lim, and Rama Chellappa. Learning from synthetic data: Addressing domain shift for semantic segmentation. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3752-3761, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.448, + 0.472, + 0.516 + ], + "angle": 0, + "content": "[32] Weifa Shen, Qixiong Wang, Hongxiang Jiang, Sen Li, and Jihao Yin. Unsupervised domain adaptation for semantic segmentation via self-supervision. In 2021 IEEE International Geoscience and Remote Sensing Symposium IGARSS, pages 2747-2750. IEEE, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.518, + 0.472, + 0.572 + ], + "angle": 0, + "content": "[33] Serban Stan and Mohammad Rostami. Unsupervised model adaptation for continual semantic segmentation. In Proceedings of the AAAI conference on artificial intelligence, pages 2593-2601, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.576, + 0.472, + 0.645 + ], + "angle": 0, + "content": "[34] Yi-Hsuan Tsai, Wei-Chih Hung, Samuel Schulter, Kihyuk Sohn, Ming-Hsuan Yang, and Manmohan Chandraker. 
Learning to adapt structured output space for semantic segmentation. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7472-7481, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.647, + 0.472, + 0.687 + ], + "angle": 0, + "content": "[35] Laurens Van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. Journal of machine learning research, 9(11), 2008. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.689, + 0.472, + 0.759 + ], + "angle": 0, + "content": "[36] Tuan-Hung Vu, Himalaya Jain, Maxime Bucher, Matthieu Cord, and Patrick Pérez. Advent: Adversarial entropy minimization for domain adaptation in semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2517-2526, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.761, + 0.472, + 0.83 + ], + "angle": 0, + "content": "[37] Tuan-Hung Vu, Himalaya Jain, Maxime Bucher, Matthieu Cord, and Patrick Pérez. Dada: Depth-aware domain adaptation in semantic segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7364-7373, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.472, + 0.9 + ], + "angle": 0, + "content": "[38] Qin Wang, Dengxin Dai, Lukas Hoyer, Olga Fink, and Luc Van Gool. Domain adaptive semantic segmentation with self-supervised depth estimation. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 8495-8505, 2021. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.174 + ], + "angle": 0, + "content": "[39] Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, Kaitao Song, Ding Liang, Tong Lu, Ping Luo, and Ling Shao. Pyramid vision transformer: A versatile backbone for dense prediction without convolutions. 
In Proceedings of the IEEE/CVF international conference on computer vision, pages 568-578, 2021. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.176, + 0.894, + 0.231 + ], + "angle": 0, + "content": "[40] Haoyu Xie, Chong Fu, Xu Zheng, Yu Zheng, Chiu-Wing Sham, and Xingwei Wang. Adversarial co-training for semantic segmentation over medical images. Computers in biology and medicine, 157:106736, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.233, + 0.894, + 0.301 + ], + "angle": 0, + "content": "[41] Cheng-Yu Yang, Yuan-Jhe Kuo, and Chiou-Ting Hsu. Source free domain adaptation for semantic segmentation via distribution transfer and adaptive class-balanced self-training. In 2022 IEEE International Conference on Multimedia and Expo (ICME), pages 1-6. IEEE, 2022. 2, 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.302, + 0.894, + 0.357 + ], + "angle": 0, + "content": "[42] Kailun Yang, Xinxin Hu, Luis M Bergasa, Eduardo Romero, and Kaiwei Wang. Pass: Panoramic annular semantic segmentation. IEEE Transactions on Intelligent Transportation Systems, 21(10):4171-4185, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.358, + 0.894, + 0.413 + ], + "angle": 0, + "content": "[43] Kailun Yang, Xinxin Hu, Yicheng Fang, Kaiwei Wang, and Rainer Stiefelhagen. Omnisupervised omnidirectional semantic segmentation. IEEE Transactions on Intelligent Transportation Systems, 2020. 1, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.414, + 0.894, + 0.481 + ], + "angle": 0, + "content": "[44] Mucong Ye, Jing Zhang, Jinpeng Ouyang, and Ding Yuan. Source data-free unsupervised domain adaptation for semantic segmentation. In Proceedings of the 29th ACM International Conference on Multimedia, pages 2233-2242, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.484, + 0.894, + 0.551 + ], + "angle": 0, + "content": "[45] Hao-Wei Yeh, Baoyao Yang, Pong C Yuen, and Tatsuya Harada. 
Sofa: Source-data-free feature alignment for unsupervised domain adaptation. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 474–483, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.553, + 0.894, + 0.636 + ], + "angle": 0, + "content": "[46] Xiangyu Yue, Zangwei Zheng, Shanghang Zhang, Yang Gao, Trevor Darrell, Kurt Keutzer, and Alberto Sangiovanni Vincentelli. Prototypical cross-domain self-supervised learning for few-shot unsupervised domain adaptation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13834-13844, 2021. 1, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.637, + 0.894, + 0.718 + ], + "angle": 0, + "content": "[47] Cheng Zhang, Zhaopeng Cui, Cai Chen, Shuaicheng Liu, Bing Zeng, Hujun Bao, and Yinda Zhang. Deeppanocontext: Panoramic 3d scene understanding with holistic scene context graph and relation-based optimization. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 12612-12621, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.72, + 0.894, + 0.789 + ], + "angle": 0, + "content": "[48] Jiaming Zhang, Chaoxiang Ma, Kailun Yang, Alina Roitberg, Kunyu Peng, and Rainer Stiefelhagen. Transfer beyond the field of view: Dense panoramic semantic segmentation via unsupervised domain adaptation. IEEE Transactions on Intelligent Transportation Systems, 2021. 1, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.79, + 0.894, + 0.872 + ], + "angle": 0, + "content": "[49] Jiaming Zhang, Kailun Yang, Chaoxiang Ma, Simon Reiß, Kunyu Peng, and Rainer Stiefelhagen. Bending reality: Distortion-aware transformers for adapting to panoramic semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16917-16927, 2022. 
1, 2, 3, 4, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.873, + 0.894, + 0.902 + ], + "angle": 0, + "content": "[50] Jiaming Zhang, Kailun Yang, Hao Shi, Simon Reiß, Kunyu Peng, Chaoxiang Ma, Haodong Fu, Kaiwei Wang, and Rainer" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "27894" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "Stiefelhagen. Behind every domain there is a shift: Adapting distortion-aware vision transformers for panoramic semantic segmentation. arXiv preprint arXiv:2207.11860, 2022. 1, 2, 4, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.149, + 0.472, + 0.232 + ], + "angle": 0, + "content": "[51] Pan Zhang, Bo Zhang, Ting Zhang, Dong Chen, Yong Wang, and Fang Wen. Prototypical pseudo label denoising and target structure learning for domain adaptive semantic segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12414-12424, 2021. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.234, + 0.472, + 0.289 + ], + "angle": 0, + "content": "[52] Qiming Zhang, Jing Zhang, Wei Liu, and Dacheng Tao. Category anchor-guided unsupervised domain adaptation for semantic segmentation. Advances in neural information processing systems, 32, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.291, + 0.472, + 0.346 + ], + "angle": 0, + "content": "[53] Yang Zhang, Philip David, and Boqing Gong. Curriculum domain adaptation for semantic segmentation of urban scenes. 2017 IEEE International Conference on Computer Vision (ICCV), pages 2039-2049, 2017. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.348, + 0.472, + 0.403 + ], + "angle": 0, + "content": "[54] Yuyang Zhao, Zhun Zhong, Zhiming Luo, Gim Hee Lee, and Nicu Sebe. Source-free open compound domain adaptation in semantic segmentation. IEEE Transactions on Circuits and Systems for Video Technology, 32(10):7019-7032, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.404, + 0.472, + 0.459 + ], + "angle": 0, + "content": "[55] Xu Zheng, Chong Fu, Haoyu Xie, Jialei Chen, Xingwei Wang, and Chiu-Wing Sham. Uncertainty-aware deep co-training for semi-supervised medical image segmentation. Computers in Biology and Medicine, 149:106051, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.461, + 0.472, + 0.514 + ], + "angle": 0, + "content": "[56] Xu Zheng, Yunhao Luo, Hao Wang, Chong Fu, and Lin Wang. Transformer-cnn cohort: Semi-supervised semantic segmentation by the best of both students. arXiv preprint arXiv:2209.02178, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.517, + 0.472, + 0.559 + ], + "angle": 0, + "content": "[57] Xu Zheng, Yunhao Luo, Pengyuan Zhou, and Lin Wang. Distilling efficient vision transformers from cnns for semantic segmentation. arXiv preprint arXiv:2310.07265, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.56, + 0.472, + 0.628 + ], + "angle": 0, + "content": "[58] Xu Zheng, Tianbo Pan, Yunhao Luo, and Lin Wang. Look at the neighbor: Distortion-aware unsupervised domain adaptation for panoramic semantic segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 18687-18698, 2023. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.63, + 0.472, + 0.697 + ], + "angle": 0, + "content": "[59] Xu Zheng, Jinjing Zhu, Yexin Liu, Zidong Cao, Chong Fu, and Lin Wang. Both style and distortion matter: Dual-path unsupervised domain adaptation for panoramic semantic segmentation. arXiv preprint arXiv:2303.14360, 2023. 
1, 2, 4, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.7, + 0.472, + 0.77 + ], + "angle": 0, + "content": "[60] Jinjing Zhu, Yunhao Luo, Xu Zheng, Hao Wang, and Lin Wang. A good student is cooperative and reliable: Cnn-transformer collaborative learning for semantic segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11720-11730, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.771, + 0.472, + 0.838 + ], + "angle": 0, + "content": "[61] Yang Zou, Zhiding Yu, BVK Kumar, and Jinsong Wang. Unsupervised domain adaptation for semantic segmentation via class-balanced self-training. In Proceedings of the European conference on computer vision (ECCV), pages 289-305, 2018. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.838 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "27895" + } + ] +] \ No newline at end of file diff --git a/2024/Semantics Distortion and Style Matter_ Towards Source-free UDA for Panoramic Segmentation/fc7b011a-8b94-4431-a0e3-dcc09545c286_origin.pdf b/2024/Semantics Distortion and Style Matter_ Towards Source-free UDA for Panoramic Segmentation/fc7b011a-8b94-4431-a0e3-dcc09545c286_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3d9d5c037430072024aa435bd80e45a1975c4b35 --- /dev/null +++ b/2024/Semantics Distortion and Style Matter_ Towards Source-free UDA for Panoramic Segmentation/fc7b011a-8b94-4431-a0e3-dcc09545c286_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b202646e32eae9f42286768d4cff0357ba2febba6b1962a3856aee8111d6456f +size 5200315 diff --git a/2024/Semantics Distortion and Style Matter_ Towards Source-free UDA for Panoramic Segmentation/full.md b/2024/Semantics Distortion and Style Matter_ Towards Source-free UDA for Panoramic Segmentation/full.md new file mode 100644 index 
0000000000000000000000000000000000000000..395dce5b80b3dc766225312fb6673dee284af3fa --- /dev/null +++ b/2024/Semantics Distortion and Style Matter_ Towards Source-free UDA for Panoramic Segmentation/full.md @@ -0,0 +1,319 @@ +# Semantics, Distortion, and Style Matter: Towards Source-free UDA for Panoramic Segmentation + +Xu Zheng $^{1}$ Pengyuan Zhou $^{3}$ Athanasios V. Vasilakos $^{4}$ Lin Wang $^{1,2*}$ $^{1}$ AI Thrust, HKUST(GZ) $^{2}$ Dept. of CSE, HKUST $^{3}$ Aarhus University $^{4}$ University of Agder zhengxu128@gmail.com, pengyuan.zhou@ece.au.dk, th.vasilakos@gmail.com, linwang@ust.hk Project Page: https://vlislab22.github.io/360SFUDA/ + +# Abstract + +This paper addresses an interesting yet challenging problem—source-free unsupervised domain adaptation (SFUDA) for pinhole-to-panoramic semantic segmentation—given only a pinhole image-trained model (i.e., source) and unlabeled panoramic images (i.e., target). Tackling this problem is nontrivial due to the semantic mismatches, style discrepancies, and inevitable distortion of panoramic images. To this end, we propose a novel method that utilizes Tangent Projection (TP) as it has less distortion and meanwhile slits the equirectangular projection (ERP) with a fixed FoV to mimic the pinhole images. Both projections are shown effective in extracting knowledge from the source model. However, the distinct projection discrepancies between source and target domains impede the direct knowledge transfer; thus, we propose a panoramic prototype adaptation module (PPAM) to integrate panoramic prototypes from the extracted knowledge for adaptation. We then impose the loss constraints on both predictions and prototypes and propose a cross-dual attention module (CDAM) at the feature level to better align the spatial and channel characteristics across the domains and projections. Both knowledge extraction and transfer processes are synchronously updated to reach the best performance. 
Extensive experiments on the synthetic and real-world benchmarks, including outdoor and indoor scenarios, demonstrate that our method achieves significantly better performance than prior SFUDA methods for pinhole-to-panoramic adaptation. + +# 1. Introduction + +The comprehensive scene perception abilities of $360^{\circ}$ cameras have made them highly popular for applications, such as autonomous driving [1]. In contrast to pinhole cameras that capture 2D planer images with a limited field-of-view (FoV), $360^{\circ}$ cameras offer a much wider FoV of + +![](images/e7dab01ecd680f1cef9b78e4cc3b1aeab906a9ee8fdb9476ed363efce7353ec9.jpg) +Figure 1. We address a new problem of achieving source-free pinhole-to-panoramic adaptation for segmentation. + +$360^{\circ} \times 180^{\circ}$ . As a result, research on panoramic semantic segmentation [42, 43, 46, 48, 49] has been actively explored to achieve dense scene understanding for intelligent systems. + +Generally, the spherical data captured by the $360^{\circ}$ cameras is always projected into 2D planar representations, e.g., Equirectangular Projection (ERP), to be aligned with the existing imaging pipeline [1] while preserving the omnidirectional information1. However, ERP suffers from the inevitable distortion and object deformation due to the nonuniformly distributed pixels [59]. Meanwhile, learning effective panoramic segmentation models is often impeded by the lack of large precisely labeled datasets due to the difficulty of annotation. For these reasons, some unsupervised domain adaptation (UDA) methods [49, 50, 59] have been proposed to transfer the knowledge from the pinhole image domain to the panoramic image domain. In some crucial application scenarios, e.g., autonomous driving, source datasets are not always accessible due to privacy and commercial issues, such as data portability and transmission costs. 
One typical example is the recent large model, SAM [19], which brings significant progress in instance segmentation for pinhole images; however, the source datasets are too large (10TB) to be reused in end-tasks, such as [20]. + +Motivation: In this paper, we probe an interesting yet challenging problem: source-free UDA (SFUDA) for panoramic segmentation, in which only the source model (pretrained with pinhole images) and unlabeled panoramic images are available. As shown in Fig. 1 (a), different from existing SFUDA methods, e.g., [25, 41, 44] for the pinhole-to-pinhole image adaptation, transferring knowledge from the pinhole-to-panoramic image domain is hampered by: 1) semantic mismatch caused by the different FoV between the pinhole and $360^{\circ}$ cameras, i.e., $70^{\circ}$ vs. $360^{\circ}$ ; 2) inevitable distortion of the ERP; 3) style discrepancies caused by the distinct camera sensors and captured scenes. In Tab. 2, we show that naively adapting existing SFUDA methods to our problem leads to a limited performance boost. + +Contributions: To this end, we propose a novel SFUDA method that effectively extracts knowledge from the source model with only panoramic images and transfers the knowledge to the target panoramic domain. Our key idea is to leverage the multi-projection versatility of $360^{\circ}$ data for efficient domain knowledge transfer. Our method enjoys two key technical contributions. Specifically, we use Tangent Projection (TP) and divide the ERP images into patches with a fixed FoV, dubbed Fixed FoV Projection (FFP), to extract knowledge from the source model with less distortion and similar FoV to the pinhole images. Both projections make it possible to effectively extract knowledge from the source model. However, directly transferring the extracted knowledge to the target model is hardly approachable due to the distinct projection gaps. 
Thus, we propose a panoramic prototype adaptation module (PPAM) to obtain class-wise semantic prototypes from the features and predictions of the source model with TP and FFP images (Sec. 3.2). Then, these prototypes are integrated together to obtain the global panoramic prototypes for knowledge adaptation, which is updated across the adaptation procedure. Moreover, our proposed PPAM also fine-tunes the source model to promote better knowledge extraction using prototypes extracted from FFP images. Aligning the prototypes from each FFP image enables the source model to become more aware of distortion and semantics across the FoV. + +We initially apply both prediction-level and prototype-level loss constraints to facilitate knowledge transfer to the unlabeled target panoramic domain. Concretely, the FFP predictions of the source model are rebuilt together to provide a pseudo-supervision signal for the target model. The prototype-level loss constraint is performed between the panoramic prototypes from PPAM and the prototypes from the target model's features and predictions on the ERP images. Moreover, knowledge from the source model is not limited to predictions and prototypes, high-level features also contain crucial image characteristics that can enhance the performance of the target model. Consequently, we propose a Cross-Dual Attention Module (CDAM) that aligns + +spatial and channel characteristics between domains to fully utilize the knowledge from the source model and address the style discrepancy problem (Sec. 3.3). Specifically, CDAM reconstructs the source model features from FFP images to provide a panoramic perception of the surrounding environment and aligns them with the ERP features from the target model for effective knowledge transfer. + +We conduct extensive experiments on both synthetic and real-world benchmarks, including outdoor and indoor scenarios. 
As no directly comparable works exist, we adapt the state-of-the-art (SoTA) SFUDA methods [14, 18, 21, 25, 41, 51] – designed for pinhole-to-pinhole image adaptation – to our problem in addressing the panoramic semantic segmentation. The results show that our framework significantly outperforms these methods by large margins of $+6.37\%$ , $+11.47\%$ , and $+10.99\%$ on three benchmarks. We also evaluate our method against UDA methods [49, 50, 58, 59], using the source pinhole image, the results demonstrate its comparable performance. + +# 2. Related Work + +# 2.1. Source-free UDA for Segmentation + +UDA aims to mitigate the impact of domain shift caused by data distribution discrepancies in downstream computer vision tasks, such as semantic segmentation [2, 6-9, 13, 17, 30, 32, 33, 36, 37, 40, 52, 55-57, 60, 61]. However, the source domain data may not always be accessible due to the privacy protection and data storage concerns. Intuitively, source-free UDA (SFUDA) [18, 21, 45] methods are proposed to adapt source models to a target domain without access to the source data. Existing SFUDA methods for semantic segmentation primarily focus on source data estimation [41, 44] or self-training [4, 21, 25, 54] for pinhole images. In this paper, we make the first attempt at achieving SFUDA from the pinhole image domain to the panoramic domain. This task is nontrivial to be tackled due to the semantic mismatches, style discrepancies, and inevitable distortion of panoramic images. Unlike these methods that focus on the source domain data estimation [25, 44], we propose a novel SFUDA method that effectively extracts knowledge from the source model with only panoramic images and transfers the knowledge to the target panoramic image domain. Experiments also show that naively applying these methods leads to less optimal performance (See Tab. 2). + +# 2.2. 
UDA for Panoramic Semantic Segmentation + +It can be classified into three types, including adversarial training [10, 16, 31, 34, 59], pseudo labeling [24, 38, 47, 53] and prototypical adaptation methods [49, 50]. Specifically, the first line of research applies alignment approaches to capture the domain invariant characteristics of images [16, 22, 29], feature [5, 15, 16, 59] and predictions [26, 28]. + +![](images/82ebea8a067ecf92d4b0722afe7352152e23bb62c5d22bd703561ee158a75ed7.jpg) +Figure 2. Overall framework of our proposed SFUDA for panoramic semantic segmentation. + +The second type of methods generates pseudo labels for the target domain training. The last line of research, e.g., Mutual Prototype Adaption (MPA) [49], mutually aligns the high-level features with the prototypes between domain. However, these methods treat panoramic images as pinhole images when extracting prototypes, ignoring the intricate semantic, object correspondence, and distortion information brought by the panoramic FoV. We are the first to address the SFUDA problem for panoramic segmentation. Considering the distinct projection discrepancies between source and target domains, we propose a PPAM to integrate the global panoramic prototypes from the extracted knowledge for adaptation. + +# 3. Methodology + +# 3.1. Overview + +The overall framework for panoramic segmentation is shown in Fig. 2. With only the source model $F_{S}$ available and given the unlabeled panoramic image data $D_{T}$ , we aim to train a target model $F_{T}$ that adapts knowledge from $F_{S}$ to the common $K$ categories across both domains. + +Unlike the pinhole image-to-image adaptation [25, 41, 44], pinhole-to-panoramic image domain adaptation is hampered by three key factors, specifically: semantic mismatch due to FoV variations $(70^{\circ}$ vs. $360^{\circ})$ , inevitable distortion in ERP, and ubiquitous style discrepancies in unsupervised domain adaptation (UDA) (refer to Fig.1 (a)). 
Therefore, naively applying existing SFUDA methods exhibits suboptimal segmentation performance (See Tab. 2), while UDA methods with source data, e.g., [25] for panoramic segmen + +tation do not account for the semantic mismatch between the pinhole and panoramic images. Intuitively, the key challenges are : 1) how to extract knowledge from the source model with only panoramic images and 2) how to transfer knowledge to the target panoramic image domain. + +Our key idea is to leverage the multi-projection versatility of $360^{\circ}$ data for efficient domain knowledge transfer. + +Concretely, to address the first challenge (Sec. 3.2), we use the Tangent Projection (TP) which is characterized by a reduced distortion issue compared to the ERP images [12] to extract knowledge from the source model. Concurrently, ERP images are segmented into discrete patches, each possessing a constant FoV to mimic the pinhole images, dubbed Fixed FoV Projection (FFP). Both projections make it possible to effectively extract knowledge from the source model. The distinct projection formats make it impossible to directly transfer knowledge between domains, thus we propose a Panoramic Prototype Adaptation Module (PPAM) to obtain panoramic prototypes for adaptation. To address the second challenge (Sec. 3.3), we first impose prediction and prototype level loss constraints, and propose a Cross-Dual Attention Module (CDAM) at the feature level to transfer knowledge and further address the style discrepancies. + +# 3.2. Knowledge Extraction + +As depicted in Fig. 2, given the target domain (i.e., panoramic domain) ERP images $D_{T} = \{x_{T}|x_{T}\in \mathbf{R}^{H\times W\times 3}\}$ , we first project them into TP images $D_{T}^{t} = \{x_{T}^{t}|x_{T}^{t}\in \mathbf{R}^{h\times w\times 3}\}$ and FFP images $D_{T}^{f} = \{x_{T}^{f}|x_{T}^{f}\in \mathbf{R}^{H\times W / 4\times 3}\}$ for effectively extracting knowledge from the source model. 
Note that one ERP image corresponds to 18 + +![](images/4e199840d916a08eeb2b3c2fd773d5f1503f1882600aa7a5c1b2cfea96baa60c.jpg) +Figure 3. Illustration of the prototype extraction (PE) in the panoramic prototype adaptation module (PPAM). + +TP images as [23, 59] and 4 FFP images with a fixed FoV of $90^{\circ}$ (See Sec. 5). To obtain the features and predictions from the source model for knowledge adaptation, the two types of projected images are first fed into the source model with batch sampling: + +$$ +P ^ {p}, f ^ {p} = F _ {S} \left(x _ {T} ^ {t}\right), \quad P ^ {f}, f ^ {f} = F _ {S} \left(x _ {T} ^ {f}\right), \tag {1} +$$ + +where $f^p, f^f, P^p$ , and $P^f$ are the source model features and predictions of the input TP and FFP images, respectively. For the target panoramic images, $x_T$ is fed into $F_T$ to obtain the target model features $f$ and predictions $P$ of the input batch of ERP images as $P, f = F_T(x_T)$ . However, the distinct projection formats of the input data in the source and target models make it difficult to align their features directly, thus we propose a Panoramic Prototype Adaptation Module (PPAM) to obtain panoramic prototypes for adaptation. + +Panoramic Prototype Adaptation Module (PPAM) Compared to prior UDA methods using prototypical adaptation, e.g., MPA [49, 50], our PPAM possesses three distinct characteristics: (a) class-wise prototypes are obtained from TP and FFP images to alleviate distortion and semantic mismatch problems; (b) global prototypes are iteratively updated with prototypes from two projections during the whole training procedure; (c) hard pseudo-labels are softened in the high-level feature space to obtain prototypes with different projection of panoramic images, indicating that the knowledge from the source model is fully utilized. 
 + 
Specifically, we project the source model predictions $P^p$ , $P^f$ into pseudo labels: 
 + 
$$ 
\hat {y} _ {(h, w, k)} ^ {p} = 1 _ {k = a r g m a x (P _ {h, w,:} ^ {p})}, 
$$ 
 + 
$$ 
\hat {y} _ {(H, W / 4, k)} ^ {f} = 1 _ {k = a r g m a x \left(P _ {H, W / 4,:} ^ {f}\right)}. \tag {2} 
$$ 
 + 
Here, $k$ denotes the semantic category. Subsequently, we obtain the class-specific masked features by integrating the up-sampled features with the corresponding pseudo 
 + 
labels $\hat{y}_{(h,w,k)}^p$ and $\hat{y}_{(H,W/4,k)}^f$ . Notably, the prototypes $\sum_{a=1}^{18} (\tau_p^k)_a$ and $\sum_{b=1}^{4} (\tau_f^k)_b$ for TP and FFP images are obtained by masked average pooling (MAP) operation, as shown in Fig. 3. Within each projection, PPAM first integrates the prototypes: 
 + 
$$ 
\tau_ {p} ^ {k} = a v g \left(\sum_ {a = 1} ^ {1 8} \left(\tau_ {p} ^ {k}\right) _ {a}\right), \quad \tau_ {f} ^ {k} = a v g \left(\sum_ {b = 1} ^ {4} \left(\tau_ {f} ^ {k}\right) _ {b}\right). \tag {3} 
$$ 
 + 
As shown in Fig. 2, $\tau_{p}^{k}$ and $\tau_{f}^{k}$ are integrated together as $\tau_{pf}^{k}$ to preserve the less-distorted characteristics of $\tau_{p}^{k}$ and the similar scale semantics of $\tau_{f}^{k}$ . The $\tau_{pf}^{k}$ is then used to update the panoramic global prototype $\tau_{g}^{k}$ , which is iteratively updated with $\tau_{pf}^{k}$ . To obtain more accurate and reliable prototypes, we update $\tau_{g}^{k}$ and $\tau_{pf}^{k}$ as follows: 
 + 
$$ 
\left(\tau_ {g} ^ {k}\right) ^ {i} = \frac {1}{i} \left(\tau_ {p f} ^ {k}\right) ^ {i} + \left(1 - \frac {1}{i}\right) \left(\tau_ {g} ^ {k}\right) ^ {i - 1}, \tag {4} 
$$ 
 + 
where $(\tau_{g}^{k})^{i}$ and $(\tau_{pf}^{k})^{i}$ are the prototypes for category $k$ in the $i$ -th training epoch, $(\tau_{g}^{k})^{i-1}$ is the panoramic global prototype saved in the last training epoch, $i$ is the current epoch number. 
The panoramic global prototype $\tau_{g}^{k}$ is then used to give supervision for the target prototype $\tau_{t}^{k}$ obtained from $P$ and $f$ with the same operations. + +Besides extracting prototype knowledge from the source model, PPAM also fine-tunes the source model to improve the effectiveness of knowledge extraction. Specifically, since each ERP image can be projected to 4 FFP images, the source model's extracted features $f_{f}$ have 4 pieces of FFP features. As the content of all the features is within the same ERP image, we propose to align the class-wise prototypes from each piece of the features in PPAM to enhance the model's performance. Concretely, the prototypes $\sum_{\alpha = 1}^{4}\tau_{\alpha}$ of the four FFP features are obtained through the same operations with $\tau_g^t$ . Each FFP image captures a non-overlapping $90^\circ$ FoV, resulting in distinct distortions, and similar content in each FFP image. Aligning the prototypes from each FFP image enhances distortion-awareness ability in the source model and helps to explore complementary semantic content in each FFP image. The MSE loss is imposed between each two of the prototypes as follows: + +$$ +\mathcal {L} _ {s f t} = \sum_ {\alpha \neq \beta} ^ {4} \left\{\frac {1}{K} \sum_ {k \in K} \left(\left(\tau_ {f} ^ {k}\right) _ {\alpha} - \left(\tau_ {f} ^ {k}\right) _ {\beta}\right) ^ {2} \right\}. \tag {5} +$$ + +Note that $\mathcal{L}_{sft}$ is used to fine-tune the source model $F_{S}$ . + +# 3.3. Knowledge Adaptation + +To adapt knowledge to the target domain, we impose the loss constraints on both predictions and prototypes and propose a cross-dual attention module (CDAM) at the feature level to better align the spatial and channel characteristics across the domains and projections. Specifically, the predictions of the FFP patch images are stitched to reconstruct an ERP image. + +
MethodSFmIoURoadS.W.Build.WallFencePoleTr.L.Tr.S.Veget.Terr.SkyPers.CarΔ
PVT [39] SSL38.7455.3936.8780.8419.7215.188.045.392.1772.9132.0190.8126.7657.40-
PVT [39] MPA40.9070.7842.4782.1322.7910.7413.541.270.3071.1533.0389.6929.0764.73-
Source w/ seg-b135.8163.3624.0980.1315.6813.3916.267.420.0962.4520.2086.0523.0253.37-
SFDA w/ seg-b1 [25]38.2168.7830.7180.375.2618.9520.905.252.3670.1923.3090.2022.5557.90+2.40
ProDA w/ seg-b1 [51]37.3768.9330.8880.074.1718.6019.721.771.5670.0522.7390.6019.7157.04+2.73
GTA w/ seg-b1 [21]36.0064.6120.0479.048.0615.3619.866.022.1365.7717.7584.5626.7158.13+0.19
HCL w/ seg-b1 [18]38.3868.8230.4180.375.8820.1820.104.232.1170.5024.7489.8922.6559.04+2.57
DATC w/ seg-b1 [41]38.5469.4826.9680.6811.6415.2420.109.330.5566.1124.3185.1630.9060.58+2.73
Simt w/ seg-b1 [14]37.9468.4729.5179.626.7819.2019.482.311.3368.8526.5589.3022.3559.49+2.13
Ours w/ seg-b141.7870.1733.2481.6613.0623.4023.377.633.5971.0425.4689.3336.6064.60+5.97
Ours w/ seg-b242.1869.9932.2881.3410.6224.3524.299.193.6371.2830.0488.7537.4965.05+6.37
 + 
Table 1. Experimental results on the S-to-D scenario, the overlapped 13 classes of two datasets are used to test the UDA performance. The bold and underline denote the best and the second-best performance in source-free UDA methods, respectively. 
 + 
The ERP image is then passed to the source model $F_{S}$ to predict a pseudo label, which serves as the supervision for the ERP predictions of the target model $F_{T}$ . For simplicity, we use the Cross-Entropy (CE) loss, which is formulated as: 
 + 
$$ 
\mathcal {L} _ {s u p} = C E (P, 1 _ {k = a r g m a x (\{R e b u i l d (P _ {H, W / 4,:} ^ {f}) \})}). \tag {6} 
$$ 
 + 
And the prototype-level knowledge transfer loss is achieved by Mean Squared Error (MSE) loss between the panoramic global prototype $\tau_g^k$ and the target prototype $\tau_t^k$ : 
 + 
$$ 
\mathcal {L} _ {p p a} = \frac {1}{K} \sum_ {k \in K} \left(\tau_ {g} ^ {k} - \tau_ {t} ^ {k}\right) ^ {2}. \tag {7} 
$$ 
 + 
With loss $\mathcal{L}_{ppa}$ , the prototypes are pushed together to transfer the source-extracted knowledge to the target domain. In summary, with the proposed PPAM, we effectively address the distortion and semantic mismatch problems at the prediction and prototype level; we now tackle the style discrepancy problem at the feature level. 
 + 
Cross Dual Attention Module (CDAM). Inspired by the dual attention, focusing on spatial and channel characteristics [25], our CDAM imitates the spatial and channel-wise distributions of features to alleviate the style discrepancies. Different from [25] suggesting to minimize the distribution distance of the dual attention maps between the fake source (FFP images) and target data (ERP images), our CDAM focuses on aligning the distribution between FFP and ERP of the panoramic images rather than introducing additional parameters and computation cost in estimating source data. As shown in Fig. 
2, we reconstruct the FFP features $F^f$ to ensure that the rebuilt feature $F'$ has the same spatial size as $F$ . Before the cross dual attention operation, we apply a Batch Normalization Statistics (BNS) guided constraint on $F$ and $F'$ . Since the BNS of the source model should satisfy the feature distribution of the source data, we align $F$ and 
 + 
$F^{\prime}$ with BNS to alleviate the domain gaps as follows: 
 + 
$$ 
\begin{array}{l} \mathcal {L} _ {b n s} = \left\| \mu (F) - \bar {\mu} \right\| _ {2} ^ {2} + \left\| \sigma^ {2} (F) - \bar {\sigma} ^ {2} \right\| _ {2} ^ {2} \\ + \left\| \mu \left(F ^ {\prime}\right) - \bar {\mu} \right\| _ {2} ^ {2} + \left\| \sigma^ {2} \left(F ^ {\prime}\right) - \bar {\sigma} ^ {2} \right\| _ {2} ^ {2}, \tag {8} \\ \end{array} 
$$ 
 + 
where $\bar{\mu}$ and $\bar{\sigma}^2$ are the mean and variance parameters of the last BN layer in the source model $S$ . 
 + 
As shown in Fig. 2 (a), after being aligned with BNS, the ERP feature $f$ and the rebuilt feature $f'$ are first reshaped to be $f \in \mathbb{R}^{N \times C}$ and $f' \in \mathbb{R}^{N \times C}$ , where $N$ is the number of pixels and $C$ is the channel number. Then we calculate the spatial-wise attention maps $M_{sp} \in \mathbb{R}^{N \times N}$ and $M_{sp}' \in \mathbb{R}^{N \times N}$ for $f$ and $f'$ by: 
 + 
$$ 
\{M _ {s p} \} _ {j i} = \frac {\exp (f _ {[ i : ]} ^ {\prime} \cdot f _ {[ : j ]} ^ {T})}{\sum_ {i} ^ {N} \exp (f _ {[ i : ]} ^ {\prime} \cdot f _ {[ : j ]} ^ {T})}, 
$$ 
 + 
$$ 
\left\{M _ {s p} ^ {\prime} \right\} _ {j i} = \frac {\exp \left(f _ {[ i : ]} \cdot f _ {[ : j ]} ^ {T}\right)}{\sum_ {i} ^ {N} \exp \left(f _ {[ i : ]} \cdot f _ {[ : j ]} ^ {T}\right)}, \tag {9} 
$$ 
 + 
where $f^T$ is the transpose of $f$ and $\{M\}_{ij}$ measures the impact of the $i$ -th position on the $j$ -th position. 
Similarly, the channel-wise attention maps $M_{ch} \in \mathbb{R}^{C \times C}$ and $M_{ch}' \in \mathbb{R}^{C \times C}$ can be obtained through: 
 + 
$$ 
\left\{M _ {c h} \right\} _ {j i} = \frac {\exp \left(f _ {[ i : ]} ^ {T} \cdot f _ {[ : j ]}\right)}{\sum_ {i} ^ {C} \exp \left(f _ {[ i : ]} ^ {T} \cdot f _ {[ : j ]}\right)}, 
$$ 
 + 
$$ 
\left\{M _ {c h} ^ {\prime} \right\} _ {j i} = \frac {\exp \left(f _ {[ i : ]} ^ {T} \cdot f _ {[ : j ]} ^ {\prime}\right)}{\sum_ {i} ^ {C} \exp \left(f _ {[ i : ]} ^ {T} \cdot f _ {[ : j ]} ^ {\prime}\right)}. \tag {10} 
$$ 
 + 
After obtaining the spatial and channel attention maps, the CDAM loss can be calculated with the Kullback-Leibler divergence (KL divergence) as follows: 
 + 
$$ 
\mathcal {L} _ {c d a} = K L \left(M _ {s p}, M _ {s p} ^ {\prime}\right) + K L \left(M _ {c h}, M _ {c h} ^ {\prime}\right) \tag {11} 
$$ 
 + 
![](images/56372506da771c02d126a5a0b84cc0c960978ea26b98df2281949a2bc7b8b3a3.jpg) 
Figure 4. Example visualization results. (a) source, (b) SFDA [25], (c) DATC [41], (d) Ours, (e) Ground Truth (GT). 
 + 
MethodSFmIoUPersonRiderCarTruckBusTrainMotorBikeΔ
Trans4PASS-T [49]53.1848.5416.9179.5865.3355.7684.6359.0537.61-
Trans4PASS-S [49]55.2248.8523.3681.0267.3169.5386.1360.8539.09-
DAFormer [17]54.6749.6925.1577.7063.0665.6186.6865.1248.13-
DPPASS [59]55.3052.0929.4079.1958.7347.2486.4866.6038.11-
DATR [58]56.8154.6229.5080.0367.3563.7587.6767.5737.10-
Source w/ seg-b138.6540.9310.8967.6736.8615.5626.4342.6827.16-
SFDA w/ seg-b1 [25]42.7041.658.4669.9747.4833.2472.0147.6132.77+4.05
DTAC w/ seg-b1 [41]43.0643.518.3570.1035.7940.7370.5249.4932.94+4.41
Ours w/ seg-b148.7845.3615.8375.7049.1655.6882.0754.8233.76+10.13
Ours w/ seg-b250.1249.9227.2276.2247.8164.1379.4756.8335.76+11.47
+ +Table 2. Experimental results of 8 selected categories in panoramic semantic segmentation on C-to-D. SF: Source-free UDA. The bold and underline denote the best and the second-best performance in source-free UDA methods, respectively. + +# 3.4. Optimization + +The training objective for learning the target model containing three losses is defined as: + +$$ +\mathcal {L} = \lambda \cdot \mathcal {L} _ {p p a} + \gamma \cdot \mathcal {L} _ {c d a} + \mathcal {L} _ {b n s} + \mathcal {L} _ {s u p} \tag {12} +$$ + +where $\mathcal{L}_{ppa}$ is the MSE loss from PPAM, $\mathcal{L}_{cda}$ refers to the KL loss from CDAM, $\mathcal{L}_{sup}$ denotes the CE loss for the prediction pseudo label supervision loss, $\mathcal{L}_{bns}$ refers to the BNS guided feature loss, and $\lambda$ and $\gamma$ are the trade-off weights of the proposed loss terms. + +# 4. Experiments and Analysis + +As the first SFUDA method for panoramic image segmentation, there is no prior method for direct comparison. We thus empirically validate our method by comparing it with the existing UDA and panoramic segmentation methods on three widely used benchmarks. + +# 4.1. Datasets and Implementation Details. + +Cityscapes [11] is a real-world dataset collected for autonomous driving that contains street scenes. DensePASS [27] is a panoramic dataset designed for capturing diverse street scenes. SynPASS [50] is a synthetic dataset consisting of 9080 synthetic panoramic images. Stanford2D3D [3] is an indoor panoramic dataset which has 1413 panoramic images. Overall, the experiments are conducted on both real-world (Cityscapes-to-DensePASS, C-to-D, and Stanford2D3D-pinhole-to-Stanford2D3D-panoramic, SPinto-SPan) and synthetic-to-real (SynPASS-to-DensePASS, S-to-D) scenarios. + +# 4.2. Experimental Results. + +We first evaluate our proposed framework under the S-to-D scenario. The experimental results are shown in Tab. 1. 
Our proposed method consistently outperforms source-free UDA methods [25] and [41] and even achieves panoramic semantic segmentation performance closer to that of the UDA method Trans4PASS [50] which utilizes the source data + +
MethodSFmIoUCeilingChairDoorFloorSofaTableWallWindowΔ
PVT-S w/ MPA [49]X57.9585.8551.7618.3990.7835.9365.4375.0040.43-
Trans4PASS w/ MPA [49]X64.5285.0858.7234.9791.1246.2571.7277.5850.75-
Trans4PASS+ [50]X63.7390.6362.3024.7992.6235.7373.1678.7451.78-
Trans4PASS+ w/ MPA [50]X67.1690.0464.0442.8991.7438.3471.4581.2457.54-
SFDA [25]54.7679.4433.2052.0967.3622.5453.6469.3860.46-
Ours w/ b157.6373.8129.9863.6573.4931.7649.2572.8966.22+2.87
Ours w/ b265.7582.8838.0065.8186.7136.3266.1080.2969.88+10.99
+ +Table 3. Experimental results on indoor Stanford2D3D [3]. The bold denotes the best performance among UDA and SFUDA methods. + +
Loss Function CombinationsC-to-DS-to-D
\( \mathcal{L}_{sup} \)\( \mathcal{L}_{ppa} \)\( \mathcal{L}_{sft} \)\( \mathcal{L}_{cda} \)\( \mathcal{L}_{bns} \)mIoUΔmIoUΔ
38.65-35.81-
45.42+6.7738.37+2.56
46.23+7.5838.49+2.68
44.24+5.5938.38+2.57
44.79+6.1438.52+2.71
48.78+10.1341.78+5.97
in the adaptation procedure. Our proposed method brings significant performance gain of $+3.57\%$ and $+3.54\%$ with SegFormer-B1 backbone over SFDA [25] and DATC [41], respectively. We also provide the TSNE visualization in Fig. 5 (b) and qualitative results in Fig. 4. Apparently, our method gains a significant improvement in distinguishing the pixels in panoramic images in both prediction and high-level feature space. As shown in Tab. 2, we then evaluate our proposed framework under the C-to-D scenario. Our proposed method significantly outperforms source-free methods [25, 41] and some panoramic semantic segmentation methods [43, 46, 48]. Specifically, our method achieves a significant performance gain over SFDA [25] and DTAC [41] by $+6.08\%$ and $+5.72\%$ , respectively. This demonstrates that our proposed method endowed with PPAM and CDAM is more suitable for panoramic semantic segmentation tasks. Furthermore, as shown in the qualitative results in Fig. 4, our method achieves better segmentation in driving-related categories, such as rider and car. 
 + 
We also provide TSNE visualizations [35] in Fig. 5 (a), showing that our proposed method brings significant improvements in distinguishing pixels from different categories in high-level feature space. Additionally, we evaluated our proposed method on the Stanford2D3D [3] dataset and compared it with the SFDA [25] and MPA [50] methods. As shown in the following table, our proposed method significantly outperforms the SFDA by $+7.09\%$ mIoU and is on par with the MPA method using source data $(61.85\%$ vs. $67.16\%)$ . Notably, for some categories, such as door $(57.90\%$ 
 + 
Table 4. Ablation study of different module combinations. 
 + 
Combinationsτg+τpτg+τfτg+τp+τf
mIoU44.1444.2845.42
 + 
Table 5. Ablation study of different prototype combinations. 
 + 
vs. $42.89\%$ ) and window $(68.06\%$ vs. $57.54\%)$ , our method even outperforms the MPA [50]. 
 + 
# 5. Ablation Study 
 + 
Different Loss Function Combinations. To assess the effectiveness of the proposed modules, we conduct ablation experiments on both real-world and synthetic-to-real scenarios with various loss combinations. All of the proposed modules and loss functions have a positive impact on improving segmentation performance. Notably, our PPAM yields a significant performance gain of $+6.77\%$ . This indicates that PPAM alleviates the intricate semantics and distortion problem with the tangent, and our proposed FFP projection is valid. This is further supported by the qualitative results presented in Fig. 4. Additionally, our proposed CDAM achieves a performance gain of $+5.59\%$ compared to the source baseline, which means that CDAM imitates the spatial and channel-wise distributions of ERP and FFP features and further addresses the style discrepancy problems. 
 + 
Ablation of Different Prototype Combinations. To validate the effectiveness of all the prototypes in PPAM, we conduct experiments on C-to-D using SegFormer-B1 and only $\mathcal{L}_{sup}$ and $\mathcal{L}_{ppa}$ . The results of the performance with different prototype combinations are presented in Tab. 5. Both prototypes from TP and FFP have a positive effect on PPAM, with $\tau_{p}$ and $\tau_{f}$ resulting in mIoU improvements of $+5.49\%$ and $+5.63\%$ , respectively, compared to the source baseline. When both prototypes are combined together, there is a mIoU gain of $+6.77\%$ , indicating that their combination is better for prototype-level adaptation. 
 + 
Dual Attention vs. Cross Dual Attention. The dual attention (DA) approach proposed in SFDA [25] aligns the spatial and channel characteristics of features between the fake source and target data. 
In contrast, our cross dual attention (CDA) approach aligns the distribution between different + +![](images/58fb62606424e11248d1022b69f80d5a25d367c9d1832cfb58073bd549911574.jpg) +Figure 5. TSNE visualization of (a) Cityscapes-to-DensePASS and (b) SynPASS-to-DensePASS. + +
FoVw/o60°72°90°120°180°360°
mIoU38.6544.0344.1644.2844.0241.6540.31
Δ-+5.38+5.51+5.63+5.37+3.00+1.66
projections of the same spherical data, specifically ERP and FFP, resulting in more robust and stable knowledge transfer. Moreover, in our CDA, we obtain spatial and channel characteristics across features, whereas DA operates within features. We also evaluate DA on the C-to-D scenario, and our CDA achieves $44.24\%$ mIoU, while DA only reaches $41.53\%$ mIoU. This indicates the proposed CDA is better for SFUDA in panoramic semantic segmentation. 
 + 
Field-of-view of FFP. Most existing approaches for panoramic semantic segmentation, such as those proposed in [49, 50, 59], primarily focus on alleviating distortion by introducing distortion-aware components and distinct projection strategies. However, as discussed in Sec. 3.2, $360^{\circ}$ images contain more intricate semantic information and object correspondence than the pinhole images, resulting in an obvious semantic mismatch between domains. Therefore, we propose the Fixed FoV Projection (FFP) strategy to address the semantic mismatch. Experimental results show that the fixed FoV is the most influential factor in FFP, with an FoV of $90^{\circ}$ achieving the best segmentation performance, as shown in Tab. 6, with a mIoU of $44.28\%$ . 
 + 
Ablation of Hyper-parameters. We now show the influence of hyperparameters $\gamma$ and $\lambda$ , which are the weights for the KL loss in CDAM and the MSE loss in PPAM, respectively. The experimental results are provided in Tab. 7. 
 + 
Fine-tuning the Source Model. As the pre-trained model 
γ00.010.020.050.10.2
mIoU38.6542.0543.2443.2844.2443.07
Δ-+3.40+4.59+4.63+5.59+4.42
λ0506080100120
mIoU38.6543.1343.2245.3645.4245.34
Δ-+4.48+4.57+6.71+6.77+6.69
+ +Table 7. Ablation study of $\gamma$ and $\lambda$ . + +in the source (pinhole) domain is not an ideal model for the target (panoramic) image domain, we propose to fine-tune the source model with the loss function $\mathcal{L}_{sft}$ , as described in Sec. 3.2. Tab. 4 demonstrates the effectiveness of the proposed $\mathcal{L}_{sft}$ . When combined with the prototypical adaptation loss $\mathcal{L}_{ppa}$ , adding $\mathcal{L}_{sft}$ results in a $6.77\%$ mIoU gain compared with the source baseline of $38.65\%$ . We present the performance metrics derived solely from the loss $\mathcal{L}_{sft}$ of PPAM: C-2-D registers at $44.94\%$ while S-2-D records $36.74\%$ . These results underscore the efficacy of $\mathcal{L}_{sft}$ integrated within our PPAM module. Concerning transfer-ability, our $\mathcal{L}_{sft}$ exhibits compatibility with various projection methods, e.g., cube map. At its core, our fine-tuning loss seeks to align all projection images originating from the same panoramic source, irrespective of the employed projection technique. This intrinsic adaptability facilitates the application of $\mathcal{L}_{sft}$ across diverse projections. More results refer to the supplementary material. + +# 6. Conclusion + +In this paper, we investigated a new problem of achieving SFUDA for panoramic semantic segmentation. To this end, we proposed an end-to-end SFUDA framework to address the domain shifts, including semantic mismatch, distortion, and style discrepancies, between pinhole and panoramic domains. Experiments on both real-world and synthetic benchmarks show that our proposed framework outperforms prior approaches and is on par with the methods using source data. + +Limitation and future work. One limitation of our proposed framework is the computational cost brought by the tangent projection during training, and there is still room for improvements in segmentation performance. 
However, components in our approach such as panoramic prototypes and fixed FoV projection have significant implications for the $360^{\circ}$ vision, especially for the panoramic semantic segmentation. In the future, we plan to utilize the large language models (LLMs) and Multi-modal large language models (MLLMs) to alleviate the domain gaps, such as the semantic mismatches between pinhole and panoramic images. + +Acknowledgement This paper is supported by the National Natural Science Foundation of China (NSF) under Grant No. NSFC22FYT45 and the Guangzhou City, University and Enterprise Joint Fund under Grant No.SL2022A03J01278. + +# References + +[1] Hao Ai, Zidong Cao, Jinjing Zhu, Haotian Bai, Yucheng Chen, and Ling Wang. Deep learning for omnidirectional vision: A survey and new perspectives. arXiv preprint arXiv:2205.10468, 2022. 1 +[2] Nikita Araslanov and Stefan Roth. Self-supervised augmentation consistency for adapting semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15384-15394, 2021. 2 +[3] Iro Armeni, Sasha Sax, Amir R Zamir, and Silvio Savarese. Joint 2d-3d-semantic data for indoor scene understanding. arXiv preprint arXiv:1702.01105, 2017. 6, 7 +[4] Mathilde Bateson, Hoel Kervadec, Jose Dolz, Herve Lombaert, and Ismail Ben Ayed. Source-free domain adaptation for image segmentation. Medical Image Analysis, 82:102617, 2022. 2 +[5] Chaoqi Chen, Weiping Xie, Tingyang Xu, Wenbing Huang, Yu Rong, Xinghao Ding, Yue Huang, and Junzhou Huang. Progressive feature alignment for unsupervised domain adaptation. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 627-636, 2019. 2 +[6] Jialei Chen, Daisuke Deguchi, Chenkai Zhang, Xu Zheng, and Hiroshi Murase. Frozen is better than learning: A new design of prototype-based classifier for semantic segmentation. Available at SSRN 4617170. 2 +[7] Jialei Chen, Chong Fu, Haoyu Xie, Xu Zheng, Rong Geng, and Chiu-Wing Sham. 
Uncertainty teacher with dense focal loss for semi-supervised medical image segmentation. Computers in Biology and Medicine, 149:106034, 2022. +[8] Jialei Chen, Daisuke Deguchi, Chenkai Zhang, Xu Zheng, and Hiroshi Murase. Clip is also a good teacher: A new learning framework for inductive zero-shot semantic segmentation. arXiv preprint arXiv:2310.02296, 2023. +[9] Minghao Chen, Hongyang Xue, and Deng Cai. Domain adaptation for semantic segmentation with maximum squares loss. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2090-2099, 2019. 2 +[10] Jaehoon Choi, Taekyung Kim, and Changick Kim. Self-ensembling with gan-based data augmentation for domain adaptation in semantic segmentation. 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pages 6829–6839, 2019. 2 +[11] Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In Proc. of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 6 +[12] Marc Eder, Mykhailo Shvets, John Lim, and Jan-Michael Frahm. Tangent images for mitigating spherical distortion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12426-12434, 2020. 3 +[13] Francois Fleuret et al. Uncertainty reduction for model adaptation in semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9613-9623, 2021. 2 + +[14] Xiaoqing Guo, Jie Liu, Tongliang Liu, and Yixuan Yuan. Simt: Handling open-set noise for domain adaptive semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7032-7041, 2022. 2, 5 +[15] Judy Hoffman, Dequan Wang, Fisher Yu, and Trevor Darrell. Fcts in the wild: Pixel-level adversarial and constraint-based adaptation. ArXiv, abs/1612.02649, 2016. 
2 +[16] Judy Hoffman, Eric Tzeng, Taesung Park, Jun-Yan Zhu, Phillip Isola, Kate Saenko, Alexei A. Efros, and Trevor Darrell. Cycada: Cycle-consistent adversarial domain adaptation. In ICML, 2018. 2 +[17] Lukas Hoyer, Dengxin Dai, and Luc Van Gool. Daformer: Improving network architectures and training strategies for domain-adaptive semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9924-9935, 2022. 2, 6 +[18] Jiaxing Huang, Dayan Guan, Aoran Xiao, and Shijian Lu. Model adaptation: Historical contrastive learning for unsupervised domain adaptation without source data. Advances in Neural Information Processing Systems, 34:3635-3649, 2021. 2, 5 +[19] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 1 +[20] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 1 +[21] Jogendra Nath Kundu, Akshay Kulkarni, Amit Singh, Varun Jampani, and R Venkatesh Babu. Generalize then adapt: Source-free domain adaptive semantic segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7046-7056, 2021. 2, 5 +[22] Yunsheng Li, Lu Yuan, and Nuno Vasconcelos. Bidirectional learning for domain adaptation of semantic segmentation. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6929-6938, 2019. 2 +[23] Yuyan Li, Yuliang Guo, Zhixin Yan, Xinyu Huang, Ye Duan, and Liu Ren. Omnifusion: 360 monocular depth estimation via geometry-aware fusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2801-2810, 2022. 4 +[24] Mengyi Liu, Shuhui Wang, Yulan Guo, Yuan He, and Hui Xue. 
Pano-sfmlearner: Self-supervised multi-task learning of depth and semantics in panoramic videos. IEEE Signal Processing Letters, 28:832-836, 2021. 2 +[25] Yang Liu, Wei Zhang, and Jun Wang. Source-free domain adaptation for semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1215-1224, 2021. 2, 3, 5, 6, 7 +[26] Yawei Luo, Liang Zheng, Tao Guan, Junqing Yu, and Yi Yang. Taking a closer look at domain shift: Category-level adversaries for semantics consistent domain adaptation. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2502-2511, 2019. 2 + +[27] Chaoxiang Ma, Jiaming Zhang, Kailun Yang, Alina Roitberg, and Rainer Stiefelhagen. Densepass: Dense panoramic semantic segmentation via unsupervised domain adaptation with attention-augmented context exchange. In 2021 IEEE International Intelligent Transportation Systems Conference (ITSC), pages 2766-2772. IEEE, 2021. 6 +[28] Luke Melas-Kyriazi and Arjun K. Manrai. Pixmatch: Unsupervised domain adaptation via pixelwise consistency training. 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 12430-12440, 2021. 2 +[29] Zak Murez, Soheil Kolouri, David J. Kriegman, Ravi Ramamoorthi, and Kyungnam Kim. Image to image translation for domain adaptation. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4500-4509, 2018. 2 +[30] Fei Pan, Inkyu Shin, Francois Rameau, Seokju Lee, and In So Kweon. Unsupervised intra-domain adaptation for semantic segmentation through self-supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3764-3773, 2020. 2 +[31] Swami Sankaranarayanan, Yogesh Balaji, Arpit Jain, Ser-Nam Lim, and Rama Chellappa. Learning from synthetic data: Addressing domain shift for semantic segmentation. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3752-3761, 2018. 
2 +[32] Weifa Shen, Qixiong Wang, Hongxiang Jiang, Sen Li, and Jihao Yin. Unsupervised domain adaptation for semantic segmentation via self-supervision. In 2021 IEEE International Geoscience and Remote Sensing Symposium IGARSS, pages 2747-2750. IEEE, 2021. 2 +[33] Serban Stan and Mohammad Rostami. Unsupervised model adaptation for continual semantic segmentation. In Proceedings of the AAAI conference on artificial intelligence, pages 2593-2601, 2021. 2 +[34] Yi-Hsuan Tsai, Wei-Chih Hung, Samuel Schulter, Kihyuk Sohn, Ming-Hsuan Yang, and Manmohan Chandraker. Learning to adapt structured output space for semantic segmentation. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7472-7481, 2018. 2 +[35] Laurens Van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. Journal of machine learning research, 9(11), 2008. 7 +[36] Tuan-Hung Vu, Himalaya Jain, Maxime Bucher, Matthieu Cord, and Patrick Pérez. Advent: Adversarial entropy minimization for domain adaptation in semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2517-2526, 2019. 2 +[37] Tuan-Hung Vu, Himalaya Jain, Maxime Bucher, Matthieu Cord, and Patrick Pérez. Dada: Depth-aware domain adaptation in semantic segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7364-7373, 2019. 2 +[38] Qin Wang, Dengxin Dai, Lukas Hoyer, Olga Fink, and Luc Van Gool. Domain adaptive semantic segmentation with self-supervised depth estimation. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 8495-8505, 2021. 2 + +[39] Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, Kaitao Song, Ding Liang, Tong Lu, Ping Luo, and Ling Shao. Pyramid vision transformer: A versatile backbone for dense prediction without convolutions. In Proceedings of the IEEE/CVF international conference on computer vision, pages 568-578, 2021. 
5 +[40] Haoyu Xie, Chong Fu, Xu Zheng, Yu Zheng, Chiu-Wing Sham, and Xingwei Wang. Adversarial co-training for semantic segmentation over medical images. Computers in biology and medicine, 157:106736, 2023. 2 +[41] Cheng-Yu Yang, Yuan-Jhe Kuo, and Chiou-Ting Hsu. Source free domain adaptation for semantic segmentation via distribution transfer and adaptive class-balanced self-training. In 2022 IEEE International Conference on Multimedia and Expo (ICME), pages 1-6. IEEE, 2022. 2, 3, 5, 6, 7 +[42] Kailun Yang, Xinxin Hu, Luis M Bergasa, Eduardo Romero, and Kaiwei Wang. Pass: Panoramic annular semantic segmentation. IEEE Transactions on Intelligent Transportation Systems, 21(10):4171-4185, 2019. 1 +[43] Kailun Yang, Xinxin Hu, Yicheng Fang, Kaiwei Wang, and Rainer Stiefelhagen. Omnisupervised omnidirectional semantic segmentation. IEEE Transactions on Intelligent Transportation Systems, 2020. 1, 7 +[44] Mucong Ye, Jing Zhang, Jinpeng Ouyang, and Ding Yuan. Source data-free unsupervised domain adaptation for semantic segmentation. In Proceedings of the 29th ACM International Conference on Multimedia, pages 2233-2242, 2021. 2, 3 +[45] Hao-Wei Yeh, Baoyao Yang, Pong C Yuen, and Tatsuya Harada. Sofa: Source-data-free feature alignment for unsupervised domain adaptation. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 474–483, 2021. 2 +[46] Xiangyu Yue, Zangwei Zheng, Shanghang Zhang, Yang Gao, Trevor Darrell, Kurt Keutzer, and Alberto Sangiovanni Vincentelli. Prototypical cross-domain self-supervised learning for few-shot unsupervised domain adaptation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13834-13844, 2021. 1, 7 +[47] Cheng Zhang, Zhaopeng Cui, Cai Chen, Shuaicheng Liu, Bing Zeng, Hujun Bao, and Yinda Zhang. Deeppanoocontext: Panoramic 3d scene understanding with holistic scene context graph and relation-based optimization. 
2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 12612-12621, 2021. 2 +[48] Jiaming Zhang, Chaoxiang Ma, Kailun Yang, Alina Roitberg, Kunyu Peng, and Rainer Stiefelhagen. Transfer beyond the field of view: Dense panoramic semantic segmentation via unsupervised domain adaptation. IEEE Transactions on Intelligent Transportation Systems, 2021. 1, 7 +[49] Jiaming Zhang, Kailun Yang, Chaoxiang Ma, Simon Reiβ, Kunyu Peng, and Rainer Stiefelhagen. Bending reality: Distortion-aware transformers for adapting to panoramic semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16917-16927, 2022. 1, 2, 3, 4, 6, 7, 8 +[50] Jiaming Zhang, Kailun Yang, Hao Shi, Simon Reiβ, Kunyu Peng, Chaoxiang Ma, Haodong Fu, Kaiwei Wang, and Rainer + +Stiefelhagen. Behind every domain there is a shift: Adapting distortion-aware vision transformers for panoramic semantic segmentation. arXiv preprint arXiv:2207.11860, 2022. 1, 2, 4, 6, 7, 8 +[51] Pan Zhang, Bo Zhang, Ting Zhang, Dong Chen, Yong Wang, and Fang Wen. Prototypical pseudo label denoising and target structure learning for domain adaptive semantic segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12414-12424, 2021. 2, 5 +[52] Qiming Zhang, Jing Zhang, Wei Liu, and Dacheng Tao. Category anchor-guided unsupervised domain adaptation for semantic segmentation. Advances in neural information processing systems, 32, 2019. 2 +[53] Yang Zhang, Philip David, and Boqing Gong. Curriculum domain adaptation for semantic segmentation of urban scenes. 2017 IEEE International Conference on Computer Vision (ICCV), pages 2039-2049, 2017. 2 +[54] Yuyang Zhao, Zhun Zhong, Zhiming Luo, Gim Hee Lee, and Nicu Sebe. Source-free open compound domain adaptation in semantic segmentation. IEEE Transactions on Circuits and Systems for Video Technology, 32(10):7019-7032, 2022. 
2 +[55] Xu Zheng, Chong Fu, Haoyu Xie, Jialei Chen, Xingwei Wang, and Chiu-Wing Sham. Uncertainty-aware deep co-training for semi-supervised medical image segmentation. Computers in Biology and Medicine, 149:106051, 2022. 2 +[56] Xu Zheng, Yunhao Luo, Hao Wang, Chong Fu, and Lin Wang. Transformer-cnn cohort: Semi-supervised semantic segmentation by the best of both students. arXiv preprint arXiv:2209.02178, 2022. +[57] Xu Zheng, Yunhao Luo, Pengyuan Zhou, and Lin Wang. Distilling efficient vision transformers from cnns for semantic segmentation. arXiv preprint arXiv:2310.07265, 2023. 2 +[58] Xu Zheng, Tianbo Pan, Yunhao Luo, and Lin Wang. Look at the neighbor: Distortion-aware unsupervised domain adaptation for panoramic semantic segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 18687-18698, 2023. 2, 6 +[59] Xu Zheng, Jinjing Zhu, Yexin Liu, Zidong Cao, Chong Fu, and Lin Wang. Both style and distortion matter: Dual-path unsupervised domain adaptation for panoramic semantic segmentation. arXiv preprint arXiv:2303.14360, 2023. 1, 2, 4, 6, 8 +[60] Jinjing Zhu, Yunhao Luo, Xu Zheng, Hao Wang, and Lin Wang. A good student is cooperative and reliable: Cnn-transformer collaborative learning for semantic segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11720-11730, 2023. 2 +[61] Yang Zou, Zhiding Yu, BVK Kumar, and Jinsong Wang. Unsupervised domain adaptation for semantic segmentation via class-balanced self-training. In Proceedings of the European conference on computer vision (ECCV), pages 289-305, 2018. 
2 \ No newline at end of file diff --git a/2024/Semantics Distortion and Style Matter_ Towards Source-free UDA for Panoramic Segmentation/images.zip b/2024/Semantics Distortion and Style Matter_ Towards Source-free UDA for Panoramic Segmentation/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..d7c65331d642395806eb3074f53bb42416008648 --- /dev/null +++ b/2024/Semantics Distortion and Style Matter_ Towards Source-free UDA for Panoramic Segmentation/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7da40a601d114291ebb4bdcdda05a0761658baf85c02827d69b14acddd0d014e +size 783368 diff --git a/2024/Semantics Distortion and Style Matter_ Towards Source-free UDA for Panoramic Segmentation/layout.json b/2024/Semantics Distortion and Style Matter_ Towards Source-free UDA for Panoramic Segmentation/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..7a7e31065d9caa4ee6c90e8c19d2b99cb678137b --- /dev/null +++ b/2024/Semantics Distortion and Style Matter_ Towards Source-free UDA for Panoramic Segmentation/layout.json @@ -0,0 +1,9646 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 47, + 102, + 547, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 102, + 547, + 140 + ], + "spans": [ + { + "bbox": [ + 47, + 102, + 547, + 140 + ], + "type": "text", + "content": "Semantics, Distortion, and Style Matter: Towards Source-free UDA for Panoramic Segmentation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 160, + 539, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 160, + 539, + 217 + ], + "spans": [ + { + "bbox": [ + 55, + 160, + 539, + 217 + ], + "type": "text", + "content": "Xu Zheng" + }, + { + "bbox": [ + 55, + 160, + 539, + 217 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 160, + 539, + 217 + ], + "type": "text", + "content": " Pengyuan Zhou" + }, + { + "bbox": [ + 55, 
+ 160, + 539, + 217 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 55, + 160, + 539, + 217 + ], + "type": "text", + "content": " Athanasios V. Vasilakos" + }, + { + "bbox": [ + 55, + 160, + 539, + 217 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 55, + 160, + 539, + 217 + ], + "type": "text", + "content": " Lin Wang" + }, + { + "bbox": [ + 55, + 160, + 539, + 217 + ], + "type": "inline_equation", + "content": "^{1,2*}" + }, + { + "bbox": [ + 55, + 160, + 539, + 217 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 160, + 539, + 217 + ], + "type": "text", + "content": "AI Thrust, HKUST(GZ) " + }, + { + "bbox": [ + 55, + 160, + 539, + 217 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 55, + 160, + 539, + 217 + ], + "type": "text", + "content": "Dept. of CSE, HKUST " + }, + { + "bbox": [ + 55, + 160, + 539, + 217 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 55, + 160, + 539, + 217 + ], + "type": "text", + "content": "Aarhus University " + }, + { + "bbox": [ + 55, + 160, + 539, + 217 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 55, + 160, + 539, + 217 + ], + "type": "text", + "content": "University of Agder zhengxu128@gmail.com, pengyuan.zhou@ece.au.dk, th.vasilakos@gmail.com, linwang@ust.hk Project Page: https://vlislab22.github.io/360SFUDA/" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "spans": [ + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 270, + 290, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 270, + 290, + 594 + ], + "spans": [ + { + "bbox": [ + 46, + 270, + 290, + 594 + ], + "type": "text", + "content": "This 
paper addresses an interesting yet challenging problem—source-free unsupervised domain adaptation (SFUDA) for pinhole-to-panoramic semantic segmentation—given only a pinhole image-trained model (i.e., source) and unlabeled panoramic images (i.e., target). Tackling this problem is nontrivial due to the semantic mismatches, style discrepancies, and inevitable distortion of panoramic images. To this end, we propose a novel method that utilizes Tangent Projection (TP) as it has less distortion and meanwhile slits the equirectangular projection (ERP) with a fixed FoV to mimic the pinhole images. Both projections are shown effective in extracting knowledge from the source model. However, the distinct projection discrepancies between source and target domains impede the direct knowledge transfer; thus, we propose a panoramic prototype adaptation module (PPAM) to integrate panoramic prototypes from the extracted knowledge for adaptation. We then impose the loss constraints on both predictions and prototypes and propose a cross-dual attention module (CDAM) at the feature level to better align the spatial and channel characteristics across the domains and projections. Both knowledge extraction and transfer processes are synchronously updated to reach the best performance. Extensive experiments on the synthetic and real-world benchmarks, including outdoor and indoor scenarios, demonstrate that our method achieves significantly better performance than prior SFUDA methods for pinhole-to-panoramic adaptation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 616, + 128, + 628 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 616, + 128, + 628 + ], + "spans": [ + { + "bbox": [ + 47, + 616, + 128, + 628 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 635, + 288, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 635, + 288, + 696 + ], + "spans": [ + { + "bbox": [ + 46, + 635, + 288, + 696 + ], + "type": "text", + "content": "The comprehensive scene perception abilities of " + }, + { + "bbox": [ + 46, + 635, + 288, + 696 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 635, + 288, + 696 + ], + "type": "text", + "content": " cameras have made them highly popular for applications, such as autonomous driving [1]. In contrast to pinhole cameras that capture 2D planer images with a limited field-of-view (FoV), " + }, + { + "bbox": [ + 46, + 635, + 288, + 696 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 635, + 288, + 696 + ], + "type": "text", + "content": " cameras offer a much wider FoV of" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 310, + 245, + 542, + 348 + ], + "blocks": [ + { + "bbox": [ + 310, + 245, + 542, + 348 + ], + "lines": [ + { + "bbox": [ + 310, + 245, + 542, + 348 + ], + "spans": [ + { + "bbox": [ + 310, + 245, + 542, + 348 + ], + "type": "image", + "image_path": "e7dab01ecd680f1cef9b78e4cc3b1aeab906a9ee8fdb9476ed363efce7353ec9.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 362, + 546, + 385 + ], + "lines": [ + { + "bbox": [ + 305, + 362, + 546, + 385 + ], + "spans": [ + { + "bbox": [ + 305, + 362, + 546, + 385 + ], + "type": "text", + "content": "Figure 1. We address a new problem of achieving source-free pinhole-to-panoramic adaptation for segmentation." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 407, + 547, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 407, + 547, + 443 + ], + "spans": [ + { + "bbox": [ + 304, + 407, + 547, + 443 + ], + "type": "inline_equation", + "content": "360^{\\circ} \\times 180^{\\circ}" + }, + { + "bbox": [ + 304, + 407, + 547, + 443 + ], + "type": "text", + "content": ". As a result, research on panoramic semantic segmentation [42, 43, 46, 48, 49] has been actively explored to achieve dense scene understanding for intelligent systems." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 444, + 548, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 444, + 548, + 685 + ], + "spans": [ + { + "bbox": [ + 304, + 444, + 548, + 685 + ], + "type": "text", + "content": "Generally, the spherical data captured by the " + }, + { + "bbox": [ + 304, + 444, + 548, + 685 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 444, + 548, + 685 + ], + "type": "text", + "content": " cameras is always projected into 2D planar representations, e.g., Equirectangular Projection (ERP), to be aligned with the existing imaging pipeline [1] while preserving the omnidirectional information1. However, ERP suffers from the inevitable distortion and object deformation due to the nonuniformly distributed pixels [59]. Meanwhile, learning effective panoramic segmentation models is often impeded by the lack of large precisely labeled datasets due to the difficulty of annotation. For these reasons, some unsupervised domain adaptation (UDA) methods [49, 50, 59] have been proposed to transfer the knowledge from the pinhole image domain to the panoramic image domain. 
In some crucial application scenarios, e.g., autonomous driving, source datasets are not always accessible due to privacy and commercial issues, such as data portability and transmission costs. One typical example is the recent large model, SAM [19], which brings significant progress in instance segmentation for pinhole images; however, the source datasets are too large (10TB) to be reused in end-tasks, such as [20]." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 306, + 693, + 547, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 693, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 693, + 547, + 713 + ], + "type": "text", + "content": "1In this paper, omnidirectional and panoramic images are interchangeably used, and ERP images often indicate panoramic images." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 58, + 702, + 137, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 137, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 137, + 712 + ], + "type": "text", + "content": "*Corresponding author." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "27885" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 240 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 240 + ], + "type": "text", + "content": "Motivation: In this paper, we probe an interesting yet challenging problem: source-free UDA (SFUDA) for panoramic segmentation, in which only the source model (pretrained with pinhole images) and unlabeled panoramic images are available. As shown in Fig. 1 (a), different from existing SFUDA methods, e.g., [25, 41, 44] for the pinhole-to-pinhole image adaptation, transferring knowledge from the pinhole-to-panoramic image domain is hampered by: 1) semantic mismatch caused by the different FoV between the pinhole and " + }, + { + "bbox": [ + 46, + 72, + 289, + 240 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 72, + 289, + 240 + ], + "type": "text", + "content": " cameras, i.e., " + }, + { + "bbox": [ + 46, + 72, + 289, + 240 + ], + "type": "inline_equation", + "content": "70^{\\circ}" + }, + { + "bbox": [ + 46, + 72, + 289, + 240 + ], + "type": "text", + "content": " vs. " + }, + { + "bbox": [ + 46, + 72, + 289, + 240 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 72, + 289, + 240 + ], + "type": "text", + "content": "; 2) inevitable distortion of the ERP; 3) style discrepancies caused by the distinct camera sensors and captured scenes. In Tab. 2, we show that naively adapting existing SFUDA methods to our problem leads to a limited performance boost." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 243, + 289, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 243, + 289, + 554 + ], + "spans": [ + { + "bbox": [ + 46, + 243, + 289, + 554 + ], + "type": "text", + "content": "Contributions: To this end, we propose a novel SFUDA method that effectively extracts knowledge from the source model with only panoramic images and transfers the knowledge to the target panoramic domain. Our key idea is to leverage the multi-projection versatility of " + }, + { + "bbox": [ + 46, + 243, + 289, + 554 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 243, + 289, + 554 + ], + "type": "text", + "content": " data for efficient domain knowledge transfer. Our method enjoys two key technical contributions. Specifically, we use Tangent Projection (TP) and divide the ERP images into patches with a fixed FoV, dubbed Fixed FoV Projection (FFP), to extract knowledge from the source model with less distortion and similar FoV to the pinhole images. Both projections make it possible to effectively extract knowledge from the source model. However, directly transferring the extracted knowledge to the target model is hardly approachable due to the distinct projection gaps. Thus, we propose a panoramic prototype adaptation module (PPAM) to obtain class-wise semantic prototypes from the features and predictions of the source model with TP and FFP images (Sec. 3.2). Then, these prototypes are integrated together to obtain the global panoramic prototypes for knowledge adaptation, which is updated across the adaptation procedure. Moreover, our proposed PPAM also fine-tunes the source model to promote better knowledge extraction using prototypes extracted from FFP images. Aligning the prototypes from each FFP image enables the source model to become more aware of distortion and semantics across the FoV." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 558, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 558, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 558, + 289, + 715 + ], + "type": "text", + "content": "We initially apply both prediction-level and prototype-level loss constraints to facilitate knowledge transfer to the unlabeled target panoramic domain. Concretely, the FFP predictions of the source model are rebuilt together to provide a pseudo-supervision signal for the target model. The prototype-level loss constraint is performed between the panoramic prototypes from PPAM and the prototypes from the target model's features and predictions on the ERP images. Moreover, knowledge from the source model is not limited to predictions and prototypes, high-level features also contain crucial image characteristics that can enhance the performance of the target model. Consequently, we propose a Cross-Dual Attention Module (CDAM) that aligns" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 304, + 72, + 547, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 156 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 156 + ], + "type": "text", + "content": "spatial and channel characteristics between domains to fully utilize the knowledge from the source model and address the style discrepancy problem (Sec. 3.3). Specifically, CDAM reconstructs the source model features from FFP images to provide a panoramic perception of the surrounding environment and aligns them with the ERP features from the target model for effective knowledge transfer." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 157, + 548, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 157, + 548, + 300 + ], + "spans": [ + { + "bbox": [ + 304, + 157, + 548, + 300 + ], + "type": "text", + "content": "We conduct extensive experiments on both synthetic and real-world benchmarks, including outdoor and indoor scenarios. As no directly comparable works exist, we adapt the state-of-the-art (SoTA) SFUDA methods [14, 18, 21, 25, 41, 51] – designed for pinhole-to-pinhole image adaptation – to our problem in addressing the panoramic semantic segmentation. The results show that our framework significantly outperforms these methods by large margins of " + }, + { + "bbox": [ + 304, + 157, + 548, + 300 + ], + "type": "inline_equation", + "content": "+6.37\\%" + }, + { + "bbox": [ + 304, + 157, + 548, + 300 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 157, + 548, + 300 + ], + "type": "inline_equation", + "content": "+11.47\\%" + }, + { + "bbox": [ + 304, + 157, + 548, + 300 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 157, + 548, + 300 + ], + "type": "inline_equation", + "content": "+10.99\\%" + }, + { + "bbox": [ + 304, + 157, + 548, + 300 + ], + "type": "text", + "content": " on three benchmarks. We also evaluate our method against UDA methods [49, 50, 58, 59], using the source pinhole image, the results demonstrate its comparable performance." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 312, + 392, + 324 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 312, + 392, + 324 + ], + "spans": [ + { + "bbox": [ + 306, + 312, + 392, + 324 + ], + "type": "text", + "content": "2. 
Related Work" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 332, + 492, + 345 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 332, + 492, + 345 + ], + "spans": [ + { + "bbox": [ + 306, + 332, + 492, + 345 + ], + "type": "text", + "content": "2.1. Source-free UDA for Segmentation" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 350, + 548, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 350, + 548, + 615 + ], + "spans": [ + { + "bbox": [ + 304, + 350, + 548, + 615 + ], + "type": "text", + "content": "UDA aims to mitigate the impact of domain shift caused by data distribution discrepancies in downstream computer vision tasks, such as semantic segmentation [2, 6-9, 13, 17, 30, 32, 33, 36, 37, 40, 52, 55-57, 60, 61]. However, the source domain data may not always be accessible due to the privacy protection and data storage concerns. Intuitively, source-free UDA (SFUDA) [18, 21, 45] methods are proposed to adapt source models to a target domain without access to the source data. Existing SFUDA methods for semantic segmentation primarily focus on source data estimation [41, 44] or self-training [4, 21, 25, 54] for pinhole images. In this paper, we make the first attempt at achieving SFUDA from the pinhole image domain to the panoramic domain. This task is nontrivial to be tackled due to the semantic mismatches, style discrepancies, and inevitable distortion of panoramic images. Unlike these methods that focus on the source domain data estimation [25, 44], we propose a novel SFUDA method that effectively extracts knowledge from the source model with only panoramic images and transfers the knowledge to the target panoramic image domain. Experiments also show that naively applying these methods leads to less optimal performance (See Tab. 2)." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 623, + 535, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 623, + 535, + 635 + ], + "spans": [ + { + "bbox": [ + 306, + 623, + 535, + 635 + ], + "type": "text", + "content": "2.2. UDA for Panoramic Semantic Segmentation" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 641, + 548, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 548, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 548, + 715 + ], + "type": "text", + "content": "It can be classified into three types, including adversarial training [10, 16, 31, 34, 59], pseudo labeling [24, 38, 47, 53] and prototypical adaptation methods [49, 50]. Specifically, the first line of research applies alignment approaches to capture the domain invariant characteristics of images [16, 22, 29], feature [5, 15, 16, 59] and predictions [26, 28]." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "27886" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 75, + 74, + 519, + 290 + ], + "blocks": [ + { + "bbox": [ + 75, + 74, + 519, + 290 + ], + "lines": [ + { + "bbox": [ + 75, + 74, + 519, + 290 + ], + "spans": [ + { + "bbox": [ + 75, + 74, + 519, + 290 + ], + "type": "image", + "image_path": "82ebea8a067ecf92d4b0722afe7352152e23bb62c5d22bd703561ee158a75ed7.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 127, + 301, + 465, + 314 + ], + "lines": [ + { + "bbox": [ + 127, + 301, + 465, + 314 + ], + "spans": [ + { + "bbox": [ + 127, + 301, + 465, + 314 + ], + "type": "text", + 
"content": "Figure 2. Overall framework of our proposed SFUDA for panoramic semantic segmentation." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 45, + 334, + 290, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 334, + 290, + 491 + ], + "spans": [ + { + "bbox": [ + 45, + 334, + 290, + 491 + ], + "type": "text", + "content": "The second type of methods generates pseudo labels for the target domain training. The last line of research, e.g., Mutual Prototype Adaption (MPA) [49], mutually aligns the high-level features with the prototypes between domain. However, these methods treat panoramic images as pinhole images when extracting prototypes, ignoring the intricate semantic, object correspondence, and distortion information brought by the panoramic FoV. We are the first to address the SFUDA problem for panoramic segmentation. Considering the distinct projection discrepancies between source and target domains, we propose a PPAM to integrate the global panoramic prototypes from the extracted knowledge for adaptation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 504, + 130, + 519 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 504, + 130, + 519 + ], + "spans": [ + { + "bbox": [ + 46, + 504, + 130, + 519 + ], + "type": "text", + "content": "3. Methodology" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 525, + 115, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 525, + 115, + 537 + ], + "spans": [ + { + "bbox": [ + 46, + 525, + 115, + 537 + ], + "type": "text", + "content": "3.1. 
Overview" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 544, + 287, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 544, + 287, + 605 + ], + "spans": [ + { + "bbox": [ + 46, + 544, + 287, + 605 + ], + "type": "text", + "content": "The overall framework for panoramic segmentation is shown in Fig. 2. With only the source model " + }, + { + "bbox": [ + 46, + 544, + 287, + 605 + ], + "type": "inline_equation", + "content": "F_{S}" + }, + { + "bbox": [ + 46, + 544, + 287, + 605 + ], + "type": "text", + "content": " available and given the unlabeled panoramic image data " + }, + { + "bbox": [ + 46, + 544, + 287, + 605 + ], + "type": "inline_equation", + "content": "D_{T}" + }, + { + "bbox": [ + 46, + 544, + 287, + 605 + ], + "type": "text", + "content": ", we aim to train a target model " + }, + { + "bbox": [ + 46, + 544, + 287, + 605 + ], + "type": "inline_equation", + "content": "F_{T}" + }, + { + "bbox": [ + 46, + 544, + 287, + 605 + ], + "type": "text", + "content": " that adapts knowledge from " + }, + { + "bbox": [ + 46, + 544, + 287, + 605 + ], + "type": "inline_equation", + "content": "F_{S}" + }, + { + "bbox": [ + 46, + 544, + 287, + 605 + ], + "type": "text", + "content": " to the common " + }, + { + "bbox": [ + 46, + 544, + 287, + 605 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 46, + 544, + 287, + 605 + ], + "type": "text", + "content": " categories across both domains." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 605, + 290, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 290, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 290, + 715 + ], + "type": "text", + "content": "Unlike the pinhole image-to-image adaptation [25, 41, 44], pinhole-to-panoramic image domain adaptation is hampered by three key factors, specifically: semantic mismatch due to FoV variations " + }, + { + "bbox": [ + 46, + 605, + 290, + 715 + ], + "type": "inline_equation", + "content": "(70^{\\circ}" + }, + { + "bbox": [ + 46, + 605, + 290, + 715 + ], + "type": "text", + "content": " vs. " + }, + { + "bbox": [ + 46, + 605, + 290, + 715 + ], + "type": "inline_equation", + "content": "360^{\\circ})" + }, + { + "bbox": [ + 46, + 605, + 290, + 715 + ], + "type": "text", + "content": ", inevitable distortion in ERP, and ubiquitous style discrepancies in unsupervised domain adaptation (UDA) (refer to Fig.1 (a)). Therefore, naively applying existing SFUDA methods exhibits suboptimal segmentation performance (See Tab. 2), while UDA methods with source data, e.g., [25] for panoramic segmen" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 334, + 547, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 334, + 547, + 394 + ], + "spans": [ + { + "bbox": [ + 304, + 334, + 547, + 394 + ], + "type": "text", + "content": "tation do not account for the semantic mismatch between the pinhole and panoramic images. Intuitively, the key challenges are : 1) how to extract knowledge from the source model with only panoramic images and 2) how to transfer knowledge to the target panoramic image domain." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 395, + 545, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 395, + 545, + 418 + ], + "spans": [ + { + "bbox": [ + 304, + 395, + 545, + 418 + ], + "type": "text", + "content": "Our key idea is to leverage the multi-projection versatility of " + }, + { + "bbox": [ + 304, + 395, + 545, + 418 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 395, + 545, + 418 + ], + "type": "text", + "content": " data for efficient domain knowledge transfer." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 419, + 548, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 419, + 548, + 612 + ], + "spans": [ + { + "bbox": [ + 304, + 419, + 548, + 612 + ], + "type": "text", + "content": "Concretely, to address the first challenge (Sec. 3.2), we use the Tangent Projection (TP) which is characterized by a reduced distortion issue compared to the ERP images [12] to extract knowledge from the source model. Concurrently, ERP images are segmented into discrete patches, each possessing a constant FoV to mimic the pinhole images, dubbed Fixed FoV Projection (FFP). Both projections make it possible to effectively extract knowledge from the source model. The distinct projection formats make it impossible to directly transfer knowledge between domains, thus we propose a Panoramic Prototype Adaptation Module (PPAM) to obtain panoramic prototypes for adaptation. To address the second challenge (Sec. 3.3), we first impose prediction and prototype level loss constraints, and propose a Cross-Dual Attention Module (CDAM) at the feature level to transfer knowledge and further address the style discrepancies." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 621, + 434, + 634 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 621, + 434, + 634 + ], + "spans": [ + { + "bbox": [ + 306, + 621, + 434, + 634 + ], + "type": "text", + "content": "3.2. Knowledge Extraction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 640, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 640, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 640, + 547, + 715 + ], + "type": "text", + "content": "As depicted in Fig. 2, given the target domain (i.e., panoramic domain) ERP images " + }, + { + "bbox": [ + 304, + 640, + 547, + 715 + ], + "type": "inline_equation", + "content": "D_{T} = \\{x_{T}|x_{T}\\in \\mathbf{R}^{H\\times W\\times 3}\\}" + }, + { + "bbox": [ + 304, + 640, + 547, + 715 + ], + "type": "text", + "content": ", we first project them into TP images " + }, + { + "bbox": [ + 304, + 640, + 547, + 715 + ], + "type": "inline_equation", + "content": "D_{T}^{t} = \\{x_{T}^{t}|x_{T}^{t}\\in \\mathbf{R}^{h\\times w\\times 3}\\}" + }, + { + "bbox": [ + 304, + 640, + 547, + 715 + ], + "type": "text", + "content": " and FFP images " + }, + { + "bbox": [ + 304, + 640, + 547, + 715 + ], + "type": "inline_equation", + "content": "D_{T}^{f} = \\{x_{T}^{f}|x_{T}^{f}\\in \\mathbf{R}^{H\\times W / 4\\times 3}\\}" + }, + { + "bbox": [ + 304, + 640, + 547, + 715 + ], + "type": "text", + "content": " for effectively extracting knowledge from the source model. 
Note that one ERP image corresponds to 18" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "27887" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 57, + 82, + 272, + 215 + ], + "blocks": [ + { + "bbox": [ + 57, + 82, + 272, + 215 + ], + "lines": [ + { + "bbox": [ + 57, + 82, + 272, + 215 + ], + "spans": [ + { + "bbox": [ + 57, + 82, + 272, + 215 + ], + "type": "image", + "image_path": "4e199840d916a08eeb2b3c2fd773d5f1503f1882600aa7a5c1b2cfea96baa60c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 234, + 287, + 257 + ], + "lines": [ + { + "bbox": [ + 46, + 234, + 287, + 257 + ], + "spans": [ + { + "bbox": [ + 46, + 234, + 287, + 257 + ], + "type": "text", + "content": "Figure 3. Illustration of the prototype extraction (PE) in the panoramic prototype adaptation module (PPAM)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 278, + 287, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 278, + 287, + 338 + ], + "spans": [ + { + "bbox": [ + 46, + 278, + 287, + 338 + ], + "type": "text", + "content": "TP images as [23, 59] and 4 FFP images with a fixed FoV of " + }, + { + "bbox": [ + 46, + 278, + 287, + 338 + ], + "type": "inline_equation", + "content": "90^{\\circ}" + }, + { + "bbox": [ + 46, + 278, + 287, + 338 + ], + "type": "text", + "content": " (See Sec. 5). 
To obtain the features and predictions from the source model for knowledge adaptation, the two types of projected images are first fed into the source model with batch sampling:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 79, + 339, + 287, + 354 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 339, + 287, + 354 + ], + "spans": [ + { + "bbox": [ + 79, + 339, + 287, + 354 + ], + "type": "interline_equation", + "content": "P ^ {p}, f ^ {p} = F _ {S} \\left(x _ {T} ^ {t}\\right), \\quad P ^ {f}, f ^ {f} = F _ {S} \\left(x _ {T} ^ {f}\\right), \\tag {1}", + "image_path": "c6fa106aa15bce043905ac183eae7eaa5038de5481b67d772a4a8b36ecc7555d.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 356, + 288, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 356, + 288, + 464 + ], + "spans": [ + { + "bbox": [ + 46, + 356, + 288, + 464 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 356, + 288, + 464 + ], + "type": "inline_equation", + "content": "f^p, f^f, P^p" + }, + { + "bbox": [ + 46, + 356, + 288, + 464 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 356, + 288, + 464 + ], + "type": "inline_equation", + "content": "P^f" + }, + { + "bbox": [ + 46, + 356, + 288, + 464 + ], + "type": "text", + "content": " are the source model features and predictions of the input TP and FFP images, respectively. 
For the target panoramic images, " + }, + { + "bbox": [ + 46, + 356, + 288, + 464 + ], + "type": "inline_equation", + "content": "x_T" + }, + { + "bbox": [ + 46, + 356, + 288, + 464 + ], + "type": "text", + "content": " is fed into " + }, + { + "bbox": [ + 46, + 356, + 288, + 464 + ], + "type": "inline_equation", + "content": "F_T" + }, + { + "bbox": [ + 46, + 356, + 288, + 464 + ], + "type": "text", + "content": " to obtain the target model features " + }, + { + "bbox": [ + 46, + 356, + 288, + 464 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 46, + 356, + 288, + 464 + ], + "type": "text", + "content": " and predictions " + }, + { + "bbox": [ + 46, + 356, + 288, + 464 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 46, + 356, + 288, + 464 + ], + "type": "text", + "content": " of the input batch of ERP images as " + }, + { + "bbox": [ + 46, + 356, + 288, + 464 + ], + "type": "inline_equation", + "content": "P, f = F_T(x_T)" + }, + { + "bbox": [ + 46, + 356, + 288, + 464 + ], + "type": "text", + "content": ". However, the distinct projection formats of the input data in the source and target models make it difficult to align their features directly, thus we propose a Panoramic Prototype Adaptation Module (PPAM) to obtain panoramic prototypes for adaptation." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 464, + 288, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 464, + 288, + 596 + ], + "spans": [ + { + "bbox": [ + 46, + 464, + 288, + 596 + ], + "type": "text", + "content": "Panoramic Prototype Adaptation Module (PPAM) Compared to prior UDA methods using prototypical adaptation, e.g., MPA [49, 50], our PPAM possesses three distinct characteristics: (a) class-wise prototypes are obtained from TP and FFP images to alleviate distortion and semantic mismatch problems; (b) global prototypes are iteratively updated with prototypes from two projections during the whole training procedure; (c) hard pseudo-labels are softened in the high-level feature space to obtain prototypes with different projection of panoramic images, indicating that the knowledge from the source model is fully utilized." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 597, + 288, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 597, + 288, + 620 + ], + "spans": [ + { + "bbox": [ + 47, + 597, + 288, + 620 + ], + "type": "text", + "content": "Specifically, we project the source model predictions " + }, + { + "bbox": [ + 47, + 597, + 288, + 620 + ], + "type": "inline_equation", + "content": "P^p" + }, + { + "bbox": [ + 47, + 597, + 288, + 620 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 597, + 288, + 620 + ], + "type": "inline_equation", + "content": "P^f" + }, + { + "bbox": [ + 47, + 597, + 288, + 620 + ], + "type": "text", + "content": " into pseudo labels:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 94, + 631, + 216, + 647 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 631, + 216, + 647 + ], + "spans": [ + { + "bbox": [ + 94, + 631, + 216, + 647 + ], + "type": "interline_equation", + "content": "\\hat {y} _ {(h, w, k)} ^ {p} = 1 _ {k \\dot {=} a r g m a x (P _ {h, w,:} ^ {p})},", + 
"image_path": "b098f9b70c4217c5de557d0b27832bbb98786cf0d540ecc7756199addd778478.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 94, + 647, + 287, + 668 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 647, + 287, + 668 + ], + "spans": [ + { + "bbox": [ + 94, + 647, + 287, + 668 + ], + "type": "interline_equation", + "content": "\\hat {y} _ {(H, W / 4, k)} ^ {f} = 1 _ {k \\div a r g m a x \\left(P _ {H, W / 4,:} ^ {f}\\right)}. \\tag {2}", + "image_path": "1111abfb025ef3a62f82ceb3f497d7167905d14e096e6a9b7adbe0e833ed9f81.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 677, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 714 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 46, + 677, + 287, + 714 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 46, + 677, + 287, + 714 + ], + "type": "text", + "content": " denotes the semantic category. Subsequently, we obtain the class-specific masked features by integrating the up-sampled features with the corresponding pseudo" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 71, + 547, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 71, + 547, + 137 + ], + "spans": [ + { + "bbox": [ + 304, + 71, + 547, + 137 + ], + "type": "text", + "content": "labels " + }, + { + "bbox": [ + 304, + 71, + 547, + 137 + ], + "type": "inline_equation", + "content": "\\hat{y}_{(h,w,k)}^p" + }, + { + "bbox": [ + 304, + 71, + 547, + 137 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 71, + 547, + 137 + ], + "type": "inline_equation", + "content": "\\hat{y}_{(H,W/4,k)}^f" + }, + { + "bbox": [ + 304, + 71, + 547, + 137 + ], + "type": "text", + "content": ". 
Notably, the prototypes " + }, + { + "bbox": [ + 304, + 71, + 547, + 137 + ], + "type": "inline_equation", + "content": "\\sum_{a=1}^{18} (\\tau_p^k)_a" + }, + { + "bbox": [ + 304, + 71, + 547, + 137 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 71, + 547, + 137 + ], + "type": "inline_equation", + "content": "\\sum_{b=1}^{4} (\\tau_f^k)_b" + }, + { + "bbox": [ + 304, + 71, + 547, + 137 + ], + "type": "text", + "content": " for TP and FFP images are obtained by masked average pooling (MAP) operation, as shown in Fig. 3. Within each projection, PPAM first integrates the prototypes:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 321, + 138, + 546, + 170 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 138, + 546, + 170 + ], + "spans": [ + { + "bbox": [ + 321, + 138, + 546, + 170 + ], + "type": "interline_equation", + "content": "\\tau_ {p} ^ {k} = a v g \\left(\\sum_ {a = 1} ^ {1 8} \\left(\\tau_ {p} ^ {k}\\right) _ {a}\\right), \\quad \\tau_ {f} ^ {k} = a v g \\left(\\sum_ {b = 1} ^ {4} \\left(\\tau_ {f} ^ {k}\\right) _ {b}\\right). \\tag {3}", + "image_path": "c01eba56cdc221e438f6cd50434deee3c3340ae92af6c1106d40ace0c5a5ac01.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 172, + 547, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 172, + 547, + 255 + ], + "spans": [ + { + "bbox": [ + 304, + 172, + 547, + 255 + ], + "type": "text", + "content": "As shown in Fig. 
2, " + }, + { + "bbox": [ + 304, + 172, + 547, + 255 + ], + "type": "inline_equation", + "content": "\\tau_{p}^{k}" + }, + { + "bbox": [ + 304, + 172, + 547, + 255 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 172, + 547, + 255 + ], + "type": "inline_equation", + "content": "\\tau_{f}^{k}" + }, + { + "bbox": [ + 304, + 172, + 547, + 255 + ], + "type": "text", + "content": " are integrated together as " + }, + { + "bbox": [ + 304, + 172, + 547, + 255 + ], + "type": "inline_equation", + "content": "\\tau_{pf}^{k}" + }, + { + "bbox": [ + 304, + 172, + 547, + 255 + ], + "type": "text", + "content": " to preserve the less distortion characteristics of " + }, + { + "bbox": [ + 304, + 172, + 547, + 255 + ], + "type": "inline_equation", + "content": "\\tau_{p}^{k}" + }, + { + "bbox": [ + 304, + 172, + 547, + 255 + ], + "type": "text", + "content": " and the similar scale semantics of " + }, + { + "bbox": [ + 304, + 172, + 547, + 255 + ], + "type": "inline_equation", + "content": "\\tau_{f}^{k}" + }, + { + "bbox": [ + 304, + 172, + 547, + 255 + ], + "type": "text", + "content": ". The " + }, + { + "bbox": [ + 304, + 172, + 547, + 255 + ], + "type": "inline_equation", + "content": "\\tau_{pf}^{k}" + }, + { + "bbox": [ + 304, + 172, + 547, + 255 + ], + "type": "text", + "content": " is then used to update the panoramic global prototype " + }, + { + "bbox": [ + 304, + 172, + 547, + 255 + ], + "type": "inline_equation", + "content": "\\tau_{g}^{k}" + }, + { + "bbox": [ + 304, + 172, + 547, + 255 + ], + "type": "text", + "content": ", which is iteratively updated with " + }, + { + "bbox": [ + 304, + 172, + 547, + 255 + ], + "type": "inline_equation", + "content": "\\tau_{pf}^{k}" + }, + { + "bbox": [ + 304, + 172, + 547, + 255 + ], + "type": "text", + "content": ". 
To obtain more accurate and reliable prototypes, we update " + }, + { + "bbox": [ + 304, + 172, + 547, + 255 + ], + "type": "inline_equation", + "content": "\\tau_{g}^{k}" + }, + { + "bbox": [ + 304, + 172, + 547, + 255 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 172, + 547, + 255 + ], + "type": "inline_equation", + "content": "\\tau_{pf}^{k}" + }, + { + "bbox": [ + 304, + 172, + 547, + 255 + ], + "type": "text", + "content": " as follows:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 357, + 256, + 546, + 279 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 357, + 256, + 546, + 279 + ], + "spans": [ + { + "bbox": [ + 357, + 256, + 546, + 279 + ], + "type": "interline_equation", + "content": "\\tau_ {g} ^ {i} = \\frac {1}{i} \\left(\\tau_ {p f} ^ {k}\\right) ^ {i} + \\left(1 - \\frac {1}{i}\\right) \\left(\\tau_ {g} ^ {k}\\right) ^ {i - 1}, \\tag {4}", + "image_path": "64e9d8199d918ca87278baf2d562c4ae43a23998e248c00b0465c3ae7d5c911a.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 281, + 545, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 281, + 545, + 357 + ], + "spans": [ + { + "bbox": [ + 304, + 281, + 545, + 357 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 281, + 545, + 357 + ], + "type": "inline_equation", + "content": "(\\tau_{g}^{k})^{i}" + }, + { + "bbox": [ + 304, + 281, + 545, + 357 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 281, + 545, + 357 + ], + "type": "inline_equation", + "content": "(\\tau_{pf}^{k})^{i}" + }, + { + "bbox": [ + 304, + 281, + 545, + 357 + ], + "type": "text", + "content": " are the prototypes for category " + }, + { + "bbox": [ + 304, + 281, + 545, + 357 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 281, + 545, + 357 + ], + "type": "text", + "content": " in the " + }, + { + "bbox": [ + 304, + 281, + 545, + 357 + ], + 
"type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 281, + 545, + 357 + ], + "type": "text", + "content": "-th training epoch, " + }, + { + "bbox": [ + 304, + 281, + 545, + 357 + ], + "type": "inline_equation", + "content": "(\\tau_{g}^{k})^{i-1}" + }, + { + "bbox": [ + 304, + 281, + 545, + 357 + ], + "type": "text", + "content": " is the panoramic global prototype saved in the last training epoch, " + }, + { + "bbox": [ + 304, + 281, + 545, + 357 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 281, + 545, + 357 + ], + "type": "text", + "content": " is the current epoch number. The panoramic global prototype " + }, + { + "bbox": [ + 304, + 281, + 545, + 357 + ], + "type": "inline_equation", + "content": "\\tau_{g}^{k}" + }, + { + "bbox": [ + 304, + 281, + 545, + 357 + ], + "type": "text", + "content": " is then used to give supervision for the target prototype " + }, + { + "bbox": [ + 304, + 281, + 545, + 357 + ], + "type": "inline_equation", + "content": "\\tau_{t}^{k}" + }, + { + "bbox": [ + 304, + 281, + 545, + 357 + ], + "type": "text", + "content": " obtained from " + }, + { + "bbox": [ + 304, + 281, + 545, + 357 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 304, + 281, + 545, + 357 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 281, + 545, + 357 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 304, + 281, + 545, + 357 + ], + "type": "text", + "content": " with the same operations." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 358, + 546, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 358, + 546, + 561 + ], + "spans": [ + { + "bbox": [ + 304, + 358, + 546, + 561 + ], + "type": "text", + "content": "Besides extracting prototype knowledge from the source model, PPAM also fine-tunes the source model to improve the effectiveness of knowledge extraction. 
Specifically, since each ERP image can be projected to 4 FFP images, the source model's extracted features " + }, + { + "bbox": [ + 304, + 358, + 546, + 561 + ], + "type": "inline_equation", + "content": "f_{f}" + }, + { + "bbox": [ + 304, + 358, + 546, + 561 + ], + "type": "text", + "content": " have 4 pieces of FFP features. As the content of all the features is within the same ERP image, we propose to align the class-wise prototypes from each piece of the features in PPAM to enhance the model's performance. Concretely, the prototypes " + }, + { + "bbox": [ + 304, + 358, + 546, + 561 + ], + "type": "inline_equation", + "content": "\\sum_{\\alpha = 1}^{4}\\tau_{\\alpha}" + }, + { + "bbox": [ + 304, + 358, + 546, + 561 + ], + "type": "text", + "content": " of the four FFP features are obtained through the same operations with " + }, + { + "bbox": [ + 304, + 358, + 546, + 561 + ], + "type": "inline_equation", + "content": "\\tau_g^t" + }, + { + "bbox": [ + 304, + 358, + 546, + 561 + ], + "type": "text", + "content": ". Each FFP image captures a non-overlapping " + }, + { + "bbox": [ + 304, + 358, + 546, + 561 + ], + "type": "inline_equation", + "content": "90^\\circ" + }, + { + "bbox": [ + 304, + 358, + 546, + 561 + ], + "type": "text", + "content": " FoV, resulting in distinct distortions, and similar content in each FFP image. Aligning the prototypes from each FFP image enhances distortion-awareness ability in the source model and helps to explore complementary semantic content in each FFP image. 
The MSE loss is imposed between each two of the prototypes as follows:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 342, + 563, + 546, + 596 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 342, + 563, + 546, + 596 + ], + "spans": [ + { + "bbox": [ + 342, + 563, + 546, + 596 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {s f t} = \\sum_ {\\alpha \\neq \\beta} ^ {4} \\left\\{\\frac {1}{K} \\sum_ {k \\in K} \\left(\\left(\\tau_ {f} ^ {k}\\right) _ {\\alpha} - \\left(\\tau_ {f} ^ {k}\\right) _ {\\beta}\\right) ^ {2} \\right\\}. \\tag {5}", + "image_path": "8c3b7557d5e9dd6c6223e4fee1e52c5480d2accdcd308bf89831c15c243f3ff3.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 598, + 533, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 598, + 533, + 611 + ], + "spans": [ + { + "bbox": [ + 306, + 598, + 533, + 611 + ], + "type": "text", + "content": "Note that " + }, + { + "bbox": [ + 306, + 598, + 533, + 611 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{sft}" + }, + { + "bbox": [ + 306, + 598, + 533, + 611 + ], + "type": "text", + "content": " is used to fine-tune the source model " + }, + { + "bbox": [ + 306, + 598, + 533, + 611 + ], + "type": "inline_equation", + "content": "F_{S}" + }, + { + "bbox": [ + 306, + 598, + 533, + 611 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 621, + 436, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 621, + 436, + 635 + ], + "spans": [ + { + "bbox": [ + 306, + 621, + 436, + 635 + ], + "type": "text", + "content": "3.3. 
Knowledge Adaptation" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 642, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 547, + 713 + ], + "type": "text", + "content": "To adapt knowledge to the target domain, we impose the loss constraints on both predictions and prototypes and propose a cross-dual attention module (CDAM) at the feature level to better align the spatial and channel characteristics across the domains and projections. Specifically, the predictions of the FFP patch images are stitched to reconstruct an ERP image." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "27888" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 70, + 541, + 228 + ], + "blocks": [ + { + "bbox": [ + 50, + 70, + 541, + 228 + ], + "lines": [ + { + "bbox": [ + 50, + 70, + 541, + 228 + ], + "spans": [ + { + "bbox": [ + 50, + 70, + 541, + 228 + ], + "type": "table", + "html": "
MethodSFmIoURoadS.W.Build.WallFencePoleTr.L.Tr.S.Veget.Terr.SkyPers.CarΔ
PVT [39] SSL38.7455.3936.8780.8419.7215.188.045.392.1772.9132.0190.8126.7657.40-
PVT [39] MPA40.9070.7842.4782.1322.7910.7413.541.270.3071.1533.0389.6929.0764.73-
Source w/ seg-b135.8163.3624.0980.1315.6813.3916.267.420.0962.4520.2086.0523.0253.37-
SFDA w/ seg-b1 [25]38.2168.7830.7180.375.2618.9520.905.252.3670.1923.3090.2022.5557.90+2.40
ProDA w/ seg-b1 [51]37.3768.9330.8880.074.1718.6019.721.771.5670.0522.7390.6019.7157.04+2.73
GTA w/ seg-b1 [21]36.0064.6120.0479.048.0615.3619.866.022.1365.7717.7584.5626.7158.13+0.19
HCL w/ seg-b1 [18]38.3868.8230.4180.375.8820.1820.104.232.1170.5024.7489.8922.6559.04+2.57
DATC w/ seg-b1 [41]38.5469.4826.9680.6811.6415.2420.109.330.5566.1124.3185.1630.9060.58+2.73
Simt w/ seg-b1 [14]37.9468.4729.5179.626.7819.2019.482.311.3368.8526.5589.3022.3559.49+2.13
Ours w/ seg-b141.7870.1733.2481.6613.0623.4023.377.633.5971.0425.4689.3336.6064.60+5.97
Ours w/ seg-b242.1869.9932.2881.3410.6224.3524.299.193.6371.2830.0488.7537.4965.05+6.37
", + "image_path": "56972b681a2a3f0809411118061365a58e005c524a84ce04fc09b0d630aea13c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 236, + 546, + 258 + ], + "lines": [ + { + "bbox": [ + 46, + 236, + 546, + 258 + ], + "spans": [ + { + "bbox": [ + 46, + 236, + 546, + 258 + ], + "type": "text", + "content": "Table 1. Experimental results on the S-to-D scenario, the overlapped 13 classes of two datasets are used to test the UDA performance. The bold and underline denote the best and the second-best performance in source-free UDA methods, respectively." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 278, + 288, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 278, + 288, + 327 + ], + "spans": [ + { + "bbox": [ + 46, + 278, + 288, + 327 + ], + "type": "text", + "content": "The ERP image is then passed to the source model " + }, + { + "bbox": [ + 46, + 278, + 288, + 327 + ], + "type": "inline_equation", + "content": "F_{S}" + }, + { + "bbox": [ + 46, + 278, + 288, + 327 + ], + "type": "text", + "content": " to predict a pseudo label, which serves as the supervision for the ERP predictions of the target model " + }, + { + "bbox": [ + 46, + 278, + 288, + 327 + ], + "type": "inline_equation", + "content": "F_{T}" + }, + { + "bbox": [ + 46, + 278, + 288, + 327 + ], + "type": "text", + "content": ". For simplicity, we use the Cross-Entropy (CE) loss, which is formulated as:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 63, + 338, + 288, + 355 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 338, + 288, + 355 + ], + "spans": [ + { + "bbox": [ + 63, + 338, + 288, + 355 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {s u p} = C E (P, 1 _ {\\dot {k} = a r g m a x (\\{R e b u i l d (P _ {H, W / 4,:} ^ {f}) \\})}). 
\\tag {6}", + "image_path": "c05a29b84c8a2d8e7bc717b964d17bb65fa0238205c45d7b8972f4f8be7837a1.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 364, + 287, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 364, + 287, + 402 + ], + "spans": [ + { + "bbox": [ + 46, + 364, + 287, + 402 + ], + "type": "text", + "content": "And the prototype-level knowledge transfer loss is achieved by Mean Squared Error (MSE) loss between the panoramic global prototype " + }, + { + "bbox": [ + 46, + 364, + 287, + 402 + ], + "type": "inline_equation", + "content": "\\tau_g^k" + }, + { + "bbox": [ + 46, + 364, + 287, + 402 + ], + "type": "text", + "content": " and the target prototype " + }, + { + "bbox": [ + 46, + 364, + 287, + 402 + ], + "type": "inline_equation", + "content": "\\tau_t^k" + }, + { + "bbox": [ + 46, + 364, + 287, + 402 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 109, + 411, + 287, + 440 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 411, + 287, + 440 + ], + "spans": [ + { + "bbox": [ + 109, + 411, + 287, + 440 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {p p a} = \\frac {1}{K} \\sum_ {k \\in K} \\left(\\tau_ {g} ^ {k} - \\tau_ {t} ^ {k}\\right) ^ {2}. 
\\tag {7}", + "image_path": "08d5baaa00e3f2dc1690aa49cee1c3114c52b6e802defc7ac9afbb9cfbe7aa6f.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 449, + 288, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 449, + 288, + 521 + ], + "spans": [ + { + "bbox": [ + 46, + 449, + 288, + 521 + ], + "type": "text", + "content": "With loss " + }, + { + "bbox": [ + 46, + 449, + 288, + 521 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ppa}" + }, + { + "bbox": [ + 46, + 449, + 288, + 521 + ], + "type": "text", + "content": ", the prototypes are pushed together to transfer the source-extracted knowledge to the target domain. In summary, with the proposed PPAM, we effectively address the distortion and semantic mismatch problems at the prediction and prototype level, we now tackle the style discrepancy problem at the feature level." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "content": "Cross Dual Attention Module (CDAM). Inspired by the dual attention, focusing on spatial and channel characteristics [25], our CDAM imitates the spatial and channel-wise distributions of features to alleviate the style discrepancies. Different from [25] suggesting to minimize the distribution distance of the dual attention maps between the fake source (FFP images) and target data (ERP images), our CDAM focuses on aligning the distribution between FFP and ERP of the panoramic images rather than introducing additional parameters and computation cost in estimating source data. As shown in Fig. 
2, we reconstruct the FFP features " + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "inline_equation", + "content": "F^f" + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "content": " to ensure that the rebuilt feature " + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "inline_equation", + "content": "F'" + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "content": " has the same spatial size as " + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "content": ". Before the cross dual attention operation, we apply a Batch Normalization Statics (BNS) guided constraint on " + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "inline_equation", + "content": "F'" + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "content": ". 
Since the BNS of the source model should satisfy the feature distribution of the source data, we align " + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "content": " and" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 278, + 522, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 278, + 522, + 291 + ], + "spans": [ + { + "bbox": [ + 306, + 278, + 522, + 291 + ], + "type": "inline_equation", + "content": "F^{\\prime}" + }, + { + "bbox": [ + 306, + 278, + 522, + 291 + ], + "type": "text", + "content": " with BNS to alleviate the domain gaps as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 332, + 299, + 545, + 334 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 332, + 299, + 545, + 334 + ], + "spans": [ + { + "bbox": [ + 332, + 299, + 545, + 334 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {b n s} = \\left\\| \\mu (F) - \\bar {\\mu} \\right\\| _ {2} ^ {2} + \\left\\| \\sigma^ {2} (F) - \\bar {\\sigma} ^ {2} \\right\\| _ {2} ^ {2} \\\\ + \\left\\| \\mu \\left(F ^ {\\prime}\\right) - \\bar {\\mu} \\right\\| _ {2} ^ {2} + \\left\\| \\sigma^ {2} \\left(F ^ {\\prime}\\right) - \\bar {\\sigma} ^ {2} \\right\\| _ {2} ^ {2}, \\tag {8} \\\\ \\end{array}", + "image_path": "91f9ae0ba04186080593eba41e960a42a8f2f6c7286466f5277d99504a98a3f3.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 342, + 545, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 342, + 545, + 365 + ], + "spans": [ + { + "bbox": [ + 304, + 342, + 545, + 365 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 342, + 545, + 365 + ], + "type": "inline_equation", + "content": "\\bar{\\mu}" + }, + { + "bbox": [ + 304, + 342, + 545, + 365 + ], + "type": "text", + "content": " and " + }, + { + 
"bbox": [ + 304, + 342, + 545, + 365 + ], + "type": "inline_equation", + "content": "\\bar{\\sigma}^2" + }, + { + "bbox": [ + 304, + 342, + 545, + 365 + ], + "type": "text", + "content": " are the mean and variance parameters of the last BN layer in the source model " + }, + { + "bbox": [ + 304, + 342, + 545, + 365 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 304, + 342, + 545, + 365 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 365, + 546, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 365, + 546, + 437 + ], + "spans": [ + { + "bbox": [ + 305, + 365, + 546, + 437 + ], + "type": "text", + "content": "As shown in Fig. 2 (a), after aligned with BNS, the ERP feature " + }, + { + "bbox": [ + 305, + 365, + 546, + 437 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 305, + 365, + 546, + 437 + ], + "type": "text", + "content": " and the rebuilt feature " + }, + { + "bbox": [ + 305, + 365, + 546, + 437 + ], + "type": "inline_equation", + "content": "f'" + }, + { + "bbox": [ + 305, + 365, + 546, + 437 + ], + "type": "text", + "content": " are first reshaped to be " + }, + { + "bbox": [ + 305, + 365, + 546, + 437 + ], + "type": "inline_equation", + "content": "f \\in \\mathbb{R}^{N \\times C}" + }, + { + "bbox": [ + 305, + 365, + 546, + 437 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 365, + 546, + 437 + ], + "type": "inline_equation", + "content": "f' \\in \\mathbb{R}^{N \\times C}" + }, + { + "bbox": [ + 305, + 365, + 546, + 437 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 305, + 365, + 546, + 437 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 305, + 365, + 546, + 437 + ], + "type": "text", + "content": " is the number of pixels and " + }, + { + "bbox": [ + 305, + 365, + 546, + 437 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ 
+ 305, + 365, + 546, + 437 + ], + "type": "text", + "content": " is the channel number. Then we calculate the spatial-wise attention maps " + }, + { + "bbox": [ + 305, + 365, + 546, + 437 + ], + "type": "inline_equation", + "content": "M_{sp} \\in \\mathbb{R}^{N \\times C}" + }, + { + "bbox": [ + 305, + 365, + 546, + 437 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 365, + 546, + 437 + ], + "type": "inline_equation", + "content": "M_{sp}' \\in \\mathbb{R}^{N \\times C}" + }, + { + "bbox": [ + 305, + 365, + 546, + 437 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 305, + 365, + 546, + 437 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 305, + 365, + 546, + 437 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 365, + 546, + 437 + ], + "type": "inline_equation", + "content": "f'" + }, + { + "bbox": [ + 305, + 365, + 546, + 437 + ], + "type": "text", + "content": " by:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 345, + 445, + 486, + 479 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 345, + 445, + 486, + 479 + ], + "spans": [ + { + "bbox": [ + 345, + 445, + 486, + 479 + ], + "type": "interline_equation", + "content": "\\{M _ {s p} \\} _ {j i} = \\frac {\\exp (f _ {[ i : ]} ^ {\\prime} \\cdot f _ {[ : j ]} ^ {T})}{\\sum_ {i} ^ {N} \\exp (f _ {[ i : ]} ^ {\\prime} \\cdot f _ {[ : j ]} ^ {T})},", + "image_path": "56ad0a1270b76d992660b6e5f9c2c18ab27c36e6af98b08faec5a01819ac594e.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 345, + 481, + 545, + 515 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 345, + 481, + 545, + 515 + ], + "spans": [ + { + "bbox": [ + 345, + 481, + 545, + 515 + ], + "type": "interline_equation", + "content": "\\left\\{M _ {s p} ^ {\\prime} \\right\\} _ {j i} = \\frac {\\exp \\left(f _ {[ i : ]} \\cdot f _ {[ : j ]} ^ {T}\\right)}{\\sum_ {i} ^ {N} \\exp \\left(f _ {[ 
i : ]} \\cdot f _ {[ : j ]} ^ {T}\\right)}, \\tag {9}", + "image_path": "ed2c9a9508f40b371d418e424ca4e459f4f0494f4268b1f8caa6c3f95c9b5f28.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 522, + 547, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 522, + 547, + 571 + ], + "spans": [ + { + "bbox": [ + 306, + 522, + 547, + 571 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 306, + 522, + 547, + 571 + ], + "type": "inline_equation", + "content": "f^T" + }, + { + "bbox": [ + 306, + 522, + 547, + 571 + ], + "type": "text", + "content": " is the transpose of " + }, + { + "bbox": [ + 306, + 522, + 547, + 571 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 306, + 522, + 547, + 571 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 306, + 522, + 547, + 571 + ], + "type": "inline_equation", + "content": "\\{M\\}_{ij}" + }, + { + "bbox": [ + 306, + 522, + 547, + 571 + ], + "type": "text", + "content": " measures the impact of the " + }, + { + "bbox": [ + 306, + 522, + 547, + 571 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 306, + 522, + 547, + 571 + ], + "type": "text", + "content": "-th position on the " + }, + { + "bbox": [ + 306, + 522, + 547, + 571 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 306, + 522, + 547, + 571 + ], + "type": "text", + "content": "-th position. 
Similarly, the channel-wise attention maps " + }, + { + "bbox": [ + 306, + 522, + 547, + 571 + ], + "type": "inline_equation", + "content": "M_{ch} \\in \\mathbb{R}^{C \\times C}" + }, + { + "bbox": [ + 306, + 522, + 547, + 571 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 306, + 522, + 547, + 571 + ], + "type": "inline_equation", + "content": "M_{ch}' \\in \\mathbb{R}^{C \\times C}" + }, + { + "bbox": [ + 306, + 522, + 547, + 571 + ], + "type": "text", + "content": " can be obtained through:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 345, + 578, + 486, + 613 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 345, + 578, + 486, + 613 + ], + "spans": [ + { + "bbox": [ + 345, + 578, + 486, + 613 + ], + "type": "interline_equation", + "content": "\\left\\{M _ {c h} \\right\\} _ {j i} = \\frac {\\exp \\left(f _ {[ i : ]} ^ {T} \\cdot f _ {[ : j ]}\\right)}{\\sum_ {i} ^ {C} \\exp \\left(f _ {[ i : ]} ^ {\\prime} \\cdot f _ {[ : j ]} ^ {T}\\right)},", + "image_path": "6700e9d5fe32b8366fdad48771517088c4d0481d97db27044b0ce51a551d434f.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 346, + 615, + 545, + 649 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 346, + 615, + 545, + 649 + ], + "spans": [ + { + "bbox": [ + 346, + 615, + 545, + 649 + ], + "type": "interline_equation", + "content": "\\left\\{M _ {c h} ^ {\\prime} \\right\\} _ {j i} = \\frac {\\exp \\left(f _ {[ i : ]} ^ {T} \\cdot f _ {[ : j ]} ^ {\\prime}\\right)}{\\sum_ {i} ^ {C} \\exp \\left(f _ {[ i : ]} ^ {T} \\cdot f _ {[ : j ]} ^ {\\prime}\\right)}. 
\\tag {10}", + "image_path": "87ee6a2b8a5f58fea90b4c7870a3813b3cd1bc5090f14f175b23e53278abb4ee.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 656, + 545, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 656, + 545, + 692 + ], + "spans": [ + { + "bbox": [ + 306, + 656, + 545, + 692 + ], + "type": "text", + "content": "After obtaining the spatial and channel attention maps, the CDAM loss can be calculated with the Kullback-Liibler divergence (KL divergence) as follows:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 329, + 700, + 545, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 329, + 700, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 329, + 700, + 545, + 715 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {c d a} = K L \\left(M _ {s p}, M _ {s p} ^ {\\prime}\\right) + K L \\left(M _ {c h}, M _ {c h} ^ {\\prime}\\right) \\tag {11}", + "image_path": "cee68eef0a4be1cad533c7e5f5b40363184b48b4c09a474560a8ae7804167fcf.jpg" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "27889" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 70, + 545, + 232 + ], + "blocks": [ + { + "bbox": [ + 51, + 70, + 545, + 232 + ], + "lines": [ + { + "bbox": [ + 51, + 70, + 545, + 232 + ], + "spans": [ + { + "bbox": [ + 51, + 70, + 545, + 232 + ], + "type": "image", + "image_path": "56372506da771c02d126a5a0b84cc0c960978ea26b98df2281949a2bc7b8b3a3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 85, + 244, + 507, + 257 + ], + "lines": [ + { + "bbox": [ + 85, + 244, + 
507, + 257 + ], + "spans": [ + { + "bbox": [ + 85, + 244, + 507, + 257 + ], + "type": "text", + "content": "Figure 4. Example visualization results. (a) source, (b) SFDA [25], (c) DATC [41], (d) Ours, (e) Ground Truth (GT)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 52, + 266, + 545, + 412 + ], + "blocks": [ + { + "bbox": [ + 52, + 266, + 545, + 412 + ], + "lines": [ + { + "bbox": [ + 52, + 266, + 545, + 412 + ], + "spans": [ + { + "bbox": [ + 52, + 266, + 545, + 412 + ], + "type": "table", + "html": "
MethodSFmIoUPersonRiderCarTruckBusTrainMotorBikeΔ
Trans4PASS-T [49]53.1848.5416.9179.5865.3355.7684.6359.0537.61-
Trans4PASS-S [49]55.2248.8523.3681.0267.3169.5386.1360.8539.09-
DAFormer [17]54.6749.6925.1577.7063.0665.6186.6865.1248.13-
DPPASS [59]55.3052.0929.4079.1958.7347.2486.4866.6038.11-
DATR [58]56.8154.6229.5080.0367.3563.7587.6767.5737.10-
Source w/ seg-b138.6540.9310.8967.6736.8615.5626.4342.6827.16-
SFDA w/ seg-b1 [25]42.7041.658.4669.9747.4833.2472.0147.6132.77+4.05
DTAC w/ seg-b1 [41]43.0643.518.3570.1035.7940.7370.5249.4932.94+4.41
Ours w/ seg-b148.7845.3615.8375.7049.1655.6882.0754.8233.76+10.13
Ours w/ seg-b250.1249.9227.2276.2247.8164.1379.4756.8335.76+11.47
", + "image_path": "bf1d67e171db265b4f64b2da4f105428130491007ab0bf138c6e9aeb9b88fee9.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 420, + 546, + 444 + ], + "lines": [ + { + "bbox": [ + 46, + 420, + 546, + 444 + ], + "spans": [ + { + "bbox": [ + 46, + 420, + 546, + 444 + ], + "type": "text", + "content": "Table 2. Experimental results of 8 selected categories in panoramic semantic segmentation on C-to-D. SF: Source-free UDA. The bold and underline denote the best and the second-best performance in source-free UDA methods, respectively." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 47, + 464, + 133, + 476 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 464, + 133, + 476 + ], + "spans": [ + { + "bbox": [ + 47, + 464, + 133, + 476 + ], + "type": "text", + "content": "3.4. Optimization" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 484, + 288, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 484, + 288, + 509 + ], + "spans": [ + { + "bbox": [ + 46, + 484, + 288, + 509 + ], + "type": "text", + "content": "The training objective for learning the target model containing three losses is defined as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 84, + 524, + 287, + 538 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 524, + 287, + 538 + ], + "spans": [ + { + "bbox": [ + 84, + 524, + 287, + 538 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\lambda \\cdot \\mathcal {L} _ {p p a} + \\gamma \\cdot \\mathcal {L} _ {c d a} + \\mathcal {L} _ {b n s} + \\mathcal {L} _ {s u p} \\tag {12}", + "image_path": "27eb339de1e02adce8e554bd4ac0c8b2460983fb4d860bd50eff24dcc90d9936.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 552, + 287, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 552, + 
287, + 612 + ], + "spans": [ + { + "bbox": [ + 46, + 552, + 287, + 612 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 552, + 287, + 612 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ppa}" + }, + { + "bbox": [ + 46, + 552, + 287, + 612 + ], + "type": "text", + "content": " is the MSE loss from PPAM, " + }, + { + "bbox": [ + 46, + 552, + 287, + 612 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{cda}" + }, + { + "bbox": [ + 46, + 552, + 287, + 612 + ], + "type": "text", + "content": " refers to the KL loss from CDAM, " + }, + { + "bbox": [ + 46, + 552, + 287, + 612 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{sup}" + }, + { + "bbox": [ + 46, + 552, + 287, + 612 + ], + "type": "text", + "content": " denotes the CE loss for the prediction pseudo label supervision loss, " + }, + { + "bbox": [ + 46, + 552, + 287, + 612 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{bns}" + }, + { + "bbox": [ + 46, + 552, + 287, + 612 + ], + "type": "text", + "content": " refers to the BNS guided feature loss, and " + }, + { + "bbox": [ + 46, + 552, + 287, + 612 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 46, + 552, + 287, + 612 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 552, + 287, + 612 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 46, + 552, + 287, + 612 + ], + "type": "text", + "content": " are the trade-off weights of the proposed loss terms." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 631, + 195, + 644 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 631, + 195, + 644 + ], + "spans": [ + { + "bbox": [ + 47, + 631, + 195, + 644 + ], + "type": "text", + "content": "4. 
Experiments and Analysis" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 653, + 288, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 288, + 712 + ], + "type": "text", + "content": "As the first SFUDA method for panoramic image segmentation, there is no prior method for direct comparison. We thus empirically validate our method by comparing it with the existing UDA and panoramic segmentation methods on three widely used benchmarks." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 464, + 504, + 476 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 464, + 504, + 476 + ], + "spans": [ + { + "bbox": [ + 305, + 464, + 504, + 476 + ], + "type": "text", + "content": "4.1. Datasets and Implementation Details." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 482, + 547, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 482, + 547, + 613 + ], + "spans": [ + { + "bbox": [ + 304, + 482, + 547, + 613 + ], + "type": "text", + "content": "Cityscapes [11] is a real-world dataset collected for autonomous driving that contains street scenes. DensePASS [27] is a panoramic dataset designed for capturing diverse street scenes. SynPASS [50] is a synthetic dataset consisting of 9080 synthetic panoramic images. Stanford2D3D [3] is an indoor panoramic dataset which has 1413 panoramic images. Overall, the experiments are conducted on both real-world (Cityscapes-to-DensePASS, C-to-D, and Stanford2D3D-pinhole-to-Stanford2D3D-panoramic, SPinto-SPan) and synthetic-to-real (SynPASS-to-DensePASS, S-to-D) scenarios." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 623, + 432, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 623, + 432, + 635 + ], + "spans": [ + { + "bbox": [ + 306, + 623, + 432, + 635 + ], + "type": "text", + "content": "4.2. 
Experimental Results." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 641, + 547, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 547, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 547, + 712 + ], + "type": "text", + "content": "We first evaluate our proposed framework under the S-to-D scenario. The experimental results are shown in Tab. 1. Our proposed method consistently outperforms source-free UDA methods [25] and [41] and even achieves panoramic semantic segmentation performance closer to that of the UDA method Trans4PASS [50] which utilizes the source data" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "27890" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 52, + 70, + 544, + 178 + ], + "blocks": [ + { + "bbox": [ + 52, + 70, + 544, + 178 + ], + "lines": [ + { + "bbox": [ + 52, + 70, + 544, + 178 + ], + "spans": [ + { + "bbox": [ + 52, + 70, + 544, + 178 + ], + "type": "table", + "html": "
MethodSFmIoUCeilingChairDoorFloorSofaTableWallWindowΔ
PVT-S w/ MPA [49]X57.9585.8551.7618.3990.7835.9365.4375.0040.43-
Trans4PASS w/ MPA [49]X64.5285.0858.7234.9791.1246.2571.7277.5850.75-
Trans4PASS+ [50]X63.7390.6362.3024.7992.6235.7373.1678.7451.78-
Trans4PASS+ w/ MPA [50]X67.1690.0464.0442.8991.7438.3471.4581.2457.54-
SFDA [25]54.7679.4433.2052.0967.3622.5453.6469.3860.46-
Ours w/ b157.6373.8129.9863.6573.4931.7649.2572.8966.22+2.87
Ours w/ b265.7582.8838.0065.8186.7136.3266.1080.2969.88+10.99
", + "image_path": "197af54072faa422c4fff6e530f2010d61c8000877057bc61004858b5a7a3eb7.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 50, + 217, + 288, + 327 + ], + "blocks": [ + { + "bbox": [ + 52, + 186, + 539, + 198 + ], + "lines": [ + { + "bbox": [ + 52, + 186, + 539, + 198 + ], + "spans": [ + { + "bbox": [ + 52, + 186, + 539, + 198 + ], + "type": "text", + "content": "Table 3. Experimental results on indoor Stanford2D3D [3]. The bold denotes the best performance among UDA and SFUDA methods." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 217, + 288, + 327 + ], + "lines": [ + { + "bbox": [ + 50, + 217, + 288, + 327 + ], + "spans": [ + { + "bbox": [ + 50, + 217, + 288, + 327 + ], + "type": "table", + "html": "
Loss Function CombinationsC-to-DS-to-D
\\( \\mathcal{L}_{sup} \\)\\( \\mathcal{L}_{ppa} \\)\\( \\mathcal{L}_{sft} \\)\\( \\mathcal{L}_{cda} \\)\\( \\mathcal{L}_{bns} \\)mIoUΔmIoUΔ
38.65-35.81-
45.42+6.7738.37+2.56
46.23+7.5838.49+2.68
44.24+5.5938.38+2.57
44.79+6.1438.52+2.71
48.78+10.1341.78+5.97
", + "image_path": "d17ce85e1176d2c29240ac5087d1ac952b6305d7ed62ffd1d5641f3b79d30395.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 363, + 288, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 363, + 288, + 590 + ], + "spans": [ + { + "bbox": [ + 46, + 363, + 288, + 590 + ], + "type": "text", + "content": "in the adaptation procedure. Our proposed method brings significant performance gain of " + }, + { + "bbox": [ + 46, + 363, + 288, + 590 + ], + "type": "inline_equation", + "content": "+3.57\\%" + }, + { + "bbox": [ + 46, + 363, + 288, + 590 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 363, + 288, + 590 + ], + "type": "inline_equation", + "content": "+3.54\\%" + }, + { + "bbox": [ + 46, + 363, + 288, + 590 + ], + "type": "text", + "content": " with SegFormer-B1 backbone then SFDA [25] and DATC [41], respectively. We also provide the TSNE visualization in Fig. 5 (b) and qualitative results in Fig. 4. Apparently, our method gains a significant improvement in distinguishing the pixels in panoramic images in both prediction and high-level feature space. As shown in Tab. 2, we then evaluate our proposed framework under the C-to-D scenario. Our proposed method significantly outperforms source-free methods [25, 41] and some panoramic semantic segmentation methods [43, 46, 48]. Specifically, our method achieves a significant performance gain over SFDA [25] and DTAC [41] by " + }, + { + "bbox": [ + 46, + 363, + 288, + 590 + ], + "type": "inline_equation", + "content": "+6.08\\%" + }, + { + "bbox": [ + 46, + 363, + 288, + 590 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 363, + 288, + 590 + ], + "type": "inline_equation", + "content": "+5.72\\%" + }, + { + "bbox": [ + 46, + 363, + 288, + 590 + ], + "type": "text", + "content": ", respectively. 
This demonstrates that our proposed method endowed by PPAM and CDAM is more suitable for panoramic semantic segmentation tasks. Furthermore, as shown in the qualitative results in Fig. 4, our method achieves better segmentation in driving-related categories, such as rider and car." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": "We also provide TSNE visualizations [35] in Fig. 5 (a), showing that our proposed method brings significant improvements in distinguishing pixels from different categories in high-level feature space. Additionally, we evaluated our proposed method on the Stanford2D3D [3] dataset and compared it with the SFDA [25] and MPA [50] methods. As shown in the following table, our proposed method significantly outperforms the SFDA by " + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "inline_equation", + "content": "+7.09\\%" + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": " mIoU and is on par with the MPA method using source data " + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "inline_equation", + "content": "(61.85\\%" + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": " vs. " + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "inline_equation", + "content": "67.16\\%)" + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": ". 
Notably, for some categories, such as door " + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "inline_equation", + "content": "(57.90\\%" + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 309, + 217, + 545, + 252 + ], + "blocks": [ + { + "bbox": [ + 61, + 335, + 272, + 346 + ], + "lines": [ + { + "bbox": [ + 61, + 335, + 272, + 346 + ], + "spans": [ + { + "bbox": [ + 61, + 335, + 272, + 346 + ], + "type": "text", + "content": "Table 4. Ablation study of different module combinations." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 217, + 545, + 252 + ], + "lines": [ + { + "bbox": [ + 309, + 217, + 545, + 252 + ], + "spans": [ + { + "bbox": [ + 309, + 217, + 545, + 252 + ], + "type": "table", + "html": "
Combinationsτg+τpτg+τfτg+τp+τf
mIoU44.1444.2845.42
", + "image_path": "cbb698b08d6ba9c23e795092f41df038f18af5013d33e193185423d1d7a81479.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 315, + 260, + 535, + 271 + ], + "lines": [ + { + "bbox": [ + 315, + 260, + 535, + 271 + ], + "spans": [ + { + "bbox": [ + 315, + 260, + 535, + 271 + ], + "type": "text", + "content": "Table 5. Ablation study of different prototype combinations." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 305, + 288, + 545, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 288, + 545, + 312 + ], + "spans": [ + { + "bbox": [ + 305, + 288, + 545, + 312 + ], + "type": "text", + "content": "vs. " + }, + { + "bbox": [ + 305, + 288, + 545, + 312 + ], + "type": "inline_equation", + "content": "42.89\\%" + }, + { + "bbox": [ + 305, + 288, + 545, + 312 + ], + "type": "text", + "content": " ) and window " + }, + { + "bbox": [ + 305, + 288, + 545, + 312 + ], + "type": "inline_equation", + "content": "(68.06\\%" + }, + { + "bbox": [ + 305, + 288, + 545, + 312 + ], + "type": "text", + "content": " vs. " + }, + { + "bbox": [ + 305, + 288, + 545, + 312 + ], + "type": "inline_equation", + "content": "57.54\\%)" + }, + { + "bbox": [ + 305, + 288, + 545, + 312 + ], + "type": "text", + "content": ", our method event outperforms the MPA [50]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 323, + 398, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 323, + 398, + 335 + ], + "spans": [ + { + "bbox": [ + 306, + 323, + 398, + 335 + ], + "type": "text", + "content": "5. Ablation Study" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 342, + 547, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 342, + 547, + 521 + ], + "spans": [ + { + "bbox": [ + 304, + 342, + 547, + 521 + ], + "type": "text", + "content": "Different Loss Function Combinations. 
To assess the effectiveness of the proposed modules, we conduct ablation experiments on both real-world and synthetic-to-real scenarios with various loss combinations. All of the proposed modules and loss functions have a positive impact on improving segmentation performance. Notably, our PPAM yields a significant performance gain of " + }, + { + "bbox": [ + 304, + 342, + 547, + 521 + ], + "type": "inline_equation", + "content": "+6.77\\%" + }, + { + "bbox": [ + 304, + 342, + 547, + 521 + ], + "type": "text", + "content": ". This indicates that PPAM alleviates the intricate semantics and distortion problem with the tangent, and our proposed FFP projection is valid. This is further supported by the qualitative results presented in Fig. 4. Additionally, our proposed CDAM achieves a performance gain of " + }, + { + "bbox": [ + 304, + 342, + 547, + 521 + ], + "type": "inline_equation", + "content": "+5.59\\%" + }, + { + "bbox": [ + 304, + 342, + 547, + 521 + ], + "type": "text", + "content": " compared to the source baseline, which means that CDAM imitates the spatial and channel-wise distributions of ERP and FFP features and further addresses the style discrepancy problems." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 522, + 547, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 522, + 547, + 653 + ], + "spans": [ + { + "bbox": [ + 304, + 522, + 547, + 653 + ], + "type": "text", + "content": "Ablation of Different Prototype Combinations. 
To validate the effectiveness of all the prototypes in PPAM, we conduct experiments on C-to-D using SegFormer-B1 and only " + }, + { + "bbox": [ + 304, + 522, + 547, + 653 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{sup}" + }, + { + "bbox": [ + 304, + 522, + 547, + 653 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 522, + 547, + 653 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ppa}" + }, + { + "bbox": [ + 304, + 522, + 547, + 653 + ], + "type": "text", + "content": ". The results of the performance with different prototype combinations are presented in Tab. 5. Both prototypes from TP and FFP have a positive effect on PPAM, with " + }, + { + "bbox": [ + 304, + 522, + 547, + 653 + ], + "type": "inline_equation", + "content": "\\tau_{p}" + }, + { + "bbox": [ + 304, + 522, + 547, + 653 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 522, + 547, + 653 + ], + "type": "inline_equation", + "content": "\\tau_{f}" + }, + { + "bbox": [ + 304, + 522, + 547, + 653 + ], + "type": "text", + "content": " resulting in mIoU improvements of " + }, + { + "bbox": [ + 304, + 522, + 547, + 653 + ], + "type": "inline_equation", + "content": "+5.49\\%" + }, + { + "bbox": [ + 304, + 522, + 547, + 653 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 522, + 547, + 653 + ], + "type": "inline_equation", + "content": "+5.63\\%" + }, + { + "bbox": [ + 304, + 522, + 547, + 653 + ], + "type": "text", + "content": ", respectively, compared to the source baseline. When both prototypes are combined together, there is a mIoU gain of " + }, + { + "bbox": [ + 304, + 522, + 547, + 653 + ], + "type": "inline_equation", + "content": "+6.77\\%" + }, + { + "bbox": [ + 304, + 522, + 547, + 653 + ], + "type": "text", + "content": ", indicating that their combination is better for prototype-level adaptation." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "type": "text", + "content": "Dual Attention vs. Cross Dual Attention. The dual attention (DA) approach proposed in SFDA [25] aligns the spatial and channel characteristics of features between the fake source and target data. In contrast, our cross dual attention (CDA) approach aligns the distribution between different" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "27891" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 71, + 286, + 270 + ], + "blocks": [ + { + "bbox": [ + 52, + 71, + 286, + 270 + ], + "lines": [ + { + "bbox": [ + 52, + 71, + 286, + 270 + ], + "spans": [ + { + "bbox": [ + 52, + 71, + 286, + 270 + ], + "type": "image", + "image_path": "58fb62606424e11248d1022b69f80d5a25d367c9d1832cfb58073bd549911574.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 277, + 288, + 300 + ], + "lines": [ + { + "bbox": [ + 47, + 277, + 288, + 300 + ], + "spans": [ + { + "bbox": [ + 47, + 277, + 288, + 300 + ], + "type": "text", + "content": "Figure 5. TSNE visualization of (a) Cityscapes-to-DensePASS and (b) SynPASS-to-DensePASS." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 50, + 313, + 294, + 356 + ], + "blocks": [ + { + "bbox": [ + 50, + 313, + 294, + 356 + ], + "lines": [ + { + "bbox": [ + 50, + 313, + 294, + 356 + ], + "spans": [ + { + "bbox": [ + 50, + 313, + 294, + 356 + ], + "type": "table", + "html": "
FoVw/o60°72°90°120°180°360°
mIoU38.6544.0344.1644.2844.0241.6540.31
Δ-+5.38+5.51+5.63+5.37+3.00+1.66
", + "image_path": "77660571ebb2e84204d58a906915307d2b3bc4e03ad009015b58f6d796d9988e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 398, + 287, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 398, + 287, + 495 + ], + "spans": [ + { + "bbox": [ + 46, + 398, + 287, + 495 + ], + "type": "text", + "content": "projections of the same spherical data, specifically ERP and FFP, resulting in more robust and stable knowledge transfer. Moreover, in our SFDA, we obtain spatial and channel characteristics across features, whereas DA operates within features. We also evaluate DA on the C-to-D scenario, and our CDA achieves " + }, + { + "bbox": [ + 46, + 398, + 287, + 495 + ], + "type": "inline_equation", + "content": "44.24\\%" + }, + { + "bbox": [ + 46, + 398, + 287, + 495 + ], + "type": "text", + "content": " mIoU, while DA only reaches " + }, + { + "bbox": [ + 46, + 398, + 287, + 495 + ], + "type": "inline_equation", + "content": "41.53\\%" + }, + { + "bbox": [ + 46, + 398, + 287, + 495 + ], + "type": "text", + "content": " mIoU. This indicates the proposed CDA is better for SFUDA in panoramic semantic segmentation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 495, + 288, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 495, + 288, + 651 + ], + "spans": [ + { + "bbox": [ + 46, + 495, + 288, + 651 + ], + "type": "text", + "content": "Field-of-view of FFP. Most existing approaches for panoramic semantic segmentation, such as those proposed in [49, 50, 59], primarily focus on alleviating distortion by introducing distortion-aware components and distinct projection strategies. However, as discussed in Sec. 
3.2, " + }, + { + "bbox": [ + 46, + 495, + 288, + 651 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 495, + 288, + 651 + ], + "type": "text", + "content": " images contain more intricate semantic information and object correspondence than the pinhole images, resulting in an obvious semantic mismatch between domains. Therefore, we propose the Fixed FoV Pooling (FFP) strategy to address the semantic mismatch. Experimental results show that the fixed FoV is the most influential factor in FFP, with an FoV of " + }, + { + "bbox": [ + 46, + 495, + 288, + 651 + ], + "type": "inline_equation", + "content": "90^{\\circ}" + }, + { + "bbox": [ + 46, + 495, + 288, + 651 + ], + "type": "text", + "content": " achieving the best segmentation performance, as shown in Tab. 6, with a mIoU of " + }, + { + "bbox": [ + 46, + 495, + 288, + 651 + ], + "type": "inline_equation", + "content": "44.28\\%" + }, + { + "bbox": [ + 46, + 495, + 288, + 651 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 652, + 288, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 652, + 288, + 700 + ], + "spans": [ + { + "bbox": [ + 46, + 652, + 288, + 700 + ], + "type": "text", + "content": "Ablation of Hyper-parameters. We now show the influence of hyperparameters " + }, + { + "bbox": [ + 46, + 652, + 288, + 700 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 46, + 652, + 288, + 700 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 652, + 288, + 700 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 46, + 652, + 288, + 700 + ], + "type": "text", + "content": ", which are the weights for the KL loss in CDAM and the MSE loss in PPAM, respectively. The experimental results are provided in Tab. 7." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 701, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 701, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 701, + 288, + 714 + ], + "type": "text", + "content": "Fine-tuning the Source Model. As the pre-trained model" + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 311, + 70, + 553, + 155 + ], + "blocks": [ + { + "bbox": [ + 63, + 363, + 270, + 376 + ], + "lines": [ + { + "bbox": [ + 63, + 363, + 270, + 376 + ], + "spans": [ + { + "bbox": [ + 63, + 363, + 270, + 376 + ], + "type": "text", + "content": "Table 6. Ablation study of the FoV of our proposed FFP." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 311, + 70, + 553, + 155 + ], + "lines": [ + { + "bbox": [ + 311, + 70, + 553, + 155 + ], + "spans": [ + { + "bbox": [ + 311, + 70, + 553, + 155 + ], + "type": "table", + "html": "
γ00.010.020.050.10.2
mIoU38.6542.0543.2443.2844.2443.07
Δ-+3.40+4.59+4.63+5.59+4.42
λ0506080100120
mIoU38.6543.1343.2245.3645.4245.34
Δ-+4.48+4.57+6.71+6.77+6.69
", + "image_path": "7aa3ea0d82e66d82bb6e14d28ba461098541c56cd1cc1353e51034ea2d92a5e8.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 360, + 163, + 490, + 174 + ], + "lines": [ + { + "bbox": [ + 360, + 163, + 490, + 174 + ], + "spans": [ + { + "bbox": [ + 360, + 163, + 490, + 174 + ], + "type": "text", + "content": "Table 7. Ablation study of " + }, + { + "bbox": [ + 360, + 163, + 490, + 174 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 360, + 163, + 490, + 174 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 360, + 163, + 490, + 174 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 360, + 163, + 490, + 174 + ], + "type": "text", + "content": " ." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "spans": [ + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "text", + "content": "in the source (pinhole) domain is not an ideal model for the target (panoramic) image domain, we propose to fine-tune the source model with the loss function " + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{sft}" + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "text", + "content": ", as described in Sec. 3.2. Tab. 4 demonstrates the effectiveness of the proposed " + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{sft}" + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "text", + "content": ". 
When combined with the prototypical adaptation loss " + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ppa}" + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "text", + "content": ", adding " + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{sft}" + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "text", + "content": " results in a " + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "inline_equation", + "content": "6.77\\%" + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "text", + "content": " mIoU gain compared with the source baseline of " + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "inline_equation", + "content": "38.65\\%" + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "text", + "content": ". We present the performance metrics derived solely from the loss " + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{sft}" + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "text", + "content": " of PPAM: C-2-D registers at " + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "inline_equation", + "content": "44.94\\%" + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "text", + "content": " while S-2-D records " + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "inline_equation", + "content": "36.74\\%" + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "text", + "content": ". These results underscore the efficacy of " + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{sft}" + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "text", + "content": " integrated within our PPAM module. 
Concerning transfer-ability, our " + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{sft}" + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "text", + "content": " exhibits compatibility with various projection methods, e.g., cube map. At its core, our fine-tuning loss seeks to align all projection images originating from the same panoramic source, irrespective of the employed projection technique. This intrinsic adaptability facilitates the application of " + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{sft}" + }, + { + "bbox": [ + 304, + 189, + 547, + 405 + ], + "type": "text", + "content": " across diverse projections. More results refer to the supplementary material." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 415, + 379, + 428 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 415, + 379, + 428 + ], + "spans": [ + { + "bbox": [ + 306, + 415, + 379, + 428 + ], + "type": "text", + "content": "6. Conclusion" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 435, + 547, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 435, + 547, + 532 + ], + "spans": [ + { + "bbox": [ + 304, + 435, + 547, + 532 + ], + "type": "text", + "content": "In this paper, we investigated a new problem of achieving SFUDA for panoramic semantic segmentation. To this end, we proposed an end-to-end SFUDA framework to address the domain shifts, including semantic mismatch, distortion, and style discrepancies, between pinhole and panoramic domains. Experiments on both real-world and synthetic benchmarks show that our proposed framework outperforms prior approaches and is on par with the methods using source data." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 531, + 547, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 531, + 547, + 662 + ], + "spans": [ + { + "bbox": [ + 304, + 531, + 547, + 662 + ], + "type": "text", + "content": "Limitation and future work. One limitation of our proposed framework is the computational cost brought by the tangent projection during training, and there is still room for improvements in segmentation performance. However, components in our approach such as panoramic prototypes and fixed FoV projection have significant implications for the " + }, + { + "bbox": [ + 304, + 531, + 547, + 662 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 304, + 531, + 547, + 662 + ], + "type": "text", + "content": " vision, especially for the panoramic semantic segmentation. In the future, we plan to utilize the large language models (LLMs) and Multi-modal large language models (MLLMs) to alleviate the domain gaps, such as the semantic mismatches between pinhole and panoramic images." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 662, + 547, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 662, + 547, + 711 + ], + "spans": [ + { + "bbox": [ + 304, + 662, + 547, + 711 + ], + "type": "text", + "content": "Acknowledgement This paper is supported by the National Natural Science Foundation of China (NSF) under Grant No. NSFC22FYT45 and the Guangzhou City, University and Enterprise Joint Fund under Grant No.SL2022A03J01278." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "27892" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "type": "text", + "content": "[1] Hao Ai, Zidong Cao, Jinjing Zhu, Haotian Bai, Yucheng Chen, and Ling Wang. Deep learning for omnidirectional vision: A survey and new perspectives. arXiv preprint arXiv:2205.10468, 2022. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 137, + 288, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 137, + 288, + 180 + ], + "spans": [ + { + "bbox": [ + 53, + 137, + 288, + 180 + ], + "type": "text", + "content": "[2] Nikita Araslanov and Stefan Roth. Self-supervised augmentation consistency for adapting semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15384-15394, 2021. 
2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 182, + 288, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 182, + 288, + 215 + ], + "spans": [ + { + "bbox": [ + 53, + 182, + 288, + 215 + ], + "type": "text", + "content": "[3] Iro Armeni, Sasha Sax, Amir R Zamir, and Silvio Savarese. Joint 2d-3d-semantic data for indoor scene understanding. arXiv preprint arXiv:1702.01105, 2017. 6, 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 217, + 288, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 217, + 288, + 258 + ], + "spans": [ + { + "bbox": [ + 53, + 217, + 288, + 258 + ], + "type": "text", + "content": "[4] Mathilde Bateson, Hoel Kervadec, Jose Dolz, Herve Lombaert, and Ismail Ben Ayed. Source-free domain adaptation for image segmentation. Medical Image Analysis, 82:102617, 2022. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 262, + 288, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 262, + 288, + 317 + ], + "spans": [ + { + "bbox": [ + 53, + 262, + 288, + 317 + ], + "type": "text", + "content": "[5] Chaoqi Chen, Weiping Xie, Tingyang Xu, Wenbing Huang, Yu Rong, Xinghao Ding, Yue Huang, and Junzhou Huang. Progressive feature alignment for unsupervised domain adaptation. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 627-636, 2019. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 319, + 288, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 319, + 288, + 361 + ], + "spans": [ + { + "bbox": [ + 53, + 319, + 288, + 361 + ], + "type": "text", + "content": "[6] Jialei Chen, Daisuke Deguchi, Chenkai Zhang, Xu Zheng, and Hiroshi Murase. Frozen is better than learning: A new design of prototype-based classifier for semantic segmentation. Available at SSRN 4617170. 
2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 364, + 288, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 364, + 288, + 407 + ], + "spans": [ + { + "bbox": [ + 53, + 364, + 288, + 407 + ], + "type": "text", + "content": "[7] Jialei Chen, Chong Fu, Haoyu Xie, Xu Zheng, Rong Geng, and Chiu-Wing Sham. Uncertainty teacher with dense focal loss for semi-supervised medical image segmentation. Computers in Biology and Medicine, 149:106034, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 409, + 288, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 409, + 288, + 453 + ], + "spans": [ + { + "bbox": [ + 53, + 409, + 288, + 453 + ], + "type": "text", + "content": "[8] Jialei Chen, Daisuke Deguchi, Chenkai Zhang, Xu Zheng, and Hiroshi Murase. Clip is also a good teacher: A new learning framework for inductive zero-shot semantic segmentation. arXiv preprint arXiv:2310.02296, 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 455, + 288, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 455, + 288, + 498 + ], + "spans": [ + { + "bbox": [ + 53, + 455, + 288, + 498 + ], + "type": "text", + "content": "[9] Minghao Chen, Hongyang Xue, and Deng Cai. Domain adaptation for semantic segmentation with maximum squares loss. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2090-2099, 2019. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 501, + 288, + 554 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 501, + 288, + 554 + ], + "spans": [ + { + "bbox": [ + 48, + 501, + 288, + 554 + ], + "type": "text", + "content": "[10] Jaehoon Choi, Taekyung Kim, and Changick Kim. Self-ensembling with gan-based data augmentation for domain adaptation in semantic segmentation. 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pages 6829–6839, 2019. 
2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 556, + 288, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 556, + 288, + 621 + ], + "spans": [ + { + "bbox": [ + 48, + 556, + 288, + 621 + ], + "type": "text", + "content": "[11] Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In Proc. of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 624, + 288, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 288, + 668 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 288, + 668 + ], + "type": "text", + "content": "[12] Marc Eder, Mykhailo Shvets, John Lim, and Jan-Michael Frahm. Tangent images for mitigating spherical distortion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12426-12434, 2020. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 670, + 288, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 288, + 713 + ], + "type": "text", + "content": "[13] Francois Fleuret et al. Uncertainty reduction for model adaptation in semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9613-9623, 2021. 
2" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 307, + 73, + 547, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 547, + 128 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 547, + 128 + ], + "type": "text", + "content": "[14] Xiaoqing Guo, Jie Liu, Tongliang Liu, and Yixuan Yuan. Simt: Handling open-set noise for domain adaptive semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7032-7041, 2022. 2, 5" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 129, + 547, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 547, + 162 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 547, + 162 + ], + "type": "text", + "content": "[15] Judy Hoffman, Dequan Wang, Fisher Yu, and Trevor Darrell. Fcts in the wild: Pixel-level adversarial and constraint-based adaptation. ArXiv, abs/1612.02649, 2016. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 163, + 547, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 163, + 547, + 206 + ], + "spans": [ + { + "bbox": [ + 308, + 163, + 547, + 206 + ], + "type": "text", + "content": "[16] Judy Hoffman, Eric Tzeng, Taesung Park, Jun-Yan Zhu, Phillip Isola, Kate Saenko, Alexei A. Efros, and Trevor Darrell. Cycada: Cycle-consistent adversarial domain adaptation. In ICML, 2018. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 209, + 547, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 209, + 547, + 262 + ], + "spans": [ + { + "bbox": [ + 308, + 209, + 547, + 262 + ], + "type": "text", + "content": "[17] Lukas Hoyer, Dengxin Dai, and Luc Van Gool. 
Daformer: Improving network architectures and training strategies for domain-adaptive semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9924-9935, 2022. 2, 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 264, + 547, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 264, + 547, + 319 + ], + "spans": [ + { + "bbox": [ + 308, + 264, + 547, + 319 + ], + "type": "text", + "content": "[18] Jiaxing Huang, Dayan Guan, Aoran Xiao, and Shijian Lu. Model adaptation: Historical contrastive learning for unsupervised domain adaptation without source data. Advances in Neural Information Processing Systems, 34:3635-3649, 2021. 2, 5" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 320, + 547, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 320, + 547, + 365 + ], + "spans": [ + { + "bbox": [ + 308, + 320, + 547, + 365 + ], + "type": "text", + "content": "[19] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 365, + 547, + 410 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 365, + 547, + 410 + ], + "spans": [ + { + "bbox": [ + 308, + 365, + 547, + 410 + ], + "type": "text", + "content": "[20] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 
1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 411, + 547, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 411, + 547, + 465 + ], + "spans": [ + { + "bbox": [ + 308, + 411, + 547, + 465 + ], + "type": "text", + "content": "[21] Jogendra Nath Kundu, Akshay Kulkarni, Amit Singh, Varun Jampani, and R Venkatesh Babu. Generalize then adapt: Source-free domain adaptive semantic segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7046-7056, 2021. 2, 5" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 468, + 547, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 468, + 547, + 511 + ], + "spans": [ + { + "bbox": [ + 308, + 468, + 547, + 511 + ], + "type": "text", + "content": "[22] Yunsheng Li, Lu Yuan, and Nuno Vasconcelos. Bidirectional learning for domain adaptation of semantic segmentation. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6929-6938, 2019. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 513, + 547, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 513, + 547, + 567 + ], + "spans": [ + { + "bbox": [ + 308, + 513, + 547, + 567 + ], + "type": "text", + "content": "[23] Yuyan Li, Yuliang Guo, Zhixin Yan, Xinyu Huang, Ye Duan, and Liu Ren. Omnifusion: 360 monocular depth estimation via geometry-aware fusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2801-2810, 2022. 4" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 568, + 547, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 568, + 547, + 612 + ], + "spans": [ + { + "bbox": [ + 308, + 568, + 547, + 612 + ], + "type": "text", + "content": "[24] Mengyi Liu, Shuhui Wang, Yulan Guo, Yuan He, and Hui Xue. 
Pano-sfmlearner: Self-supervised multi-task learning of depth and semantics in panoramic videos. IEEE Signal Processing Letters, 28:832-836, 2021. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 613, + 547, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 613, + 547, + 657 + ], + "spans": [ + { + "bbox": [ + 308, + 613, + 547, + 657 + ], + "type": "text", + "content": "[25] Yang Liu, Wei Zhang, and Jun Wang. Source-free domain adaptation for semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1215-1224, 2021. 2, 3, 5, 6, 7" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 658, + 547, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 658, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 658, + 547, + 713 + ], + "type": "text", + "content": "[26] Yawei Luo, Liang Zheng, Tao Guan, Junqing Yu, and Yi Yang. Taking a closer look at domain shift: Category-level adversaries for semantics consistent domain adaptation. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2502-2511, 2019. 
2" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "27893" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 288, + 139 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 288, + 139 + ], + "type": "text", + "content": "[27] Chaoxiang Ma, Jiaming Zhang, Kailun Yang, Alina Roitberg, and Rainer Stiefelhagen. Densepass: Dense panoramic semantic segmentation via unsupervised domain adaptation with attention-augmented context exchange. In 2021 IEEE International Intelligent Transportation Systems Conference (ITSC), pages 2766-2772. IEEE, 2021. 6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 140, + 288, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 140, + 288, + 184 + ], + "spans": [ + { + "bbox": [ + 48, + 140, + 288, + 184 + ], + "type": "text", + "content": "[28] Luke Melas-Kyriazi and Arjun K. Manrai. Pixmatch: Unsupervised domain adaptation via pixelwise consistency training. 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 12430-12440, 2021. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 186, + 288, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 186, + 288, + 239 + ], + "spans": [ + { + "bbox": [ + 48, + 186, + 288, + 239 + ], + "type": "text", + "content": "[29] Zak Murez, Soheil Kolouri, David J. Kriegman, Ravi Ramamoorthi, and Kyungnam Kim. 
Image to image translation for domain adaptation. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4500-4509, 2018. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 241, + 288, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 241, + 288, + 297 + ], + "spans": [ + { + "bbox": [ + 48, + 241, + 288, + 297 + ], + "type": "text", + "content": "[30] Fei Pan, Inkyu Shin, Francois Rameau, Seokju Lee, and In So Kweon. Unsupervised intra-domain adaptation for semantic segmentation through self-supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3764-3773, 2020. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 298, + 288, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 298, + 288, + 353 + ], + "spans": [ + { + "bbox": [ + 48, + 298, + 288, + 353 + ], + "type": "text", + "content": "[31] Swami Sankaranarayanan, Yogesh Balaji, Arpit Jain, Ser-Nam Lim, and Rama Chellappa. Learning from synthetic data: Addressing domain shift for semantic segmentation. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3752-3761, 2018. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 354, + 288, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 354, + 288, + 408 + ], + "spans": [ + { + "bbox": [ + 48, + 354, + 288, + 408 + ], + "type": "text", + "content": "[32] Weifa Shen, Qixiong Wang, Hongxiang Jiang, Sen Li, and Jihao Yin. Unsupervised domain adaptation for semantic segmentation via self-supervision. In 2021 IEEE International Geoscience and Remote Sensing Symposium IGARSS, pages 2747-2750. IEEE, 2021. 
2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 410, + 288, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 410, + 288, + 453 + ], + "spans": [ + { + "bbox": [ + 48, + 410, + 288, + 453 + ], + "type": "text", + "content": "[33] Serban Stan and Mohammad Rostami. Unsupervised model adaptation for continual semantic segmentation. In Proceedings of the AAAI conference on artificial intelligence, pages 2593-2601, 2021. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 456, + 288, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 456, + 288, + 510 + ], + "spans": [ + { + "bbox": [ + 48, + 456, + 288, + 510 + ], + "type": "text", + "content": "[34] Yi-Hsuan Tsai, Wei-Chih Hung, Samuel Schulter, Kihyuk Sohn, Ming-Hsuan Yang, and Manmohan Chandraker. Learning to adapt structured output space for semantic segmentation. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7472-7481, 2018. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 512, + 288, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 512, + 288, + 544 + ], + "spans": [ + { + "bbox": [ + 48, + 512, + 288, + 544 + ], + "type": "text", + "content": "[35] Laurens Van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. Journal of machine learning research, 9(11), 2008. 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 545, + 288, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 545, + 288, + 601 + ], + "spans": [ + { + "bbox": [ + 48, + 545, + 288, + 601 + ], + "type": "text", + "content": "[36] Tuan-Hung Vu, Himalaya Jain, Maxime Bucher, Matthieu Cord, and Patrick Pérez. Advent: Adversarial entropy minimization for domain adaptation in semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2517-2526, 2019. 
2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 602, + 288, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 602, + 288, + 657 + ], + "spans": [ + { + "bbox": [ + 48, + 602, + 288, + 657 + ], + "type": "text", + "content": "[37] Tuan-Hung Vu, Himalaya Jain, Maxime Bucher, Matthieu Cord, and Patrick Pérez. Dada: Depth-aware domain adaptation in semantic segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7364-7373, 2019. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 658, + 288, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 288, + 712 + ], + "type": "text", + "content": "[38] Qin Wang, Dengxin Dai, Lukas Hoyer, Olga Fink, and Luc Van Gool. Domain adaptive semantic segmentation with self-supervised depth estimation. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 8495-8505, 2021. 2" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 714 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 307, + 73, + 547, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 547, + 137 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 547, + 137 + ], + "type": "text", + "content": "[39] Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, Kaitao Song, Ding Liang, Tong Lu, Ping Luo, and Ling Shao. Pyramid vision transformer: A versatile backbone for dense prediction without convolutions. In Proceedings of the IEEE/CVF international conference on computer vision, pages 568-578, 2021. 
5" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 139, + 547, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 139, + 547, + 182 + ], + "spans": [ + { + "bbox": [ + 307, + 139, + 547, + 182 + ], + "type": "text", + "content": "[40] Haoyu Xie, Chong Fu, Xu Zheng, Yu Zheng, Chiu-Wing Sham, and Xingwei Wang. Adversarial co-training for semantic segmentation over medical images. Computers in biology and medicine, 157:106736, 2023. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 184, + 547, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 184, + 547, + 238 + ], + "spans": [ + { + "bbox": [ + 307, + 184, + 547, + 238 + ], + "type": "text", + "content": "[41] Cheng-Yu Yang, Yuan-Jhe Kuo, and Chiou-Ting Hsu. Source free domain adaptation for semantic segmentation via distribution transfer and adaptive class-balanced self-training. In 2022 IEEE International Conference on Multimedia and Expo (ICME), pages 1-6. IEEE, 2022. 2, 3, 5, 6, 7" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 239, + 547, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 239, + 547, + 282 + ], + "spans": [ + { + "bbox": [ + 307, + 239, + 547, + 282 + ], + "type": "text", + "content": "[42] Kailun Yang, Xinxin Hu, Luis M Bergasa, Eduardo Romero, and Kaiwei Wang. Pass: Panoramic annular semantic segmentation. IEEE Transactions on Intelligent Transportation Systems, 21(10):4171-4185, 2019. 1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 283, + 547, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 283, + 547, + 327 + ], + "spans": [ + { + "bbox": [ + 307, + 283, + 547, + 327 + ], + "type": "text", + "content": "[43] Kailun Yang, Xinxin Hu, Yicheng Fang, Kaiwei Wang, and Rainer Stiefelhagen. Omnisupervised omnidirectional semantic segmentation. IEEE Transactions on Intelligent Transportation Systems, 2020. 
1, 7" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 327, + 547, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 327, + 547, + 380 + ], + "spans": [ + { + "bbox": [ + 307, + 327, + 547, + 380 + ], + "type": "text", + "content": "[44] Mucong Ye, Jing Zhang, Jinpeng Ouyang, and Ding Yuan. Source data-free unsupervised domain adaptation for semantic segmentation. In Proceedings of the 29th ACM International Conference on Multimedia, pages 2233-2242, 2021. 2, 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 383, + 547, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 383, + 547, + 436 + ], + "spans": [ + { + "bbox": [ + 307, + 383, + 547, + 436 + ], + "type": "text", + "content": "[45] Hao-Wei Yeh, Baoyao Yang, Pong C Yuen, and Tatsuya Harada. Sofa: Source-data-free feature alignment for unsupervised domain adaptation. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 474–483, 2021. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 437, + 547, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 437, + 547, + 503 + ], + "spans": [ + { + "bbox": [ + 307, + 437, + 547, + 503 + ], + "type": "text", + "content": "[46] Xiangyu Yue, Zangwei Zheng, Shanghang Zhang, Yang Gao, Trevor Darrell, Kurt Keutzer, and Alberto Sangiovanni Vincentelli. Prototypical cross-domain self-supervised learning for few-shot unsupervised domain adaptation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13834-13844, 2021. 
1, 7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 504, + 547, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 504, + 547, + 568 + ], + "spans": [ + { + "bbox": [ + 307, + 504, + 547, + 568 + ], + "type": "text", + "content": "[47] Cheng Zhang, Zhaopeng Cui, Cai Chen, Shuaicheng Liu, Bing Zeng, Hujun Bao, and Yinda Zhang. Deeppanoocontext: Panoramic 3d scene understanding with holistic scene context graph and relation-based optimization. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 12612-12621, 2021. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 570, + 547, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 570, + 547, + 624 + ], + "spans": [ + { + "bbox": [ + 307, + 570, + 547, + 624 + ], + "type": "text", + "content": "[48] Jiaming Zhang, Chaoxiang Ma, Kailun Yang, Alina Roitberg, Kunyu Peng, and Rainer Stiefelhagen. Transfer beyond the field of view: Dense panoramic semantic segmentation via unsupervised domain adaptation. IEEE Transactions on Intelligent Transportation Systems, 2021. 1, 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 625, + 547, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 625, + 547, + 690 + ], + "spans": [ + { + "bbox": [ + 307, + 625, + 547, + 690 + ], + "type": "text", + "content": "[49] Jiaming Zhang, Kailun Yang, Chaoxiang Ma, Simon Reiβ, Kunyu Peng, and Rainer Stiefelhagen. Bending reality: Distortion-aware transformers for adapting to panoramic semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16917-16927, 2022. 
1, 2, 3, 4, 6, 7, 8" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 691, + 547, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 691, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 307, + 691, + 547, + 714 + ], + "type": "text", + "content": "[50] Jiaming Zhang, Kailun Yang, Hao Shi, Simon Reiβ, Kunyu Peng, Chaoxiang Ma, Haodong Fu, Kaiwei Wang, and Rainer" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "27894" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 663 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 67, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 287, + 116 + ], + "type": "text", + "content": "Stiefelhagen. Behind every domain there is a shift: Adapting distortion-aware vision transformers for panoramic semantic segmentation. arXiv preprint arXiv:2207.11860, 2022. 1, 2, 4, 6, 7, 8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 118, + 288, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 118, + 288, + 183 + ], + "spans": [ + { + "bbox": [ + 48, + 118, + 288, + 183 + ], + "type": "text", + "content": "[51] Pan Zhang, Bo Zhang, Ting Zhang, Dong Chen, Yong Wang, and Fang Wen. Prototypical pseudo label denoising and target structure learning for domain adaptive semantic segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12414-12424, 2021. 
2, 5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 185, + 288, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 185, + 288, + 228 + ], + "spans": [ + { + "bbox": [ + 48, + 185, + 288, + 228 + ], + "type": "text", + "content": "[52] Qiming Zhang, Jing Zhang, Wei Liu, and Dacheng Tao. Category anchor-guided unsupervised domain adaptation for semantic segmentation. Advances in neural information processing systems, 32, 2019. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 230, + 288, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 230, + 288, + 274 + ], + "spans": [ + { + "bbox": [ + 48, + 230, + 288, + 274 + ], + "type": "text", + "content": "[53] Yang Zhang, Philip David, and Boqing Gong. Curriculum domain adaptation for semantic segmentation of urban scenes. 2017 IEEE International Conference on Computer Vision (ICCV), pages 2039-2049, 2017. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 275, + 288, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 275, + 288, + 319 + ], + "spans": [ + { + "bbox": [ + 48, + 275, + 288, + 319 + ], + "type": "text", + "content": "[54] Yuyang Zhao, Zhun Zhong, Zhiming Luo, Gim Hee Lee, and Nicu Sebe. Source-free open compound domain adaptation in semantic segmentation. IEEE Transactions on Circuits and Systems for Video Technology, 32(10):7019-7032, 2022. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 319, + 288, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 319, + 288, + 363 + ], + "spans": [ + { + "bbox": [ + 48, + 319, + 288, + 363 + ], + "type": "text", + "content": "[55] Xu Zheng, Chong Fu, Haoyu Xie, Jialei Chen, Xingwei Wang, and Chiu-Wing Sham. Uncertainty-aware deep co-training for semi-supervised medical image segmentation. Computers in Biology and Medicine, 149:106051, 2022. 
2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 365, + 288, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 365, + 288, + 407 + ], + "spans": [ + { + "bbox": [ + 48, + 365, + 288, + 407 + ], + "type": "text", + "content": "[56] Xu Zheng, Yunhao Luo, Hao Wang, Chong Fu, and Lin Wang. Transformer-cnn cohort: Semi-supervised semantic segmentation by the best of both students. arXiv preprint arXiv:2209.02178, 2022." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 409, + 288, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 409, + 288, + 442 + ], + "spans": [ + { + "bbox": [ + 48, + 409, + 288, + 442 + ], + "type": "text", + "content": "[57] Xu Zheng, Yunhao Luo, Pengyuan Zhou, and Lin Wang. Distilling efficient vision transformers from cnns for semantic segmentation. arXiv preprint arXiv:2310.07265, 2023. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 443, + 288, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 443, + 288, + 497 + ], + "spans": [ + { + "bbox": [ + 48, + 443, + 288, + 497 + ], + "type": "text", + "content": "[58] Xu Zheng, Tianbo Pan, Yunhao Luo, and Lin Wang. Look at the neighbor: Distortion-aware unsupervised domain adaptation for panoramic semantic segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 18687-18698, 2023. 2, 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 498, + 288, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 498, + 288, + 552 + ], + "spans": [ + { + "bbox": [ + 48, + 498, + 288, + 552 + ], + "type": "text", + "content": "[59] Xu Zheng, Jinjing Zhu, Yexin Liu, Zidong Cao, Chong Fu, and Lin Wang. Both style and distortion matter: Dual-path unsupervised domain adaptation for panoramic semantic segmentation. arXiv preprint arXiv:2303.14360, 2023. 
1, 2, 4, 6, 8" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 554, + 288, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 554, + 288, + 609 + ], + "spans": [ + { + "bbox": [ + 48, + 554, + 288, + 609 + ], + "type": "text", + "content": "[60] Jinjing Zhu, Yunhao Luo, Xu Zheng, Hao Wang, and Lin Wang. A good student is cooperative and reliable: Cnn-transformer collaborative learning for semantic segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11720-11730, 2023. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 610, + 288, + 663 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 610, + 288, + 663 + ], + "spans": [ + { + "bbox": [ + 48, + 610, + 288, + 663 + ], + "type": "text", + "content": "[61] Yang Zou, Zhiding Yu, BVK Kumar, and Jinsong Wang. Unsupervised domain adaptation for semantic segmentation via class-balanced self-training. In Proceedings of the European conference on computer vision (ECCV), pages 289-305, 2018. 
2" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "27895" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/Semantics-aware Motion Retargeting with Vision-Language Models/94f0a721-55ce-4d79-a7e3-1129e082a51d_content_list.json b/2024/Semantics-aware Motion Retargeting with Vision-Language Models/94f0a721-55ce-4d79-a7e3-1129e082a51d_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..26dedc6f1b228d8c969af7db853b7236c91ec756 --- /dev/null +++ b/2024/Semantics-aware Motion Retargeting with Vision-Language Models/94f0a721-55ce-4d79-a7e3-1129e082a51d_content_list.json @@ -0,0 +1,1550 @@ +[ + { + "type": "text", + "text": "Semantics-aware Motion Retargeting with Vision-Language Models", + "text_level": 1, + "bbox": [ + 142, + 130, + 826, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Haodong Zhang $^{1*}$ Zhike Chen $^{1*}$ Haocheng Xu $^{1}$ Lei Hao $^{2}$ Xiaofei Wu $^{2}$ Songcen Xu $^{2}$ Zhensong Zhang $^{2}$ Yue Wang $^{1}$ Rong Xiong $^{1\\dagger}$ Zhejiang University ${}^{2}$ Huawei Noah's Ark Lab", + "bbox": [ + 176, + 179, + 792, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 268, + 313, + 284 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Capturing and preserving motion semantics is essential to motion retargeting between animation characters. However, most of the previous works neglect the semantic information or rely on human-designed joint-level representations. 
Here, we present a novel Semantics-aware Motion reTargeting (SMT) method with the advantage of vision-language models to extract and maintain meaningful motion semantics. We utilize a differentiable module to render 3D motions. Then the high-level motion semantics are incorporated into the motion retargeting process by feeding the vision-language model with the rendered images and aligning the extracted semantic embeddings. To ensure the preservation of fine-grained motion details and high-level semantics, we adopt a two-stage pipeline consisting of skeleton-aware pre-training and fine-tuning with semantics and geometry constraints. Experimental results show the effectiveness of the proposed method in producing high-quality motion retargeting results while accurately preserving motion semantics. Project page can be found at https://sites.google.com/view/smtnet.", + "bbox": [ + 75, + 300, + 473, + 603 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 630, + 209, + 645 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3D animation characters have extensive application in animation production, virtual reality, and various other domains. These characters are animated using motion data, resulting in lifelike and immersive animations. Nevertheless, acquiring motion data for each character can be a costly endeavor. Therefore, the ability to retarget existing motion data for new characters holds immense importance. 
The goal of motion retargeting is to transfer existing motion data to new characters following motion feature extraction and integration processes, which ensure the preservation of the original motion's characteristics.", + "bbox": [ + 75, + 655, + 468, + 820 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Semantics encompasses the meaningful and contextually relevant information conveyed in motion and plays a critical role in ensuring the realism and vividness of the anima", + "bbox": [ + 75, + 821, + 468, + 867 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/bd9e13161b93841733ced865f78598c1d6a977e14cdaba3f18310fb1838f555d.jpg", + "image_caption": [ + "Figure 1. Comparison with previous motion retargeting methods. (a) Previous works rely on human-designed joint distance matrix [25] or self-contacts between mesh vertices [23] to ensure semantics preservation. (b) Ours work enforces human-level motion semantics consistency with the extensive knowledge of vision-language models. (c) Comparison of motion quality and semantics preservation on the Mixamo dataset [1]. Our method achieves the best motion quality and semantics consistency." + ], + "image_footnote": [], + "bbox": [ + 500, + 267, + 700, + 430 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/e549d6320b67a13aa9b070cb0d500c4433aa93e45dd2ca4d66e68f52b2a81134.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 704, + 268, + 890, + 430 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "tion characters. Preservation of motion semantics can enhance the efficiency of motion retargeting by reducing the need for time-consuming manual adjustments and refinements. However, previous methods [2, 15, 22] are mainly based on retargeting of joint positions and make less use of the extraction of semantic information. They focus on trajectory-level motion retargeting with few attention to motion semantics. 
Consequently, this leads to a significant loss of motion semantics and necessitates the labor-intensive intervention of animation artists for manual trajectory adjustments. Recent advancements have introduced self-contacts [23] and joint distance matrices [25] as the representation of motion semantics. Nevertheless, self-contacts are not applicable to non-contact semantics and require intricate vertex correspondence. The human-designed joint distance matrices primarily focus on joint relative relationships and still lack consideration of high-level semantic information.", + "bbox": [ + 496, + 579, + 892, + 835 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To address the intricate task of capturing and preserving motion semantics, we introduce a new perspective: the most general and comprehensive form of motion semantics is human-level natural language, reflecting the user's intu", + "bbox": [ + 498, + 839, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*These authors contributed equally to this work", + "bbox": [ + 94, + 875, + 346, + 887 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Corresponding author: rxiong@zju.edu.cn", + "bbox": [ + 96, + 887, + 326, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "2155", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "itive understanding of motion. 
However, the main challenge of human-level motion semantics representation lies in the scarcity of labelled data. It is difficult and expensive to label sufficient semantic textual descriptions for motion data.", + "bbox": [ + 75, + 90, + 468, + 151 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we introduce the incorporation of robust, state-of-the-art vision-language models to provide semantic guidance to the motion retargeting network. In the absence of labelled semantic data, we leverage the capabilities of a vision-language model to serve as a semantic supervisor in an unsupervised manner, which can extract motion semantics in a more intuitive way, as illustrated in Fig. 1. This approach offers a solution to the challenge of the limited availability of labelled semantic datasets for motion retargeting. To establish a connection between the vision-language model and motion semantics extraction, we employ the differentiable skinning and rendering modules to translate 3D motions into image sequences. Subsequently, we adopt visual question answering with guiding questions to inquire about the most relevant motion semantics from the vision-language model.", + "bbox": [ + 75, + 152, + 470, + 393 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To guarantee the preservation of motion semantics during motion retargeting, we introduce a semantics consistency loss that enforces the semantic embeddings of the targeted motion to closely align with those of the source motion. For dense semantic supervision and computational efficiency, we utilize latent features extracted by the vision-language model as the semantic embeddings instead of textual descriptions. To alleviate the non-linearity of the semantics consistency loss, we introduce a two-stage training approach. We categorize motion information into two distinct levels: the skeletal level and the semantic level. 
Our approach involves pre-training the motion retargeting network at the skeletal level, which is then further refined and fine-tuned at the semantic level with the power of vision-language models. To the best of our knowledge, we are the first to leverage the extensive capability of vision-language models for the task of semantics-aware motion retargeting.", + "bbox": [ + 75, + 396, + 470, + 652 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To summarize, the contributions of our work include:", + "bbox": [ + 96, + 654, + 449, + 667 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We introduce an innovative framework that leverages the expertise of vision-language models as a semantic supervisor to tackle the challenge of limited labelled semantic data for the task of motion retargeting.", + "- We propose to use differentiable skinning and rendering to translate from the motion domain to the image domain and perform guiding visual question answering to obtain human-level semantic representation.", + "- We design a semantics consistency loss to maintain motion semantics and introduce an effective two-stage training pipeline consisting of pre-training at the skeletal level and fine-tuning at the semantic level.", + "- Our model achieves state-of-the-art performance in the challenging task of semantics-aware motion retargeting, delivering exceptional performance marked by high" + ], + "bbox": [ + 76, + 674, + 468, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "quality motion and superior semantics consistency.", + "bbox": [ + 513, + 90, + 849, + 106 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Works", + "text_level": 1, + "bbox": [ + 500, + 119, + 648, + 135 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Optimization-based Motion Retargeting. 
Motion retargeting is a technique to adapt existing motion data from a source character to a target character with different bone proportions, mesh skins, and skeletal structures. Early works formulate motion retargeting as a constrained optimization problem [4, 6, 11, 18]. Gleicher et al. [6] introduced a motion retargeting method, which identifies motion features as constraints and computes an adapted motion using a space-time constraint solver to preserve the desirable qualities. Lee et al. [11] proposed a method to adapt existing motion of a human-like character to have the desired features with specified constraints and combined a hierarchical curve fitting technique with inverse kinematics. Nonetheless, these methods necessitate the tedious and time-consuming process of formulating human-designed constraints for specific motion sequences.", + "bbox": [ + 496, + 143, + 890, + 386 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Learning-based Motion Retargeting. With the rise of deep learning, researchers have been developing learning-based motion retargeting methods in recent years [2, 9, 15, 22, 23, 25]. Villegas et al. [22] presented a recurrent neural network architecture, which incorporates a forward kinematics layer and cycle consistency loss for unsupervised motion retargetting. Aberman et al. [2] designed a skeleton-aware network with differentiable convolution, pooling, and unpooling operators to transform various homeomorphic skeletons into a primary skeleton for cross-structural motion retargeting. However, these methods tend to concentrate on trajectory-level motion retargeting with limited consideration for motion semantics, which often results in a notable loss of motion semantics and increase the heavy burden of manual adjustments to the trajectories. To address these problems, Zhang et al. 
[25] presented a residual retargeting network that uses a skeleton-aware module to preserve motion semantics and a shape-aware module to reduce interpenetration and contact missing. While this method successfully preserves joint relative relationships, it still falls short in addressing high-level motion semantics.", + "bbox": [ + 496, + 387, + 890, + 703 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Vision-Language Models. Vision-language models have empowered various vision-language tasks, including visual question answering and image captioning. Tevet et al. [20] introduced a human motion generation model that aligns the latent space with that of the Contrastive Language-Image Pre-training (CLIP) model. Li et al. [13] proposed a pretraining strategy from off-the-shelf frozen pre-trained image encoders and frozen large language models for vision-to-language generative learning. Zhu et al. [27] presented a vision-language model, which uses one projection layer to align a frozen visual encoder with a frozen advanced large language models (LLM). However, these efforts primarily concentrate on vision-language tasks, leaving the question", + "bbox": [ + 496, + 704, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2156", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/c14d3f4806670441419f84b705ce55d23b7c4d74c2f021432b6dd6694aa6c582.jpg", + "image_caption": [ + "Stage I: Skeleton-aware Pre-training" + ], + "image_footnote": [], + "bbox": [ + 101, + 89, + 869, + 268 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/69ec7727da1edbc17c8a750293f6acfb0d272504d6e5f0d1f8352703faec249d.jpg", + "image_caption": [ + "Figure 2. Model Architecture. Our semantics-aware motion retargeting framework employs a two-stage pipeline. 
Initially, the retargeting network consisting of multiple spatial-temporal graph convolution layers is trained at the skeletal level to establish a base model. Subsequently, this model undergoes further refinement and fine-tuning at the semantic level by the alignment of latent semantic embeddings of the source and target, leveraging the extensive knowledge of vision-language models. The latent semantic embedding is extracted by guiding visual question answering. Additionally, the geometry constraints are also enforced during fine-tuning to avoid interpenetration." + ], + "image_footnote": [], + "bbox": [ + 99, + 284, + 346, + 368 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/6bf8c1ef8f6296d6854894bb056aa820762d9293e2c39fd0d75a3383c649cc35.jpg", + "image_caption": [ + "Stage II: Semantics & Geometry Fine-tuning" + ], + "image_footnote": [], + "bbox": [ + 354, + 284, + 870, + 369 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "of how to effectively employ vision-language models to guide motion retargeting as an open and unexplored area.", + "bbox": [ + 75, + 468, + 468, + 498 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Human motion synthesis. Human motion synthesis is a domain related to motion retargeting, which aims to synthesize realistic and lifelike human motions from random noise or other inputs with generative networks. Guo et al. [7] proposed to generate human motion sequences based on action type. Guo et al. [8] presented a temporal variational autoencoder to synthesize human motions from text input. Tevet et al. [21] introduced a diffusion-based generative model for human motion generation. As comparison, we focus on the task of motion retargeting, where existing motion data is transferred from a source character to a target character.", + "bbox": [ + 75, + 500, + 468, + 667 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. 
Method", + "text_level": 1, + "bbox": [ + 76, + 683, + 166, + 698 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Overview", + "text_level": 1, + "bbox": [ + 76, + 708, + 187, + 723 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We present a novel semantic-aware motion retargeting method, as illustrated in Fig 2. In contrast to previous methods that neglect motion semantics [2, 15, 22] or rely on human-designed joint-level representations [25], our approach integrates natural language descriptions from vision-language models to offer an explicit and comprehensive semantic representation of character motions, thereby maintaining the preservation of semantic consistency.", + "bbox": [ + 75, + 733, + 468, + 853 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Task definition. Given a source motion sequence, consisting of the skeleton motion and its associated skinning geometry, as well as a target character in the reference pose (e.g.,", + "bbox": [ + 75, + 854, + 468, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "T-posed), the objective of motion retargeting is to generate the target motion while preserving crucial motion characteristics, such as joint trajectory similarity and motion semantics, and satisfying geometry constraints.", + "bbox": [ + 496, + 468, + 890, + 529 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Graph representation. The skeleton motion sequence can be modelled as a sequence of graphs according to the skeleton hierarchy where each node corresponds to a joint and each edge represents a directed connection between joints. Assume that the motion sequence has $T$ frames in total and the animation characters have $N$ nodes and $M$ edges. In our approach, we consider motion data as node features $\\mathbf{Q} \\in \\mathbb{R}^{T \\times N \\times 9}$ , which encompasses the 6D joint rotation representation [26] and 3D joint positions. 
Additionally, we utilize skeleton hierarchy information as edge features $\\mathbf{E} \\in \\mathbb{R}^{M \\times 3}$ , which consists of the 3D position offset between each joint and its parent joint.", + "bbox": [ + 496, + 532, + 892, + 715 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Two-stage training. The motion of animation characters can be divided into skeletal movements and skinned movements, represented by skeletal joints and skinned vertices respectively. The skinned movements can be derived from the skeletal movements through the linear blend skinning algorithm [12]. Therefore, motion retargeting at the skeletal level can effectively downscale the data and reduce the complexity of the problem. However, this simplification process can lead to the loss of motion semantics and violations of geometry constraints. To address these issues, we employ a two-stage pipeline. Initially, we pre-train a skeleton-aware network to ensure a general initialization for motion retard", + "bbox": [ + 496, + 719, + 892, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "2157", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "getting without considering motion semantics and geometry constraints. Subsequently, we fine-tune the pre-trained network for each source-target character pair with the vision-language model to maintain semantic consistency and enforce geometry constraints to prevent interpenetrations.", + "bbox": [ + 75, + 90, + 468, + 167 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Skeleton-aware Pre-training", + "text_level": 1, + "bbox": [ + 76, + 174, + 331, + 191 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Retargeting network. We propose a retargeting network consisting of a graph motion encoder and a graph motion decoder for motion retargeting. 
The motion encoder $\\mathcal{F}_{\\theta}$ encodes the motion data $\\mathbf{Q}_A$ of the source character A into the latent motion embedding $\\mathbf{Z}_A$ . Then, the motion decoder $\\mathcal{F}_{\\phi}$ generates the joint angles $\\mathbf{Q}_B$ of the target character B based on the latent features. Both the motion encoder and decoder are composed of multiple graph convolutions. More details are available in the supplementary materials.", + "bbox": [ + 75, + 198, + 468, + 333 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {Z} _ {A} = \\mathcal {F} _ {\\theta} (\\mathbf {Q} _ {A}, \\mathbf {E} _ {A})\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 345, + 339, + 362 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {Q} _ {B} = \\mathcal {F} _ {\\phi} (\\mathbf {Z} _ {A}, \\mathbf {E} _ {B}) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 362, + 468, + 380 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In the first phase, we train the motion encoder and decoder at the skeletal level to establish a robust initialization for motion retargeting. Following the unsupervised learning setting in [22], we train the network with the reconstruction loss, cycle consistency loss, adversarial loss, and joint relationship loss. 
The overall objective function for skeleton-aware pre-training is defined as follows:", + "bbox": [ + 75, + 383, + 468, + 489 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {s k e l} = \\lambda_ {r} \\mathcal {L} _ {r e c} + \\lambda_ {c} \\mathcal {L} _ {c y c} + \\lambda_ {a} \\mathcal {L} _ {a d v} + \\lambda_ {j} \\mathcal {L} _ {j d m} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 102, + 513, + 468, + 530 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The reconstruction loss $\\mathcal{L}_{rec}$ encourages the retargeted motion to match the source motion when the target character is the same as the source character. Let $\\mathbf{Q}_{A,t}$ be the motion data of source character A at frame $t$ , and $\\hat{\\mathbf{Q}}_{A,t}^{rec}$ be the reconstructed motion. Then $\\mathcal{L}_{rec}$ is defined as:", + "bbox": [ + 75, + 537, + 468, + 614 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {r e c} = \\sum_ {t} \\left| \\left| \\hat {\\mathbf {Q}} _ {A, t} ^ {r e c} - \\mathbf {Q} _ {A, t} \\right| \\right| _ {2} ^ {2} \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 171, + 621, + 468, + 656 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The cycle consistency loss $\\mathcal{L}_{cyc}$ promotes the consistency of retargeted motion from the source character A to the target character B and then back to the source character A, ensuring it remains in line with the original motion. Let $\\hat{\\mathbf{Q}}_{A,t}^{cyc}$ represent the retargeted motion. 
Then $\\mathcal{L}_{cyc}$ is defined as:", + "bbox": [ + 75, + 664, + 468, + 742 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {c y c} = \\sum_ {t} \\left| \\left| \\hat {\\mathbf {Q}} _ {A, t} ^ {c y c} - \\mathbf {Q} _ {A, t} \\right| \\right| _ {2} ^ {2} \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 171, + 750, + 468, + 785 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The adversarial loss $\\mathcal{L}_{adv}$ is calculated by a discriminator network, which utilizes the unpaired data of the target character to learn how to distinguish whether the motions are real or fake. Let $\\mathcal{F}_{\\gamma}$ be the discriminator network, and $\\mathbf{Q}_{B,t}$ be the retargeted motion at frame $t$ . Then it is defined as:", + "bbox": [ + 75, + 789, + 468, + 864 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {a d v} = \\sum_ {t} \\log \\left(1 - \\mathcal {F} _ {\\gamma} \\left(\\mathbf {Q} _ {B, t}\\right)\\right) \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 161, + 872, + 468, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The joint relationship loss $\\mathcal{L}_{jdm}$ is calculated by the joint distance matrix (JDM) $\\mathbf{D} \\in \\mathbb{R}^{N \\times N}$ , which represents the relative positional relationships of the joints. The element $d_{i,j}$ of $\\mathbf{D}$ represents the Euclidean distance between joint $i$ and joint $j$ . We extract the joint distance matrix from the target character and compare it with the source character. 
Then $\\mathcal{L}_{jdm}$ is defined as:", + "bbox": [ + 498, + 90, + 890, + 196 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {j d m} = \\sum_ {t} \\left| \\left| \\eta (\\mathbf {D} _ {A, t}) - \\eta (\\mathbf {D} _ {B, t}) \\right| \\right| _ {2} ^ {2} \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 571, + 208, + 890, + 238 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\eta(.)$ is an L1 normalization performed on each row of the distance matrix. This normalization operation eliminates the difference in bone length to some extent.", + "bbox": [ + 498, + 242, + 890, + 287 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Semantics & Geometry Fine-tuning", + "text_level": 1, + "bbox": [ + 500, + 295, + 808, + 311 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In the second phase, we fine-tune the pre-trained retargeting network for each source-target character pair to preserve motion semantics and satisfy geometry constraints. The motion semantics is maintained by the semantics consistency loss, which aligns the semantic embeddings extracted from a vision-language model for both the source and target. Additionally, the geometry constraint is satisfied by minimizing the interpenetration loss. The overall objective function for fine-tuning is outlined as follows:", + "bbox": [ + 498, + 319, + 890, + 455 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {f i n e}} = \\lambda_ {s} \\mathcal {L} _ {\\text {s e m}} + \\lambda_ {p} \\mathcal {L} _ {\\text {p e n}} \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 599, + 469, + 890, + 486 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Differentiable skinning & rendering. 
To make the finetuning process differentiable for gradient back-propagation, we first use the differentiable linear blend skinning algorithm [12], denoted as $\\mathcal{F}_{lbs}$ , to transform the target joint angles $\\mathbf{Q}_B$ into skinned motions $\\mathbf{V}_B$ , represented by 3D mesh vertices. Subsequently, we employ the differentiable projection function $\\mathcal{F}_{proj}$ as introduced in [16] to convert the skinned motions into 2D images $\\mathbf{I}_B$ . A limitation for the differentiable rendering process is that when projecting the 3D skinned mesh onto 2D images, the depth information is lost. To obtain a comprehensive semantic representation of the motion, we render the character from multiple perspectives and then combine the extracted features, following the Non-rigid Shape Fitting task in [16].", + "bbox": [ + 498, + 488, + 890, + 700 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {I} _ {A} = \\mathcal {F} _ {p r o j} \\left(\\mathcal {F} _ {l b s} \\left(\\mathbf {Q} _ {A}\\right)\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 617, + 712, + 774, + 728 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {I} _ {B} = \\mathcal {F} _ {p r o j} \\left(\\mathcal {F} _ {l b s} (\\mathbf {Q} _ {B})\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 617, + 727, + 774, + 747 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "", + "bbox": [ + 890, + 723, + 893, + 734 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Frozen vision-language model. To obtain an explicit and reliable semantic feature of the motion, we employ a frozen vision-language model as our semantic supervisor. Current 3D vision-language datasets [3, 28] mainly focus on the occupation or the segmentation of the object in a spatial scene like rooms, and thus the state-of-the-art 3D vision-language models [28] lack prior knowledge relevant to animation characters. 
In contrast, 2D vision-language models achieve better results in semantic tasks, such as image captioning, visual question answering and image-text", + "bbox": [ + 498, + 750, + 890, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "2158", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/d2c109697a3715ea97365be98876fb3fa3c49e189dc3a65adc55a802494945f6.jpg", + "image_caption": [ + "Figure 3. An example of guiding visual question answering." + ], + "image_footnote": [], + "bbox": [ + 80, + 89, + 472, + 251 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "retrieval, and provides cleaner and richer semantics [24]. Therefore, we utilize a frozen 2D vision-language model to extract latent embeddings of motion semantics. The frozen 2D vision-language model employed in our work is BLIP-2 [14], which incorporates a lightweight querying transformer as a bridge between the off-the-shelf frozen pre-trained image encoder and the frozen large language model.", + "bbox": [ + 75, + 294, + 468, + 400 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Prompt design. Since the vision-language model has the capability to extract rich information from images, it is possible that the extracted features might contain redundant details, such as the appearance of the character. To guide the vision-language model to obtain semantic embedding relevant to character motions, we adopt a guiding visual question answering approach for motion semantics extraction, as depicted in Fig. 3. We believe that there is a strong correlation between motion semantics and hand movements. 
To acquire a more comprehensive description of the motion, we initially provide a guiding question to BLIP-2: \"Where are the hands of the character?\" Subsequently, we introduce a new question and combine it with the first answer as the input to BLIP-2: \"[The answers to the first question generated by the vision-language model] What is the character in the image doing?\" For more details, please refer to the supplementary materials.", + "bbox": [ + 75, + 400, + 468, + 657 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Latent semantic embedding. We opt to align the latent semantic embeddings of the source and target generated by the vision-language model rather than relying on textual descriptions, specifically leveraging the encoder output of the large language model. This approach enables us to acquire a more accurate and denser representation, while also mitigating computational costs and the non-linearity of the training objective caused by the large number of parameters of the vision-language model. 
Let $\\mathbf{E}_A$ and $\\mathbf{E}_B$ be the latent semantic embeddings of the source and target motions, $\\mathcal{F}_{\\omega}$ be the frozen pre-trained image encoder, $\\mathcal{F}_{\\sigma}$ be the frozen querying transformer, $\\mathcal{F}_{\\psi}$ be the encoder of the frozen large language model, and context be the question.", + "bbox": [ + 75, + 657, + 470, + 854 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\begin{array}{l} \\mathbf {E} _ {A} = \\mathcal {F} _ {\\psi} \\left(\\mathcal {F} _ {\\sigma} \\left(\\mathcal {F} _ {\\omega} (\\mathbf {I} _ {A}), \\text {c o n t e x t}\\right)\\right) \\\\ \\overline {{\\mathbf {E}}} = \\overline {{\\mathbf {F}}} \\left(\\overline {{\\mathbf {F}}} \\left(\\overline {{\\mathbf {F}}} (\\mathbf {I} _ {A}), \\text {c o n t e x t}\\right)\\right) \\end{array} \\tag {9} \\\\ \\mathbf {E} _ {B} = \\mathcal {F} _ {\\psi} (\\mathcal {F} _ {\\sigma} (\\mathcal {F} _ {\\omega} (\\mathbf {I} _ {B}), c o n t e x t)) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 156, + 867, + 468, + 902 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Fine-tuning with semantics consistency. As illustrated in Fig. 2, our approach aligns the latent semantic embeddings of both the source and target motions in an unsupervised manner, ensuring a high degree of semantic consistency in the retargeted results. The semantics consistency loss $\\mathcal{L}_{sem}$ is calculated using the mean square error and it is defined as follows:", + "bbox": [ + 498, + 90, + 893, + 196 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {s e m} = \\sum_ {t} \\| \\mathbf {E} _ {A, t} - \\mathbf {E} _ {B, t} \\| _ {2} ^ {2} \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 596, + 208, + 890, + 239 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Fine-tuning with geometry constraints. 
From our observations, most interpenetration problems occur between the limbs and the main body. To address this, we incorporate the signed distance field between the limb vertices and the body mesh as the interpenetration loss. First, we convert the skeleton motion output from the network into mesh vertices using the linear blend skinning method [12]. Then, the interpenetration loss is defined as follows:", + "bbox": [ + 498, + 244, + 890, + 367 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {p e n} = \\sum_ {t} R e L U (- \\Phi_ {b, t} (\\mathbf {V} _ {l, t})) \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 583, + 378, + 890, + 409 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\Phi_b$ indicates the signed distance field function, $\\mathbf{V}_l$ is the vertices of the limbs. If the vertex locates inside the body, the value of the function is less than zero. Therefore, we use the $ReLU$ function to penalize the inner vertices.", + "bbox": [ + 498, + 415, + 892, + 476 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 500, + 489, + 633, + 507 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Settings", + "text_level": 1, + "bbox": [ + 500, + 513, + 598, + 531 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Datasets. We train and evaluate our method on the Mixamo dataset [1], an extensive repository of animations performed by various 3D virtual characters with distinct skeletons and geometry shapes. The training set we use to pretrain our skeleton aware module is the same as that used in [2], which contains 1646 motions performed by 7 characters. It's important to note that the Mixamo dataset does not provide clean ground truth data, since many of the motion sequences suffer from interpenetration issues and semantic information loss. 
To mitigate this, we have carefully selected a subset of motion sequences that are both semantically clean and free of interpenetration issues for fine-tuning and testing. Our fine-tuning process involves retargeting 15 clean motions including 3127 frames, originally performed by 3 source characters, namely \"Y Bot\", \"X Bot\", and \"Ortiz\", onto 3 target characters, including \"Aj\", \"Kaya\", and \"Mousey\". Then we evaluate the performance of our model on the task of retargeting 30 additional motions that are previously unseen in the training set and fine-tuning sets. More details could be found in the supplementary materials.", + "bbox": [ + 496, + 537, + 890, + 840 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Implementation details. The hyper-parameters $\\lambda_r, \\lambda_c, \\lambda_a, \\lambda_j, \\lambda_p, \\lambda_s$ for pre-training and fine-tuning loss functions are set to 10.0, 1.0, 0.1, 1.0, 1.0, 0.1. For semantics fine-tuning, we use BLIP-2 [14] with pre-trained FlanT5-XXL", + "bbox": [ + 498, + 840, + 893, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "2159", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "[5] large language model. To extract the semantic representation of the motion, we render animation from three perspectives, including the front view, left view and right view. The fine-tuning process takes 25 epochs with 5 clean motion sequences of the source character for each target character. During pre-training and fine-tuning, we use an Adam optimizer to optimize the retargeting network. Please refer to the supplementary materials for more details.", + "bbox": [ + 75, + 90, + 467, + 210 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation metrics. We evaluate the performance of our method across three key dimensions: skeleton, geometry, and semantics. 
At the skeletal level, we measure the Mean Square Error (MSE) between retargeted joint positions and the ground truth provided by Mixamo, analyzing both the global and the local joint positions. At the geometric level, we evaluate the interpenetration percentage (PEN). At the semantic level, we utilize the Image-Text Matching (ITM) score, Fréchet inception distance (FID) and semantics consistency loss (SCL) as metrics. The ITM score quantifies the visual-semantic similarity between the source textual description and the rendered retargeted motion. FID is calculated between the semantic embedding distribution of retargeted motion and source motion. More details are provided in the supplementary materials.", + "bbox": [ + 75, + 212, + 470, + 441 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Comparison with State of the Arts", + "text_level": 1, + "bbox": [ + 76, + 452, + 375, + 468 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Quantitative. In this section, we conduct a comparative analysis of our method against the state-of-the-art approaches as illustrated in Tab. 1. The baseline methods include R2ET [25], SAN [2], NKN [22] and the Copy strategy. The Copy strategy achieves the lowest local MSE because the ground truth data in the Mixamo dataset are not entirely clean, and many of them are generated by copying rotations. As a result, this strategy comes at the cost of semantic loss and interpenetration issues. SAN [2] and NKN [22] focus on skeleton-level motion features, which results in a high interpenetration rate and relatively low semantics preservation. R2ET [25] treats motion semantics as the joint distance matrix and mesh distance field, which helps it obtain better motion semantics than SAN and Copy. Nevertheless, there is still a gap between the human-designed distance matrix and the human-level semantics. 
Notably, our model exhibits the best interpenetration rate and semantics preservation among all methods, showcasing the capability of the proposed method in producing high-quality retargeted motions with semantics consistency.", + "bbox": [ + 75, + 477, + 468, + 779 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Qualitative. In Fig. 4, we visualize the text descriptions of the motions and the qualitative comparison between the state-of-the-arts and our method. SAN [2] and Copy neglect the preservation of semantics and have severe interpenetration. R2ET [25] utilizes joint distance matrix as semantics representation and fails to capture high-level semantic information. For example, the salute motion retargeted by R2ET [25] appears more like a hand-up motion. As a comparison,", + "bbox": [ + 75, + 780, + 468, + 902 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/2b82f0a5a5983c28b764dadc23600be325d82a9c9e9245e1eafee3844373281c.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodMSE ↓MSElc ↓Pen.% ↓ITM ↑FID ↓SCL ↓
Source--4.430.796--
GT--9.060.58226.991.331
Copy-0.0059.030.58126.581.327
NKN [22]0.3260.2318.710.57527.791.414
SAN [2]0.4350.2559.740.56128.331.448
R2ET [25]0.4990.4967.620.6435.4690.405
Ours0.2840.2293.500.6800.4360.143
", + "bbox": [ + 498, + 88, + 890, + 207 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/3f37e06acad8ecddeb20ed992af0b49d22e67417caca59727deb5a925f0362a9.jpg", + "table_caption": [ + "Table 1. Quantitative comparison with the state-of-the-arts. $\\mathrm{MSE}^{lc}$ denotes the local MSE. ITM indicates the image-text matching score. FID is Fréchet inception distance of motion semantics. SCL is the semantics consistency loss." + ], + "table_footnote": [], + "table_body": "
MethodMSE ↓MSElc ↓Pen.% ↓ITM ↑FID ↓SCL ↓
SMTtws0.2480.1298.370.5867.7270.769
SMTtwf7.7987.0830.440.43256.5313.29
SMTtwa0.3350.2885.360.6582.8260.266
SMTfwp0.4390.3681.220.5977.2410.583
SMTfwi5.4184.5764.410.55278.4618.96
SMTfwq0.7390.5174.560.6682.4970.191
SMTOurs0.2840.2293.500.6800.4360.143
", + "bbox": [ + 500, + 273, + 890, + 390 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2. Ablation study. $\\mathrm{SMT}_{tws}$ is the network trained with only skeleton-aware pre-training. $\\mathrm{SMT}_{twf}$ is the network trained with only semantics and geometry fine-tuning. $\\mathrm{SMT}_{twa}$ is the network trained in one stage. $\\mathrm{SMT}_{fwp}$ is the network fine-tuned with only the interpenetration loss. $\\mathrm{SMT}_{fwi}$ is the network fine-tuned with image features. $\\mathrm{SMT}_{fwq}$ is the network fine-tuned with the features of the querying transformer.", + "bbox": [ + 498, + 393, + 890, + 491 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "our method is able to successfully preserve high-level motion semantics leveraging the vision-language model. We observe that our approach reaches the best results among all methods, achieving more reliable semantics preservation and lower interpenetration rates. It suggests that with semantics and geometry fine-tuning, our method could effectively solve interpenetration issues together with semantics preservation.", + "bbox": [ + 496, + 501, + 890, + 623 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Ablation Studies", + "text_level": 1, + "bbox": [ + 500, + 633, + 663, + 648 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Skeleton-aware pre-training. The proposed method can be divided into two stage: pre-training and fine-tuning. To illustrate the importance of skeleton-aware pre-training, we evaluate the network trained with only the semantics consistency loss and the interpenetration loss in Tab. 2, denoted as $\\mathrm{SMT}_{twf}$ . The network trained without skeleton-aware pretraining performs worst in MSE and semantics preservation. A reasonable explanation is that the semantics consistency loss is highly non-linear, so it is important to pre-train the network at the skeletal level to provide better initial values. 
We also visualize qualitative results in Fig. 5.", + "bbox": [ + 496, + 657, + 890, + 824 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Semantics & geometry fine-tuning. We also conduct ablation study to illustrate the importance of semantics and geometry fine-tuning in Tab. 2. We first evaluate the performance of the skeleton-aware model without fine-tuning, denoted as $\\mathrm{SMT}_{tws}$ . Though it reaches the best global posi", + "bbox": [ + 496, + 824, + 890, + 902 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "2160", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/67d286d86772b680eeddbe7dbdd2a2bf855b305691d26c3b7e03dbdc18835f4f.jpg", + "image_caption": [ + "Figure 4. Qualitative comparison. The results demonstrate that our method can effectively preserve semantics while the baseline methods suffer from interpenetration or semantic information loss. From the first column to the last column are the source motion, the Copy strategy, NKN [22], SAN [2], R2ET [25], our method and text descriptions, respectively." + ], + "image_footnote": [], + "bbox": [ + 76, + 93, + 893, + 400 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2cefa79331f00b4de3501751f6fd2d2bc1d78bc2156f2a7c10e76f96b04c4e6d.jpg", + "image_caption": [ + "Figure 5. The qualitative comparison of ablation study between the network without fine-tuning (TWS), the network trained with only semantics and geometry fine-tuning (TWF), the network trained with all loss functions (TWA), the network fine-tuned with only the interpenetration loss (FWP) and our full model (All)." + ], + "image_footnote": [], + "bbox": [ + 94, + 460, + 447, + 667 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "tion MSE, it suffers from interpenetration and semantic information loss because of the low-quality motion data provided by Mixamo. 
We next evaluate the network fine-tuned with only the interpenetration loss, denoted as $\\mathrm{SMT}_{fwp}$ . This version results in a significant boost in terms of penetration rate. However, the gradient of interpenetration loss is only relevant with the face normals of the geometry mesh without considering the semantic information conveyed in the motion. It indicates the importance of the semantic consistency loss that makes the network reach a better balance", + "bbox": [ + 75, + 750, + 472, + 900 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "between interpenetration and semantics. We also try to train the network with all loss functions in one stage, denoted as $\\mathrm{SMT}_{twa}$ . However, it is challenging for the model to acquire general knowledge of interpenetration and semantics that is suitable for every character with limited data. Therefore, training the model with skeleton-aware pre-training and fine-tuning it with semantics consistency and geometry constraints for each target character remains a more reasonable and data-efficient strategy.", + "bbox": [ + 496, + 463, + 890, + 599 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Latent semantic embedding. The vision-language model used for semantic extraction can be divided into three parts: the image encoder from CLIP [19], the querying transformer and the large language model. In Tab. 2, we compare the feature outputted by the image encoder, the querying transformer and the encoder of the large language model, denoted as $\\mathrm{SMT}_{fwi}$ , $\\mathrm{SMT}_{fwq}$ , and $\\mathrm{SMT}_{Ours}$ , respectively. The results show that the image feature performs worse since it is greatly affected by the appearance of the character. It indicates that with the help of the large language model, the semantic representation better focuses on the semantic meaning of the motion instead of the character's visual appearance. 
Therefore, the encoder output of the large language model is more suitable for semantic embedding. More details can be found in the supplementary materials.", + "bbox": [ + 496, + 606, + 892, + 833 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Prompt design. To validate the importance of guiding visual question answering, we compare the textual descriptions generated by visual question answering with and without guiding questions as well as image captioning. The re", + "bbox": [ + 496, + 839, + 890, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "2161", + "bbox": [ + 482, + 944, + 513, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/06dd6b0b0ab33fa1a55868a80afc687cc42688a78a6088cc89698e1acbf6ac3d.jpg", + "image_caption": [ + "Image Captioning", + "Visual Question Answering" + ], + "image_footnote": [ + "A 3d model of a boy wearing glasses and a hat.", + "Q: What is the character doing?", + "A: The character is praying." + ], + "bbox": [ + 80, + 89, + 184, + 170 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/89686dcf50707ed7e14930dbdac4e66d787a29b9ad437e6d61383caac35da9e3.jpg", + "image_caption": [ + "Image Captioning", + "Visual Question Answering", + "Figure 6. Text descriptions generated by different ways. The guiding visual question answering yields more comprehensive results." + ], + "image_footnote": [ + "A 3d model of a robot running on a cheeked floor.", + "Q: What is the character doing? \nA: The character is running on a checkered floor." + ], + "bbox": [ + 78, + 176, + 184, + 258 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/2ac2256d22a7e955f6a30ba90a569076e2396410284e0fcb87ca4c00023820b5.jpg", + "table_caption": [], + "table_footnote": [ + "Table 3. User study results. We collect 100 comparisons in three aspects. Our method gets highest scores in the overall quality as well as semantics preservation." + ], + "table_body": "
MethodQuality ↑Smoothness ↑Semantics ↑
Copy0.720.860.71
NKN [22]0.650.800.66
SAN [2]0.690.820.67
R2ET [25]0.800.610.85
Ours0.890.800.92
", + "bbox": [ + 81, + 301, + 457, + 407 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "sults in Fig. 6 indicate that using guiding questions for visual question answering yields the most comprehensive and reasonable text descriptions for motion semantics. Compared with image captioning that uses the vision-language model to generate text description directly from images, the answers from visual question answering task can be guided by the designed question to focus on motion semantics.", + "bbox": [ + 75, + 458, + 468, + 564 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4. User Study", + "text_level": 1, + "bbox": [ + 76, + 573, + 199, + 589 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We conduct a user study to evaluate the performance of our method against the baseline methods. Human subjects are given 12 videos. Each video includes one source skinned motion and five anonymous skinned results. The retargeted results are randomly placed. We ask subjects to rate the results out of 1.0 in three aspects: overall quality, motion smoothness and semantics preservation. We collect a total of 100 comparisons. During the evaluation, users are required to extract semantic meaning from the source motion themselves and then evaluate the preservation of retargeted motions. In general, more than $92\\%$ of subjects prefer the retargeting results of our method.", + "bbox": [ + 75, + 595, + 468, + 777 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5. Retargeting Motion from Human Videos", + "text_level": 1, + "bbox": [ + 76, + 786, + 424, + 801 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this section, we evaluate our motion retargeting approach from human videos in the human3.6M [10] dataset. Video retargeting involves two stages: human pose estimation from video and motion retargeting. 
However, inaccuracies in estimating body postures may result in semantic information loss and thus accumulation of errors in the entire", + "bbox": [ + 75, + 810, + 468, + 898 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/31a5234b3dd525a192b0a0e30eebda6fa48e4919aaca3bb3d5291bf467c490ce.jpg", + "image_caption": [ + "Guiding Visual Question Answering" + ], + "image_footnote": [ + "Q: Where are the hands of the character?" + ], + "bbox": [ + 519, + 89, + 633, + 176 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6b583f7206f29c0b1fc775e655fae1efd11ecf2e6f701ff2617c580822706bd8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 637, + 89, + 750, + 176 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8f56ffaf23669bc2d89939f221262149d7058f8c5b811f3796ffd00f378fcaf0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 754, + 89, + 867, + 176 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/90c070f166b3ff245499e227a911f1815b1940b8b7385812cb83bc7354f8103a.jpg", + "image_caption": [ + "Guiding Visual Question Answering", + "Figure 7. We retarget from human motion clips in the human3.6M [10] dataset. The retargeted motions are free from interpenetration and preserve semantics well." + ], + "image_footnote": [ + "Q: Where are the hands of the character? A: Holding a ball.", + "Q: What is the character doing?", + "A: The character is trying to throw a ball with both hands on the right side of his body." 
+ ], + "bbox": [ + 519, + 179, + 633, + 267 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6e07af2e5d7ced8a5d3c5c66f78180d8a2e50126bd1e2e0cee03ada486231a7d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 637, + 180, + 750, + 267 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1be675d4ea3fdae8b9bd00e2d57b8dc35130e1f7e72e0afc87a8a71c3a69667a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 754, + 180, + 867, + 267 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "retargeting process. Therefore, we first get the estimated human pose from [17]. Then we utilize the vision-language model to extract the semantic embedding of the original video and calculate the semantic consistency loss to optimize the joint angles acquired from the retargeting process directly. In Fig. 7, we show our results of motion retargeting from human videos to Mixamo characters.", + "bbox": [ + 498, + 318, + 890, + 422 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusions", + "text_level": 1, + "bbox": [ + 500, + 436, + 625, + 452 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we present a novel semantics-aware motion retargeting method that leverages the capabilities of vision-language models to extract semantic embeddings and facilitate the preservation of motion semantics. This approach offers a promising solution to the challenge of lacking labelled semantic data for motion. Our proposed method involves a two-stage process that integrates skeleton-level motion characteristics and semantics-level consistency along with geometry constraints. Experimental results demonstrate that our approach excels in generating high-quality retargeted motions with semantics consistency.", + "bbox": [ + 496, + 462, + 890, + 628 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitations. 
The main limitation is the performance of the vision-language model in extracting motion semantics. Without the support of motion semantic datasets of sufficient data size and quality, we rely on the model pre-trained on large image-text datasets. Although the model achieves some remarkable results in motion semantics extraction, there is still room for improvement. In addition, the projection of 3D motion into 2D images loses spatial information and affects the performance.", + "bbox": [ + 496, + 628, + 890, + 763 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Future work. Compared with 2D vision-language models, 3D vision-language models have the advantage of capturing spatial relationships directly. Therefore, fine-tuning 3D vision-language models to make them more suitable for the task of motion semantics extraction is worth exploring in our future work.", + "bbox": [ + 496, + 765, + 890, + 853 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements. This work was supported by the National Nature Science Foundation of China under Grant 62173293.", + "bbox": [ + 496, + 854, + 890, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "2162", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Adobe's mixamo. https://www MIXamo.com/. Accessed: 2023-02-08.", + "[2] Kfir Aberman, Peizhuo Li, Dani Lischinski, Olga Sorkine-Hornung, Daniel Cohen-Or, and Baoquan Chen. Skeleton-aware networks for deep motion retargeting. ACM Transactions on Graphics (TOG), 39(4):62-1, 2020.", + "[3] Daichi Azuma, Taiki Miyanishi, Shuhei Kurita, and Motoaki Kawanabe. Scanqa: 3d question answering for spatial scene understanding. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022.", + "[4] Kwang-Jin Choi and Hyeong-Seok Ko. Online motion retargeting. The Journal of Visualization and Computer Animation, 11(5):223-235, 2000.", + "[5] Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Yunxuan Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, et al. Scaling instruction-finetuned language models. arXiv preprint arXiv:2210.11416, 2022.", + "[6] Michael Gleicher. Retargetting motion to new characters. In Proceedings of the 25th annual conference on Computer graphics and interactive techniques, pages 33-42, 1998.", + "[7] Chuan Guo, Xinxin Zuo, Sen Wang, Shihao Zou, Qingyao Sun, Annan Deng, Minglun Gong, and Li Cheng. Action2motion: Conditioned generation of 3d human motions. In Proceedings of the 28th ACM International Conference on Multimedia, pages 2021-2029, 2020.", + "[8] Chuan Guo, Shihao Zou, Xinxin Zuo, Sen Wang, Wei Ji, Xingyu Li, and Li Cheng. Generating diverse and natural 3d human motions from text. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5152-5161, 2022.", + "[9] Lei Hu, Zihao Zhang, Chongyang Zhong, Boyuan Jiang, and Shihong Xia. Pose-aware attention network for flexible motion retargeting by body part. IEEE Transactions on Visualization and Computer Graphics, pages 1-17, 2023.", + "[10] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3.6m: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE Transactions on Pattern Analysis and Machine Intelligence, 36(7):1325-1339, 2014.", + "[11] Jehee Lee and Sung Yong Shin. A hierarchical approach to interactive motion editing for human-like figures. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, pages 39-48, 1999.", + "[12] John P Lewis, Matt Cordner, and Nickson Fong. 
Pose space deformation: a unified approach to shape interpolation and skeleton-driven deformation. In Proceedings of the 27th annual conference on Computer graphics and interactive techniques, pages 165-172, 2000.", + "[13] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597, 2023.", + "[14] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. BLIP-2: bootstrapping language-image pre-training with" + ], + "bbox": [ + 78, + 114, + 470, + 901 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "frozen image encoders and large language models. In ICML, 2023.", + "[15] Jongin Lim, Hyung Jin Chang, and Jin Young Choi. Pmnet: Learning of disentangled pose and movement for unsupervised motion retargeting. In BMVC, page 7, 2019.", + "[16] Shichen Liu, Tianye Li, Weikai Chen, and Hao Li. Soft rasterizer: A differentiable renderer for image-based 3d reasoning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7708-7717, 2019.", + "[17] Gyeongsik Moon, Hongsuk Choi, and Kyoung Mu Lee. Accurate 3d hand pose estimation for whole-body 3d human mesh estimation. In Computer Vision and Pattern Recognition Workshop (CVPRW), 2022.", + "[18] Zoran Popović and Andrew Witkin. Physically based motion transformation. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, pages 11-20, 1999.", + "[19] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021.", + "[20] Guy Tevet, Brian Gordon, Amir Hertz, Amit H Bermano, and Daniel Cohen-Or. Motionclip: Exposing human motion generation to clip space. 
In European Conference on Computer Vision, pages 358–374. Springer, 2022.", + "[21] Guy Tevet, Sigal Raab, Brian Gordon, Yonatan Shafir, Daniel Cohen-Or, and Amit H Bermano. Human motion diffusion model. arXiv preprint arXiv:2209.14916, 2022.", + "[22] Ruben Villegas, Jimei Yang, Duygu Ceylan, and Honglak Lee. Neural kinematic networks for unsupervised motion retargeting. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8639-8648, 2018.", + "[23] Ruben Villegas, Duygu Ceylan, Aaron Hertzmann, Jimei Yang, and Jun Saito. Contact-aware retargeting of skinned motion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9720-9729, 2021.", + "[24] Zhengyuan Yang, Songyang Zhang, Liwei Wang, and Jiebo Luo. Sat: 2d semantics assisted training for 3d visual grounding. In ICCV, 2021.", + "[25] Jiaxu Zhang, Junwu Weng, Di Kang, Fang Zhao, Shaoli Huang, Xuefei Zhe, Linchao Bao, Ying Shan, Jue Wang, and Zhigang Tu. Skinned motion retargeting with residual perception of motion semantics & geometry. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13864-13872, 2023.", + "[26] Yi Zhou, Connelly Barnes, Jingwan Lu, Jimei Yang, and Hao Li. On the continuity of rotation representations in neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5745-5753, 2019.", + "[27] Deyao Zhu, Jun Chen, Xiaogian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023." + ], + "bbox": [ + 503, + 92, + 890, + 901 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "2163", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "ref_text", + "text": "[28] Ziyu Zhu, Xiaojian Ma, Yixin Chen, Zhidong Deng, Siyuan Huang, and Qing Li. 3d-vista: Pre-trained transformer for 3d vision and text alignment. 
ICCV, 2023.", + "bbox": [ + 78, + 90, + 470, + 135 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "2164", + "bbox": [ + 482, + 945, + 516, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2024/Semantics-aware Motion Retargeting with Vision-Language Models/94f0a721-55ce-4d79-a7e3-1129e082a51d_model.json b/2024/Semantics-aware Motion Retargeting with Vision-Language Models/94f0a721-55ce-4d79-a7e3-1129e082a51d_model.json new file mode 100644 index 0000000000000000000000000000000000000000..ac8e818461b0bc433e77bc1bc7e195953eed758d --- /dev/null +++ b/2024/Semantics-aware Motion Retargeting with Vision-Language Models/94f0a721-55ce-4d79-a7e3-1129e082a51d_model.json @@ -0,0 +1,2090 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.143, + 0.131, + 0.828, + 0.154 + ], + "angle": 0, + "content": "Semantics-aware Motion Retargeting with Vision-Language Models" + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.18, + 0.794, + 0.236 + ], + "angle": 0, + "content": "Haodong Zhang\\(^{1*}\\) Zhike Chen\\(^{1*}\\) Haocheng Xu\\(^{1}\\) Lei Hao\\(^{2}\\) Xiaofei Wu\\(^{2}\\) Songcen Xu\\(^{2}\\) Zhensong Zhang\\(^{2}\\) Yue Wang\\(^{1}\\) Rong Xiong\\(^{1\\dagger}\\) Zhejiang University \\({}^{2}\\)Huawei Noah's Ark Lab" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.269, + 0.314, + 0.285 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.301, + 0.474, + 0.604 + ], + "angle": 0, + "content": "Capturing and preserving motion semantics is essential to motion retargeting between animation characters. However, most of the previous works neglect the semantic information or rely on human-designed joint-level representations. Here, we present a novel Semantics-aware Motion reTargeting (SMT) method with the advantage of vision-language models to extract and maintain meaningful motion semantics. We utilize a differentiable module to render 3D motions. Then the high-level motion semantics are incorporated into the motion retargeting process by feeding the vision-language model with the rendered images and aligning the extracted semantic embeddings. To ensure the preservation of fine-grained motion details and high-level semantics, we adopt a two-stage pipeline consisting of skeleton-aware pre-training and fine-tuning with semantics and geometry constraints. Experimental results show the effectiveness of the proposed method in producing high-quality motion retargeting results while accurately preserving motion semantics. Project page can be found at https://sites.google.com/view/smtnet." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.631, + 0.21, + 0.646 + ], + "angle": 0, + "content": "1. 
Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.656, + 0.47, + 0.821 + ], + "angle": 0, + "content": "3D animation characters have extensive application in animation production, virtual reality, and various other domains. These characters are animated using motion data, resulting in lifelike and immersive animations. Nevertheless, acquiring motion data for each character can be a costly endeavor. Therefore, the ability to retarget existing motion data for new characters holds immense importance. The goal of motion retargeting is to transfer existing motion data to new characters following motion feature extraction and integration processes, which ensure the preservation of the original motion's characteristics." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.822, + 0.47, + 0.868 + ], + "angle": 0, + "content": "Semantics encompasses the meaningful and contextually relevant information conveyed in motion and plays a critical role in ensuring the realism and vividness of the anima" + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.268, + 0.701, + 0.431 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.705, + 0.269, + 0.892, + 0.431 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.443, + 0.895, + 0.555 + ], + "angle": 0, + "content": "Figure 1. Comparison with previous motion retargeting methods. (a) Previous works rely on human-designed joint distance matrix [25] or self-contacts between mesh vertices [23] to ensure semantics preservation. (b) Ours work enforces human-level motion semantics consistency with the extensive knowledge of vision-language models. (c) Comparison of motion quality and semantics preservation on the Mixamo dataset [1]. Our method achieves the best motion quality and semantics consistency." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.58, + 0.893, + 0.837 + ], + "angle": 0, + "content": "tion characters. 
Preservation of motion semantics can enhance the efficiency of motion retargeting by reducing the need for time-consuming manual adjustments and refinements. However, previous methods [2, 15, 22] are mainly based on retargeting of joint positions and make less use of the extraction of semantic information. They focus on trajectory-level motion retargeting with few attention to motion semantics. Consequently, this leads to a significant loss of motion semantics and necessitates the labor-intensive intervention of animation artists for manual trajectory adjustments. Recent advancements have introduced self-contacts [23] and joint distance matrices [25] as the representation of motion semantics. Nevertheless, self-contacts are not applicable to non-contact semantics and require intricate vertex correspondence. The human-designed joint distance matrices primarily focus on joint relative relationships and still lack consideration of high-level semantic information." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.84, + 0.894, + 0.903 + ], + "angle": 0, + "content": "To address the intricate task of capturing and preserving motion semantics, we introduce a new perspective: the most general and comprehensive form of motion semantics is human-level natural language, reflecting the user's intu" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.875, + 0.348, + 0.888 + ], + "angle": 0, + "content": "*These authors contributed equally to this work" + }, + { + "type": "page_footnote", + "bbox": [ + 0.097, + 0.888, + 0.327, + 0.9 + ], + "angle": 0, + "content": "† Corresponding author: rxiong@zju.edu.cn" + }, + { + "type": "list", + "bbox": [ + 0.096, + 0.875, + 0.348, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2155" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.152 + ], + "angle": 0, + "content": "itive understanding of 
motion. However, the main challenge of human-level motion semantics representation lies in the scarcity of labelled data. It is difficult and expensive to label sufficient semantic textual descriptions for motion data." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.153, + 0.471, + 0.395 + ], + "angle": 0, + "content": "In this paper, we introduce the incorporation of robust, state-of-the-art vision-language models to provide semantic guidance to the motion retargeting network. In the absence of labelled semantic data, we leverage the capabilities of a vision-language model to serve as a semantic supervisor in an unsupervised manner, which can extract motion semantics in a more intuitive way, as illustrated in Fig. 1. This approach offers a solution to the challenge of the limited availability of labelled semantic datasets for motion retargeting. To establish a connection between the vision-language model and motion semantics extraction, we employ the differentiable skinning and rendering modules to translate 3D motions into image sequences. Subsequently, we adopt visual question answering with guiding questions to inquire about the most relevant motion semantics from the vision-language model." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.397, + 0.471, + 0.653 + ], + "angle": 0, + "content": "To guarantee the preservation of motion semantics during motion retargeting, we introduce a semantics consistency loss that enforces the semantic embeddings of the targeted motion to closely align with those of the source motion. For dense semantic supervision and computational efficiency, we utilize latent features extracted by the vision-language model as the semantic embeddings instead of textual descriptions. To alleviate the non-linearity of the semantics consistency loss, we introduce a two-stage training approach. We categorize motion information into two distinct levels: the skeletal level and the semantic level. 
Our approach involves pre-training the motion retargeting network at the skeletal level, which is then further refined and fine-tuned at the semantic level with the power of vision-language models. To the best of our knowledge, we are the first to leverage the extensive capability of vision-language models for the task of semantics-aware motion retargeting." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.655, + 0.45, + 0.669 + ], + "angle": 0, + "content": "To summarize, the contributions of our work include:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.675, + 0.469, + 0.735 + ], + "angle": 0, + "content": "- We introduce an innovative framework that leverages the expertise of vision-language models as a semantic supervisor to tackle the challenge of limited labelled semantic data for the task of motion retargeting." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.736, + 0.469, + 0.795 + ], + "angle": 0, + "content": "- We propose to use differentiable skinning and rendering to translate from the motion domain to the image domain and perform guiding visual question answering to obtain human-level semantic representation." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.796, + 0.469, + 0.855 + ], + "angle": 0, + "content": "- We design a semantics consistency loss to maintain motion semantics and introduce an effective two-stage training pipeline consisting of pre-training at the skeletal level and fine-tuning at the semantic level." 
+ }, + { + "type": "text", + "bbox": [ + 0.078, + 0.857, + 0.469, + 0.901 + ], + "angle": 0, + "content": "- Our model achieves state-of-the-art performance in the challenging task of semantics-aware motion retargeting, delivering exceptional performance marked by high" + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.675, + 0.469, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.092, + 0.851, + 0.107 + ], + "angle": 0, + "content": "quality motion and superior semantics consistency." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.12, + 0.65, + 0.136 + ], + "angle": 0, + "content": "2. Related Works" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.145, + 0.892, + 0.387 + ], + "angle": 0, + "content": "Optimization-based Motion Retargeting. Motion retargeting is a technique to adapt existing motion data from a source character to a target character with different bone proportions, mesh skins, and skeletal structures. Early works formulate motion retargeting as a constrained optimization problem [4, 6, 11, 18]. Gleicher et al. [6] introduced a motion retargeting method, which identifies motion features as constraints and computes an adapted motion using a space-time constraint solver to preserve the desirable qualities. Lee et al. [11] proposed a method to adapt existing motion of a human-like character to have the desired features with specified constraints and combined a hierarchical curve fitting technique with inverse kinematics. Nonetheless, these methods necessitate the tedious and time-consuming process of formulating human-designed constraints for specific motion sequences." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.388, + 0.892, + 0.704 + ], + "angle": 0, + "content": "Learning-based Motion Retargeting. With the rise of deep learning, researchers have been developing learning-based motion retargeting methods in recent years [2, 9, 15, 22, 23, 25]. Villegas et al. 
[22] presented a recurrent neural network architecture, which incorporates a forward kinematics layer and cycle consistency loss for unsupervised motion retargetting. Aberman et al. [2] designed a skeleton-aware network with differentiable convolution, pooling, and unpooling operators to transform various homeomorphic skeletons into a primary skeleton for cross-structural motion retargeting. However, these methods tend to concentrate on trajectory-level motion retargeting with limited consideration for motion semantics, which often results in a notable loss of motion semantics and increase the heavy burden of manual adjustments to the trajectories. To address these problems, Zhang et al. [25] presented a residual retargeting network that uses a skeleton-aware module to preserve motion semantics and a shape-aware module to reduce interpenetration and contact missing. While this method successfully preserves joint relative relationships, it still falls short in addressing high-level motion semantics." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.705, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Vision-Language Models. Vision-language models have empowered various vision-language tasks, including visual question answering and image captioning. Tevet et al. [20] introduced a human motion generation model that aligns the latent space with that of the Contrastive Language-Image Pre-training (CLIP) model. Li et al. [13] proposed a pretraining strategy from off-the-shelf frozen pre-trained image encoders and frozen large language models for vision-to-language generative learning. Zhu et al. [27] presented a vision-language model, which uses one projection layer to align a frozen visual encoder with a frozen advanced large language models (LLM). 
However, these efforts primarily concentrate on vision-language tasks, leaving the question" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2156" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.102, + 0.09, + 0.87, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.101, + 0.272, + 0.285, + 0.284 + ], + "angle": 0, + "content": "Stage I: Skeleton-aware Pre-training" + }, + { + "type": "image_caption", + "bbox": [ + 0.453, + 0.273, + 0.677, + 0.285 + ], + "angle": 0, + "content": "Stage II: Semantics & Geometry Fine-tuning" + }, + { + "type": "image", + "bbox": [ + 0.1, + 0.285, + 0.348, + 0.369 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.355, + 0.285, + 0.871, + 0.37 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.383, + 0.892, + 0.455 + ], + "angle": 0, + "content": "Figure 2. Model Architecture. Our semantics-aware motion retargeting framework employs a two-stage pipeline. Initially, the retargeting network consisting of multiple spatial-temporal graph convolution layers is trained at the skeletal level to establish a base model. Subsequently, this model undergoes further refinement and fine-tuning at the semantic level by the alignment of latent semantic embeddings of the source and target, leveraging the extensive knowledge of vision-language models. The latent semantic embedding is extracted by guiding visual question answering. Additionally, the geometry constraints are also enforced during fine-tuning to avoid interpenetration." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.469, + 0.469, + 0.499 + ], + "angle": 0, + "content": "of how to effectively employ vision-language models to guide motion retargeting as an open and unexplored area." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.501, + 0.47, + 0.668 + ], + "angle": 0, + "content": "Human motion synthesis. Human motion synthesis is a domain related to motion retargeting, which aims to synthesize realistic and lifelike human motions from random noise or other inputs with generative networks. Guo et al. [7] proposed to generate human motion sequences based on action type. Guo et al. [8] presented a temporal variational autoencoder to synthesize human motions from text input. Tevet et al. [21] introduced a diffusion-based generative model for human motion generation. As comparison, we focus on the task of motion retargeting, where existing motion data is transferred from a source character to a target character." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.684, + 0.168, + 0.699 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.709, + 0.188, + 0.724 + ], + "angle": 0, + "content": "3.1. Overview" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.734, + 0.469, + 0.854 + ], + "angle": 0, + "content": "We present a novel semantic-aware motion retargeting method, as illustrated in Fig 2. In contrast to previous methods that neglect motion semantics [2, 15, 22] or rely on human-designed joint-level representations [25], our approach integrates natural language descriptions from vision-language models to offer an explicit and comprehensive semantic representation of character motions, thereby maintaining the preservation of semantic consistency." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Task definition. 
Given a source motion sequence, consisting of the skeleton motion and its associated skinning geometry, as well as a target character in the reference pose (e.g.," + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.469, + 0.892, + 0.53 + ], + "angle": 0, + "content": "T-posed), the objective of motion retargeting is to generate the target motion while preserving crucial motion characteristics, such as joint trajectory similarity and motion semantics, and satisfying geometry constraints." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.534, + 0.893, + 0.716 + ], + "angle": 0, + "content": "Graph representation. The skeleton motion sequence can be modelled as a sequence of graphs according to the skeleton hierarchy where each node corresponds to a joint and each edge represents a directed connection between joints. Assume that the motion sequence has \\( T \\) frames in total and the animation characters have \\( N \\) nodes and \\( M \\) edges. In our approach, we consider motion data as node features \\( \\mathbf{Q} \\in \\mathbb{R}^{T \\times N \\times 9} \\), which encompasses the 6D joint rotation representation [26] and 3D joint positions. Additionally, we utilize skeleton hierarchy information as edge features \\( \\mathbf{E} \\in \\mathbb{R}^{M \\times 3} \\), which consists of the 3D position offset between each joint and its parent joint." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Two-stage training. The motion of animation characters can be divided into skeletal movements and skinned movements, represented by skeletal joints and skinned vertices respectively. The skinned movements can be derived from the skeletal movements through the linear blend skinning algorithm [12]. Therefore, motion retargeting at the skeletal level can effectively downscale the data and reduce the complexity of the problem. 
However, this simplification process can lead to the loss of motion semantics and violations of geometry constraints. To address these issues, we employ a two-stage pipeline. Initially, we pre-train a skeleton-aware network to ensure a general initialization for motion retard" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2157" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.168 + ], + "angle": 0, + "content": "getting without considering motion semantics and geometry constraints. Subsequently, we fine-tune the pre-trained network for each source-target character pair with the vision-language model to maintain semantic consistency and enforce geometry constraints to prevent interpenetrations." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.175, + 0.332, + 0.192 + ], + "angle": 0, + "content": "3.2. Skeleton-aware Pre-training" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.199, + 0.47, + 0.334 + ], + "angle": 0, + "content": "Retargeting network. We propose a retargeting network consisting of a graph motion encoder and a graph motion decoder for motion retargeting. The motion encoder \\(\\mathcal{F}_{\\theta}\\) encodes the motion data \\(\\mathbf{Q}_A\\) of the source character A into the latent motion embedding \\(\\mathbf{Z}_A\\). Then, the motion decoder \\(\\mathcal{F}_{\\phi}\\) generates the joint angles \\(\\mathbf{Q}_B\\) of the target character B based on the latent features. Both the motion encoder and decoder are composed of multiple graph convolutions. More details are available in the supplementary materials." 
+ }, + { + "type": "equation", + "bbox": [ + 0.209, + 0.347, + 0.341, + 0.363 + ], + "angle": 0, + "content": "\\[\n\\mathbf {Z} _ {A} = \\mathcal {F} _ {\\theta} (\\mathbf {Q} _ {A}, \\mathbf {E} _ {A})\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.208, + 0.363, + 0.469, + 0.381 + ], + "angle": 0, + "content": "\\[\n\\mathbf {Q} _ {B} = \\mathcal {F} _ {\\phi} (\\mathbf {Z} _ {A}, \\mathbf {E} _ {B}) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.385, + 0.469, + 0.491 + ], + "angle": 0, + "content": "In the first phase, we train the motion encoder and decoder at the skeletal level to establish a robust initialization for motion retargeting. Following the unsupervised learning setting in [22], we train the network with the reconstruction loss, cycle consistency loss, adversarial loss, and joint relationship loss. The overall objective function for skeleton-aware pre-training is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.104, + 0.514, + 0.469, + 0.531 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {s k e l} = \\lambda_ {r} \\mathcal {L} _ {r e c} + \\lambda_ {c} \\mathcal {L} _ {c y c} + \\lambda_ {a} \\mathcal {L} _ {a d v} + \\lambda_ {j} \\mathcal {L} _ {j d m} \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.539, + 0.469, + 0.616 + ], + "angle": 0, + "content": "The reconstruction loss \\(\\mathcal{L}_{rec}\\) encourages the retargeted motion to match the source motion when the target character is the same as the source character. Let \\(\\mathbf{Q}_{A,t}\\) be the motion data of source character A at frame \\(t\\), and \\(\\hat{\\mathbf{Q}}_{A,t}^{rec}\\) be the reconstructed motion. 
Then \\(\\mathcal{L}_{rec}\\) is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.173, + 0.622, + 0.469, + 0.657 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {r e c} = \\sum_ {t} \\left| \\left| \\hat {\\mathbf {Q}} _ {A, t} ^ {r e c} - \\mathbf {Q} _ {A, t} \\right| \\right| _ {2} ^ {2} \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.665, + 0.469, + 0.743 + ], + "angle": 0, + "content": "The cycle consistency loss \\(\\mathcal{L}_{cyc}\\) promotes the consistency of retargeted motion from the source character A to the target character B and then back to the source character A, ensuring it remains in line with the original motion. Let \\(\\hat{\\mathbf{Q}}_{A,t}^{cyc}\\) represent the retargeted motion. Then \\(\\mathcal{L}_{cyc}\\) is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.173, + 0.751, + 0.469, + 0.786 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {c y c} = \\sum_ {t} \\left| \\left| \\hat {\\mathbf {Q}} _ {A, t} ^ {c y c} - \\mathbf {Q} _ {A, t} \\right| \\right| _ {2} ^ {2} \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.79, + 0.469, + 0.866 + ], + "angle": 0, + "content": "The adversarial loss \\(\\mathcal{L}_{adv}\\) is calculated by a discriminator network, which utilizes the unpaired data of the target character to learn how to distinguish whether the motions are real or fake. Let \\(\\mathcal{F}_{\\gamma}\\) be the discriminator network, and \\(\\mathbf{Q}_{B,t}\\) be the retargeted motion at frame \\(t\\). 
Then it is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.163, + 0.873, + 0.469, + 0.904 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {a d v} = \\sum_ {t} \\log \\left(1 - \\mathcal {F} _ {\\gamma} \\left(\\mathbf {Q} _ {B, t}\\right)\\right) \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.892, + 0.198 + ], + "angle": 0, + "content": "The joint relationship loss \\(\\mathcal{L}_{jdm}\\) is calculated by the joint distance matrix (JDM) \\(\\mathbf{D} \\in \\mathbb{R}^{N \\times N}\\), which represents the relative positional relationships of the joints. The element \\(d_{i,j}\\) of \\(\\mathbf{D}\\) represents the Euclidean distance between joint \\(i\\) and joint \\(j\\). We extract the joint distance matrix from the target character and compare it with the source character. Then \\(\\mathcal{L}_{jdm}\\) is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.573, + 0.209, + 0.892, + 0.239 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {j d m} = \\sum_ {t} \\left| \\left| \\eta (\\mathbf {D} _ {A, t}) - \\eta (\\mathbf {D} _ {B, t}) \\right| \\right| _ {2} ^ {2} \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.243, + 0.892, + 0.289 + ], + "angle": 0, + "content": "where \\(\\eta(.)\\) is an L1 normalization performed on each row of the distance matrix. This normalization operation eliminates the difference in bone length to some extent." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.296, + 0.81, + 0.312 + ], + "angle": 0, + "content": "3.3. Semantics & Geometry Fine-tuning" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.32, + 0.892, + 0.456 + ], + "angle": 0, + "content": "In the second phase, we fine-tune the pre-trained retargeting network for each source-target character pair to preserve motion semantics and satisfy geometry constraints. 
The motion semantics is maintained by the semantics consistency loss, which aligns the semantic embeddings extracted from a vision-language model for both the source and target. Additionally, the geometry constraint is satisfied by minimizing the interpenetration loss. The overall objective function for fine-tuning is outlined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.601, + 0.47, + 0.891, + 0.487 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {f i n e}} = \\lambda_ {s} \\mathcal {L} _ {\\text {s e m}} + \\lambda_ {p} \\mathcal {L} _ {\\text {p e n}} \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.489, + 0.892, + 0.701 + ], + "angle": 0, + "content": "Differentiable skinning & rendering. To make the finetuning process differentiable for gradient back-propagation, we first use the differentiable linear blend skinning algorithm [12], denoted as \\(\\mathcal{F}_{lbs}\\), to transform the target joint angles \\(\\mathbf{Q}_B\\) into skinned motions \\(\\mathbf{V}_B\\), represented by 3D mesh vertices. Subsequently, we employ the differentiable projection function \\(\\mathcal{F}_{proj}\\) as introduced in [16] to convert the skinned motions into 2D images \\(\\mathbf{I}_B\\). A limitation for the differentiable rendering process is that when projecting the 3D skinned mesh onto 2D images, the depth information is lost. To obtain a comprehensive semantic representation of the motion, we render the character from multiple perspectives and then combine the extracted features, following the Non-rigid Shape Fitting task in [16]." 
+ }, + { + "type": "equation", + "bbox": [ + 0.618, + 0.713, + 0.775, + 0.729 + ], + "angle": 0, + "content": "\\[\n\\mathbf {I} _ {A} = \\mathcal {F} _ {p r o j} \\left(\\mathcal {F} _ {l b s} \\left(\\mathbf {Q} _ {A}\\right)\\right)\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.618, + 0.728, + 0.775, + 0.748 + ], + "angle": 0, + "content": "\\[\n\\mathbf {I} _ {B} = \\mathcal {F} _ {p r o j} \\left(\\mathcal {F} _ {l b s} (\\mathbf {Q} _ {B})\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.892, + 0.724, + 0.894, + 0.735 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.75, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Frozen vision-language model. To obtain an explicit and reliable semantic feature of the motion, we employ a frozen vision-language model as our semantic supervisor. Current 3D vision-language datasets [3, 28] mainly focus on the occupation or the segmentation of the object in a spatial scene like rooms, and thus the state-of-the-art 3D vision-language models [28] lack prior knowledge relevant to animation characters. In contrast, 2D vision-language models achieve better results in semantic tasks, such as image captioning, visual question answering and image-text" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2158" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.09, + 0.473, + 0.252 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.094, + 0.264, + 0.454, + 0.28 + ], + "angle": 0, + "content": "Figure 3. An example of guiding visual question answering." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.295, + 0.47, + 0.401 + ], + "angle": 0, + "content": "retrieval, and provides cleaner and richer semantics [24]. Therefore, we utilize a frozen 2D vision-language model to extract latent embeddings of motion semantics. 
The frozen 2D vision-language model employed in our work is BLIP-2 [14], which incorporates a lightweight querying transformer as a bridge between the off-the-shelf frozen pre-trained image encoder and the frozen large language model." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.401, + 0.47, + 0.658 + ], + "angle": 0, + "content": "Prompt design. Since the vision-language model has the capability to extract rich information from images, it is possible that the extracted features might contain redundant details, such as the appearance of the character. To guide the vision-language model to obtain semantic embedding relevant to character motions, we adopt a guiding visual question answering approach for motion semantics extraction, as depicted in Fig. 3. We believe that there is a strong correlation between motion semantics and hand movements. To acquire a more comprehensive description of the motion, we initially provide a guiding question to BLIP-2: \"Where are the hands of the character?\" Subsequently, we introduce a new question and combine it with the first answer as the input to BLIP-2: \"[The answers to the first question generated by the vision-language model] What is the character in the image doing?\" For more details, please refer to the supplementary materials." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.659, + 0.471, + 0.856 + ], + "angle": 0, + "content": "Latent semantic embedding. We opt to align the latent semantic embeddings of the source and target generated by the vision-language model rather than relying on textual descriptions, specifically leveraging the encoder output of the large language model. This approach enables us to acquire a more accurate and denser representation, while also mitigating computational costs and the non-linearity of the training objective caused by the large number of parameters of the vision-language model. 
Let \\(\\mathbf{E}_A\\) and \\(\\mathbf{E}_B\\) be the latent semantic embeddings of the source and target motions, \\(\\mathcal{F}_{\\omega}\\) be the frozen pre-trained image encoder, \\(\\mathcal{F}_{\\sigma}\\) be the frozen querying transformer, \\(\\mathcal{F}_{\\psi}\\) be the encoder of the frozen large language model, and context be the question." + }, + { + "type": "equation", + "bbox": [ + 0.158, + 0.868, + 0.469, + 0.904 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbf {E} _ {A} = \\mathcal {F} _ {\\psi} \\left(\\mathcal {F} _ {\\sigma} \\left(\\mathcal {F} _ {\\omega} (\\mathbf {I} _ {A}), \\text {c o n t e x t}\\right)\\right) \\\\ \\mathbf {E} _ {B} = \\mathcal {F} _ {\\psi} \\left(\\mathcal {F} _ {\\sigma} \\left(\\mathcal {F} _ {\\omega} (\\mathbf {I} _ {B}), \\text {c o n t e x t}\\right)\\right) \\end{array} \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.894, + 0.198 + ], + "angle": 0, + "content": "Fine-tuning with semantics consistency. As illustrated in Fig. 2, our approach aligns the latent semantic embeddings of both the source and target motions in an unsupervised manner, ensuring a high degree of semantic consistency in the retargeted results. The semantics consistency loss \\(\\mathcal{L}_{sem}\\) is calculated using the mean square error and it is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.598, + 0.209, + 0.892, + 0.241 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {s e m} = \\sum_ {t} \\| \\mathbf {E} _ {A, t} - \\mathbf {E} _ {B, t} \\| _ {2} ^ {2} \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.246, + 0.892, + 0.368 + ], + "angle": 0, + "content": "Fine-tuning with geometry constraints. 
From our observations, most interpenetration problems occur between the limbs and the main body. To address this, we incorporate the signed distance field between the limb vertices and the body mesh as the interpenetration loss. First, we convert the skeleton motion output from the network into mesh vertices using the linear blend skinning method [12]. Then, the interpenetration loss is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.584, + 0.379, + 0.892, + 0.41 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {p e n} = \\sum_ {t} R e L U (- \\Phi_ {b, t} (\\mathbf {V} _ {l, t})) \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.416, + 0.893, + 0.477 + ], + "angle": 0, + "content": "where \\(\\Phi_b\\) indicates the signed distance field function, \\(\\mathbf{V}_l\\) is the vertices of the limbs. If the vertex locates inside the body, the value of the function is less than zero. Therefore, we use the \\(ReLU\\) function to penalize the inner vertices." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.491, + 0.634, + 0.508 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.515, + 0.599, + 0.532 + ], + "angle": 0, + "content": "4.1. Settings" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.538, + 0.892, + 0.841 + ], + "angle": 0, + "content": "Datasets. We train and evaluate our method on the Mixamo dataset [1], an extensive repository of animations performed by various 3D virtual characters with distinct skeletons and geometry shapes. The training set we use to pretrain our skeleton aware module is the same as that used in [2], which contains 1646 motions performed by 7 characters. It's important to note that the Mixamo dataset does not provide clean ground truth data, since many of the motion sequences suffer from interpenetration issues and semantic information loss. 
To mitigate this, we have carefully selected a subset of motion sequences that are both semantically clean and free of interpenetration issues for fine-tuning and testing. Our fine-tuning process involves retargeting 15 clean motions including 3127 frames, originally performed by 3 source characters, namely \"Y Bot\", \"X Bot\", and \"Ortiz\", onto 3 target characters, including \"Aj\", \"Kaya\", and \"Mousey\". Then we evaluate the performance of our model on the task of retargeting 30 additional motions that are previously unseen in the training set and fine-tuning sets. More details could be found in the supplementary materials." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.841, + 0.894, + 0.902 + ], + "angle": 0, + "content": "Implementation details. The hyper-parameters \\(\\lambda_r, \\lambda_c, \\lambda_a, \\lambda_j, \\lambda_p, \\lambda_s\\) for pre-training and fine-tuning loss functions are set to 10.0, 1.0, 0.1, 1.0, 1.0, 0.1. For semantics fine-tuning, we use BLIP-2 [14] with pre-trained FlanT5-XXL" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2159" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.468, + 0.212 + ], + "angle": 0, + "content": "[5] large language model. To extract the semantic representation of the motion, we render animation from three perspectives, including the front view, left view and right view. The fine-tuning process takes 25 epochs with 5 clean motion sequences of the source character for each target character. During pre-training and fine-tuning, we use an Adam optimizer to optimize the retargeting network. Please refer to the supplementary materials for more details." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.213, + 0.471, + 0.442 + ], + "angle": 0, + "content": "Evaluation metrics. We evaluate the performance of our method across three key dimensions: skeleton, geometry, and semantics. 
At the skeletal level, we measure the Mean Square Error (MSE) between retargeted joint positions and the ground truth provided by Mixamo, analyzing both the global and the local joint positions. At the geometric level, we evaluate the interpenetration percentage (PEN). At the semantic level, we utilize the Image-Text Matching (ITM) score, Fréchet inception distance (FID) and semantics consistency loss (SCL) as metrics. The ITM score quantifies the visual-semantic similarity between the source textual description and the rendered retargeted motion. FID is calculated between the semantic embedding distribution of retargeted motion and source motion. More details are provided in the supplementary materials." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.453, + 0.377, + 0.469 + ], + "angle": 0, + "content": "4.2. Comparison with State of the Arts" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.478, + 0.47, + 0.78 + ], + "angle": 0, + "content": "Quantitative. In this section, we conduct a comparative analysis of our method against the state-of-the-art approaches as illustrated in Tab. 1. The baseline methods include R2ET [25], SAN [2], NKN [22] and the Copy strategy. The Copy strategy achieves the lowest local MSE because the ground truth data in the Mixamo dataset are not entirely clean, and many of them are generated by copying rotations. As a result, this strategy comes at the cost of semantic loss and interpenetration issues. SAN [2] and NKN [22] focus on skeleton-level motion features, which results in a high interpenetration rate and relatively low semantics preservation. R2ET [25] treats motion semantics as the joint distance matrix and mesh distance field, which helps it obtain better motion semantics than SAN and Copy. Nevertheless, there is still a gap between the human-designed distance matrix and the human-level semantics. 
Notably, our model exhibits the best interpenetration rate and semantics preservation among all methods, showcasing the capability of the proposed method in producing high-quality retargeted motions with semantics consistency." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.781, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Qualitative. In Fig. 4, we visualize the text descriptions of the motions and the qualitative comparison between the state-of-the-arts and our method. SAN [2] and Copy neglect the preservation of semantics and have severe interpenetration. R2ET [25] utilizes joint distance matrix as semantics representation and fails to capture high-level semantic information. For example, the salute motion retargeted by R2ET [25] appears more like a hand-up motion. As a comparison," + }, + { + "type": "table", + "bbox": [ + 0.499, + 0.089, + 0.891, + 0.208 + ], + "angle": 0, + "content": "
MethodMSE ↓MSElc ↓Pen.% ↓ITM ↑FID ↓SCL ↓
Source--4.430.796--
GT--9.060.58226.991.331
Copy-0.0059.030.58126.581.327
NKN [22]0.3260.2318.710.57527.791.414
SAN [2]0.4350.2559.740.56128.331.448
R2ET [25]0.4990.4967.620.6435.4690.405
Ours0.2840.2293.500.6800.4360.143
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.212, + 0.892, + 0.268 + ], + "angle": 0, + "content": "Table 1. Quantitative comparison with the state-of-the-arts. \\(\\mathrm{MSE}^{lc}\\) denotes the local MSE. ITM indicates the image-text matching score. FID is Fréchet inception distance of motion semantics. SCL is the semantics consistency loss." + }, + { + "type": "table", + "bbox": [ + 0.5, + 0.275, + 0.891, + 0.391 + ], + "angle": 0, + "content": "
MethodMSE ↓MSElc ↓Pen.% ↓ITM ↑FID ↓SCL ↓
SMTtws0.2480.1298.370.5867.7270.769
SMTtwf7.7987.0830.440.43256.5313.29
SMTtwa0.3350.2885.360.6582.8260.266
SMTfwp0.4390.3681.220.5977.2410.583
SMTfwi5.4184.5764.410.55278.4618.96
SMTfwq0.7390.5174.560.6682.4970.191
SMTOurs0.2840.2293.500.6800.4360.143
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.394, + 0.892, + 0.492 + ], + "angle": 0, + "content": "Table 2. Ablation study. \\(\\mathrm{SMT}_{tws}\\) is the network trained with only skeleton-aware pre-training. \\(\\mathrm{SMT}_{twf}\\) is the network trained with only semantics and geometry fine-tuning. \\(\\mathrm{SMT}_{twa}\\) is the network trained in one stage. \\(\\mathrm{SMT}_{fwp}\\) is the network fine-tuned with only the interpenetration loss. \\(\\mathrm{SMT}_{fwi}\\) is the network fine-tuned with image features. \\(\\mathrm{SMT}_{fwq}\\) is the network fine-tuned with the features of the querying transformer." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.502, + 0.892, + 0.624 + ], + "angle": 0, + "content": "our method is able to successfully preserve high-level motion semantics leveraging the vision-language model. We observe that our approach reaches the best results among all methods, achieving more reliable semantics preservation and lower interpenetration rates. It suggests that with semantics and geometry fine-tuning, our method could effectively solve interpenetration issues together with semantics preservation." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.635, + 0.665, + 0.65 + ], + "angle": 0, + "content": "4.3. Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.659, + 0.892, + 0.825 + ], + "angle": 0, + "content": "Skeleton-aware pre-training. The proposed method can be divided into two stage: pre-training and fine-tuning. To illustrate the importance of skeleton-aware pre-training, we evaluate the network trained with only the semantics consistency loss and the interpenetration loss in Tab. 2, denoted as \\(\\mathrm{SMT}_{twf}\\). The network trained without skeleton-aware pretraining performs worst in MSE and semantics preservation. 
A reasonable explanation is that the semantics consistency loss is highly non-linear, so it is important to pre-train the network at the skeletal level to provide better initial values. We also visualize qualitative results in Fig. 5." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.825, + 0.892, + 0.903 + ], + "angle": 0, + "content": "Semantics & geometry fine-tuning. We also conduct ablation study to illustrate the importance of semantics and geometry fine-tuning in Tab. 2. We first evaluate the performance of the skeleton-aware model without fine-tuning, denoted as \\(\\mathrm{SMT}_{tws}\\). Though it reaches the best global posi" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "2160" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.094, + 0.895, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.409, + 0.893, + 0.452 + ], + "angle": 0, + "content": "Figure 4. Qualitative comparison. The results demonstrate that our method can effectively preserve semantics while the baseline methods suffer from interpenetration or semantic information loss. From the first column to the last column are the source motion, the Copy strategy, NKN [22], SAN [2], R2ET [25], our method and text descriptions, respectively." + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.461, + 0.449, + 0.668 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.674, + 0.47, + 0.743 + ], + "angle": 0, + "content": "Figure 5. The qualitative comparison of ablation study between the network without fine-tuning (TWS), the network trained with only semantics and geometry fine-tuning (TWF), the network trained with all loss functions (TWA), the network fine-tuned with only the interpenetration loss (FWP) and our full model (All)." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.473, + 0.901 + ], + "angle": 0, + "content": "tion MSE, it suffers from interpenetration and semantic information loss because of the low-quality motion data provided by Mixamo. We next evaluate the network fine-tuned with only the interpenetration loss, denoted as \\(\\mathrm{SMT}_{fwp}\\). This version results in a significant boost in terms of penetration rate. However, the gradient of interpenetration loss is only relevant with the face normals of the geometry mesh without considering the semantic information conveyed in the motion. It indicates the importance of the semantic consistency loss that makes the network reach a better balance" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.464, + 0.892, + 0.6 + ], + "angle": 0, + "content": "between interpenetration and semantics. We also try to train the network with all loss functions in one stage, denoted as \\(\\mathrm{SMT}_{twa}\\). However, it is challenging for the model to acquire general knowledge of interpenetration and semantics that is suitable for every character with limited data. Therefore, training the model with skeleton-aware pre-training and fine-tuning it with semantics consistency and geometry constraints for each target character remains a more reasonable and data-efficient strategy." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.607, + 0.893, + 0.834 + ], + "angle": 0, + "content": "Latent semantic embedding. The vision-language model used for semantic extraction can be divided into three parts: the image encoder from CLIP [19], the querying transformer and the large language model. In Tab. 2, we compare the feature outputted by the image encoder, the querying transformer and the encoder of the large language model, denoted as \\(\\mathrm{SMT}_{fwi}\\), \\(\\mathrm{SMT}_{fwq}\\), and \\(\\mathrm{SMT}_{Ours}\\), respectively. 
The results show that the image feature performs worse since it is greatly affected by the appearance of the character. It indicates that with the help of the large language model, the semantic representation better focuses on the semantic meaning of the motion instead of the character's visual appearance. Therefore, the encoder output of the large language model is more suitable for semantic embedding. More details can be found in the supplementary materials." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.903 + ], + "angle": 0, + "content": "Prompt design. To validate the importance of guiding visual question answering, we compare the textual descriptions generated by visual question answering with and without guiding questions as well as image captioning. The re" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "2161" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.09, + 0.186, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.194, + 0.098, + 0.253, + 0.106 + ], + "angle": 0, + "content": "Image Captioning" + }, + { + "type": "image_footnote", + "bbox": [ + 0.194, + 0.107, + 0.288, + 0.123 + ], + "angle": 0, + "content": "A 3d model of a boy wearing glasses and a hat." + }, + { + "type": "image_caption", + "bbox": [ + 0.324, + 0.107, + 0.438, + 0.114 + ], + "angle": 0, + "content": "Guiding Visual Question Answering" + }, + { + "type": "image_footnote", + "bbox": [ + 0.324, + 0.114, + 0.464, + 0.121 + ], + "angle": 0, + "content": "Q: Where are the hands of the character?" + }, + { + "type": "image_caption", + "bbox": [ + 0.194, + 0.14, + 0.281, + 0.147 + ], + "angle": 0, + "content": "Visual Question Answering" + }, + { + "type": "image_footnote", + "bbox": [ + 0.194, + 0.148, + 0.3, + 0.156 + ], + "angle": 0, + "content": "Q: What is the character doing?" 
+ }, + { + "type": "image_footnote", + "bbox": [ + 0.194, + 0.156, + 0.285, + 0.165 + ], + "angle": 0, + "content": "A: The character is praying." + }, + { + "type": "list", + "bbox": [ + 0.194, + 0.148, + 0.3, + 0.165 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.325, + 0.192, + 0.438, + 0.2 + ], + "angle": 0, + "content": "Guiding Visual Question Answering" + }, + { + "type": "image_footnote", + "bbox": [ + 0.324, + 0.201, + 0.463, + 0.215 + ], + "angle": 0, + "content": "Q: Where are the hands of the character? A: Holding a ball." + }, + { + "type": "image_footnote", + "bbox": [ + 0.324, + 0.215, + 0.432, + 0.222 + ], + "angle": 0, + "content": "Q: What is the character doing?" + }, + { + "type": "image_footnote", + "bbox": [ + 0.324, + 0.222, + 0.458, + 0.244 + ], + "angle": 0, + "content": "A: The character is trying to throw a ball with both hands on the right side of his body." + }, + { + "type": "list", + "bbox": [ + 0.324, + 0.201, + 0.463, + 0.244 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.177, + 0.186, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.194, + 0.184, + 0.253, + 0.192 + ], + "angle": 0, + "content": "Image Captioning" + }, + { + "type": "image_footnote", + "bbox": [ + 0.194, + 0.193, + 0.307, + 0.209 + ], + "angle": 0, + "content": "A 3d model of a robot running on a cheeked floor." + }, + { + "type": "image_caption", + "bbox": [ + 0.194, + 0.223, + 0.281, + 0.23 + ], + "angle": 0, + "content": "Visual Question Answering" + }, + { + "type": "image_footnote", + "bbox": [ + 0.194, + 0.232, + 0.301, + 0.257 + ], + "angle": 0, + "content": "Q: What is the character doing? \nA: The character is running on a checkered floor." + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.267, + 0.469, + 0.295 + ], + "angle": 0, + "content": "Figure 6. Text descriptions generated by different ways. 
The guiding visual question answering yields more comprehensive results." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.302, + 0.459, + 0.408 + ], + "angle": 0, + "content": "
MethodQuality ↑Smoothness ↑Semantics ↑
Copy0.720.860.71
NKN [22]0.650.800.66
SAN [2]0.690.820.67
R2ET [25]0.800.610.85
Ours0.890.800.92
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.077, + 0.412, + 0.469, + 0.454 + ], + "angle": 0, + "content": "Table 3. User study results. We collect 100 comparisons in three aspects. Our method gets highest scores in the overall quality as well as semantics preservation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.459, + 0.469, + 0.565 + ], + "angle": 0, + "content": "sults in Fig. 6 indicate that using guiding questions for visual question answering yields the most comprehensive and reasonable text descriptions for motion semantics. Compared with image captioning that uses the vision-language model to generate text description directly from images, the answers from visual question answering task can be guided by the designed question to focus on motion semantics." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.574, + 0.2, + 0.59 + ], + "angle": 0, + "content": "4.4. User Study" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.597, + 0.47, + 0.779 + ], + "angle": 0, + "content": "We conduct a user study to evaluate the performance of our method against the baseline methods. Human subjects are given 12 videos. Each video includes one source skinned motion and five anonymous skinned results. The retargeted results are randomly placed. We ask subjects to rate the results out of 1.0 in three aspects: overall quality, motion smoothness and semantics preservation. We collect a total of 100 comparisons. During the evaluation, users are required to extract semantic meaning from the source motion themselves and then evaluate the preservation of retargeted motions. In general, more than \\(92\\%\\) of subjects prefer the retargeting results of our method." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.787, + 0.425, + 0.803 + ], + "angle": 0, + "content": "4.5. 
Retargeting Motion from Human Videos" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.469, + 0.9 + ], + "angle": 0, + "content": "In this section, we evaluate our motion retargeting approach from human videos in the human3.6M [10] dataset. Video retargeting involves two stages: human pose estimation from video and motion retargeting. However, inaccuracies in estimating body postures may result in semantic information loss and thus accumulation of errors in the entire" + }, + { + "type": "image", + "bbox": [ + 0.521, + 0.09, + 0.635, + 0.177 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.638, + 0.09, + 0.751, + 0.178 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.755, + 0.09, + 0.869, + 0.178 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.521, + 0.18, + 0.635, + 0.268 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.638, + 0.181, + 0.751, + 0.268 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.755, + 0.181, + 0.869, + 0.268 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.273, + 0.892, + 0.315 + ], + "angle": 0, + "content": "Figure 7. We retarget from human motion clips in the human3.6M [10] dataset. The retargeted motions are free from interpenetration and preserve semantics well." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.319, + 0.892, + 0.424 + ], + "angle": 0, + "content": "retargeting process. Therefore, we first get the estimated human pose from [17]. Then we utilize the vision-language model to extract the semantic embedding of the original video and calculate the semantic consistency loss to optimize the joint angles acquired from the retargeting process directly. In Fig. 7, we show our results of motion retargeting from human videos to Mixamo characters." 
+ }, + { + "type": "title", + "bbox": [ + 0.5, + 0.438, + 0.627, + 0.453 + ], + "angle": 0, + "content": "5. Conclusions" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.463, + 0.892, + 0.629 + ], + "angle": 0, + "content": "In this paper, we present a novel semantics-aware motion retargeting method that leverages the capabilities of vision-language models to extract semantic embeddings and facilitate the preservation of motion semantics. This approach offers a promising solution to the challenge of lacking labelled semantic data for motion. Our proposed method involves a two-stage process that integrates skeleton-level motion characteristics and semantics-level consistency along with geometry constraints. Experimental results demonstrate that our approach excels in generating high-quality retargeted motions with semantics consistency." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.63, + 0.892, + 0.765 + ], + "angle": 0, + "content": "Limitations. The main limitation is the performance of the vision-language model in extracting motion semantics. Without the support of motion semantic datasets of sufficient data size and quality, we rely on the model pre-trained on large image-text datasets. Although the model achieves some remarkable results in motion semantics extraction, there is still room for improvement. In addition, the projection of 3D motion into 2D images loses spatial information and affects the performance." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.766, + 0.892, + 0.854 + ], + "angle": 0, + "content": "Future work. Compared with 2D vision-language models, 3D vision-language models have the advantage of capturing spatial relationships directly. Therefore, fine-tuning 3D vision-language models to make them more suitable for the task of motion semantics extraction is worth exploring in our future work." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.9 + ], + "angle": 0, + "content": "Acknowledgements. 
This work was supported by the National Nature Science Foundation of China under Grant 62173293." + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2162" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.105 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.116, + 0.47, + 0.142 + ], + "angle": 0, + "content": "[1] Adobe's mixamo. https://www.mixamo.com/. Accessed: 2023-02-08." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.145, + 0.47, + 0.2 + ], + "angle": 0, + "content": "[2] Kfir Aberman, Peizhuo Li, Dani Lischinski, Olga Sorkine-Hornung, Daniel Cohen-Or, and Baoquan Chen. Skeleton-aware networks for deep motion retargeting. ACM Transactions on Graphics (TOG), 39(4):62-1, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.202, + 0.471, + 0.258 + ], + "angle": 0, + "content": "[3] Daichi Azuma, Taiki Miyanishi, Shuhei Kurita, and Motoaki Kawanabe. Scanqa: 3d question answering for spatial scene understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.26, + 0.47, + 0.3 + ], + "angle": 0, + "content": "[4] Kwang-Jin Choi and Hyeong-Seok Ko. Online motion retargeting. The Journal of Visualization and Computer Animation, 11(5):223-235, 2000." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.303, + 0.471, + 0.372 + ], + "angle": 0, + "content": "[5] Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Yunxuan Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, et al. Scaling instruction-finetuned language models. arXiv preprint arXiv:2210.11416, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.374, + 0.471, + 0.416 + ], + "angle": 0, + "content": "[6] Michael Gleicher. Retargetting motion to new characters. 
In Proceedings of the 25th annual conference on Computer graphics and interactive techniques, pages 33-42, 1998." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.418, + 0.47, + 0.487 + ], + "angle": 0, + "content": "[7] Chuan Guo, Xinxin Zuo, Sen Wang, Shihao Zou, Qingyao Sun, Annan Deng, Minglun Gong, and Li Cheng. Action2motion: Conditioned generation of 3d human motions. In Proceedings of the 28th ACM International Conference on Multimedia, pages 2021-2029, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.489, + 0.47, + 0.558 + ], + "angle": 0, + "content": "[8] Chuan Guo, Shihao Zou, Xinxin Zuo, Sen Wang, Wei Ji, Xingyu Li, and Li Cheng. Generating diverse and natural 3d human motions from text. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5152-5161, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.56, + 0.47, + 0.615 + ], + "angle": 0, + "content": "[9] Lei Hu, Zihao Zhang, Chongyang Zhong, Boyuan Jiang, and Shihong Xia. Pose-aware attention network for flexible motion retargeting by body part. IEEE Transactions on Visualization and Computer Graphics, pages 1-17, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.617, + 0.47, + 0.686 + ], + "angle": 0, + "content": "[10] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3.6m: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE Transactions on Pattern Analysis and Machine Intelligence, 36(7):1325-1339, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.688, + 0.47, + 0.743 + ], + "angle": 0, + "content": "[11] Jehee Lee and Sung Yong Shin. A hierarchical approach to interactive motion editing for human-like figures. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, pages 39-48, 1999." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.746, + 0.47, + 0.814 + ], + "angle": 0, + "content": "[12] John P Lewis, Matt Cordner, and Nickson Fong. Pose space deformation: a unified approach to shape interpolation and skeleton-driven deformation. In Proceedings of the 27th annual conference on Computer graphics and interactive techniques, pages 165-172, 2000." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.817, + 0.47, + 0.871 + ], + "angle": 0, + "content": "[13] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.47, + 0.902 + ], + "angle": 0, + "content": "[14] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. BLIP-2: bootstrapping language-image pre-training with" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.116, + 0.471, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "frozen image encoders and large language models. In ICML, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.122, + 0.892, + 0.164 + ], + "angle": 0, + "content": "[15] Jongin Lim, Hyung Jin Chang, and Jin Young Choi. Pmnet: Learning of disentangled pose and movement for unsupervised motion retargeting. In BMVC, page 7, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.165, + 0.892, + 0.221 + ], + "angle": 0, + "content": "[16] Shichen Liu, Tianye Li, Weikai Chen, and Hao Li. Soft rasterizer: A differentiable renderer for image-based 3d reasoning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7708-7717, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.222, + 0.892, + 0.277 + ], + "angle": 0, + "content": "[17] Gyeongsik Moon, Hongsuk Choi, and Kyoung Mu Lee. 
Accurate 3d hand pose estimation for whole-body 3d human mesh estimation. In Computer Vision and Pattern Recognition Workshop (CVPRW), 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.279, + 0.892, + 0.333 + ], + "angle": 0, + "content": "[18] Zoran Popović and Andrew Witkin. Physically based motion transformation. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, pages 11-20, 1999." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.335, + 0.892, + 0.418 + ], + "angle": 0, + "content": "[19] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.42, + 0.892, + 0.476 + ], + "angle": 0, + "content": "[20] Guy Tevet, Brian Gordon, Amir Hertz, Amit H Bermano, and Daniel Cohen-Or. Motionclip: Exposing human motion generation to clip space. In European Conference on Computer Vision, pages 358–374. Springer, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.477, + 0.892, + 0.518 + ], + "angle": 0, + "content": "[21] Guy Tevet, Sigal Raab, Brian Gordon, Yonatan Shafir, Daniel Cohen-Or, and Amit H Bermano. Human motion diffusion model. arXiv preprint arXiv:2209.14916, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.52, + 0.892, + 0.587 + ], + "angle": 0, + "content": "[22] Ruben Villegas, Jimei Yang, Duygu Ceylan, and Honglak Lee. Neural kinematic networks for unsupervised motion retargeting. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8639-8648, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.591, + 0.892, + 0.646 + ], + "angle": 0, + "content": "[23] Ruben Villegas, Duygu Ceylan, Aaron Hertzmann, Jimei Yang, and Jun Saito. 
Contact-aware retargeting of skinned motion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9720-9729, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.648, + 0.892, + 0.689 + ], + "angle": 0, + "content": "[24] Zhengyuan Yang, Songyang Zhang, Liwei Wang, and Jiebo Luo. Sat: 2d semantics assisted training for 3d visual grounding. In ICCV, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.691, + 0.892, + 0.773 + ], + "angle": 0, + "content": "[25] Jiaxu Zhang, Junwu Weng, Di Kang, Fang Zhao, Shaoli Huang, Xuefei Zhe, Linchao Bao, Ying Shan, Jue Wang, and Zhigang Tu. Skinned motion retargeting with residual perception of motion semantics & geometry. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13864-13872, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.775, + 0.892, + 0.843 + ], + "angle": 0, + "content": "[26] Yi Zhou, Connelly Barnes, Jingwan Lu, Jimei Yang, and Hao Li. On the continuity of rotation representations in neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5745-5753, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.846, + 0.892, + 0.902 + ], + "angle": 0, + "content": "[27] Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023." + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2163" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.092, + 0.472, + 0.136 + ], + "angle": 0, + "content": "[28] Ziyu Zhu, Xiaojian Ma, Yixin Chen, Zhidong Deng, Siyuan Huang, and Qing Li. 3d-vista: Pre-trained transformer for 3d vision and text alignment. 
ICCV, 2023." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.517, + 0.957 + ], + "angle": 0, + "content": "2164" + } + ] +] \ No newline at end of file diff --git a/2024/Semantics-aware Motion Retargeting with Vision-Language Models/94f0a721-55ce-4d79-a7e3-1129e082a51d_origin.pdf b/2024/Semantics-aware Motion Retargeting with Vision-Language Models/94f0a721-55ce-4d79-a7e3-1129e082a51d_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8682e9180b505c859bcd5f7716584e04c2e6cc14 --- /dev/null +++ b/2024/Semantics-aware Motion Retargeting with Vision-Language Models/94f0a721-55ce-4d79-a7e3-1129e082a51d_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca9f042e08123a95fb7638fbd5236cc8f5123593f55ae4f83925ffb309512599 +size 5629243 diff --git a/2024/Semantics-aware Motion Retargeting with Vision-Language Models/full.md b/2024/Semantics-aware Motion Retargeting with Vision-Language Models/full.md new file mode 100644 index 0000000000000000000000000000000000000000..de9ed2a6573c2f85d5d5b7f02d0cc5e511ff8e81 --- /dev/null +++ b/2024/Semantics-aware Motion Retargeting with Vision-Language Models/full.md @@ -0,0 +1,307 @@ +# Semantics-aware Motion Retargeting with Vision-Language Models + +Haodong Zhang $^{1*}$ Zhike Chen $^{1*}$ Haocheng Xu $^{1}$ Lei Hao $^{2}$ Xiaofei Wu $^{2}$ Songcen Xu $^{2}$ Zhensong Zhang $^{2}$ Yue Wang $^{1}$ Rong Xiong $^{1\dagger}$ Zhejiang University ${}^{2}$ Huawei Noah's Ark Lab + +# Abstract + +Capturing and preserving motion semantics is essential to motion retargeting between animation characters. However, most of the previous works neglect the semantic information or rely on human-designed joint-level representations. Here, we present a novel Semantics-aware Motion reTargeting (SMT) method with the advantage of vision-language models to extract and maintain meaningful motion semantics. We utilize a differentiable module to render 3D motions. 
Then the high-level motion semantics are incorporated into the motion retargeting process by feeding the vision-language model with the rendered images and aligning the extracted semantic embeddings. To ensure the preservation of fine-grained motion details and high-level semantics, we adopt a two-stage pipeline consisting of skeleton-aware pre-training and fine-tuning with semantics and geometry constraints. Experimental results show the effectiveness of the proposed method in producing high-quality motion retargeting results while accurately preserving motion semantics. Project page can be found at https://sites.google.com/view/smtnet. + +# 1. Introduction + +3D animation characters have extensive application in animation production, virtual reality, and various other domains. These characters are animated using motion data, resulting in lifelike and immersive animations. Nevertheless, acquiring motion data for each character can be a costly endeavor. Therefore, the ability to retarget existing motion data for new characters holds immense importance. The goal of motion retargeting is to transfer existing motion data to new characters following motion feature extraction and integration processes, which ensure the preservation of the original motion's characteristics. + +Semantics encompasses the meaningful and contextually relevant information conveyed in motion and plays a critical role in ensuring the realism and vividness of the anima + +![](images/bd9e13161b93841733ced865f78598c1d6a977e14cdaba3f18310fb1838f555d.jpg) +Figure 1. Comparison with previous motion retargeting methods. (a) Previous works rely on human-designed joint distance matrix [25] or self-contacts between mesh vertices [23] to ensure semantics preservation. (b) Ours work enforces human-level motion semantics consistency with the extensive knowledge of vision-language models. (c) Comparison of motion quality and semantics preservation on the Mixamo dataset [1]. 
Our method achieves the best motion quality and semantics consistency. + +![](images/e549d6320b67a13aa9b070cb0d500c4433aa93e45dd2ca4d66e68f52b2a81134.jpg) + +tion characters. Preservation of motion semantics can enhance the efficiency of motion retargeting by reducing the need for time-consuming manual adjustments and refinements. However, previous methods [2, 15, 22] are mainly based on retargeting of joint positions and make less use of the extraction of semantic information. They focus on trajectory-level motion retargeting with few attention to motion semantics. Consequently, this leads to a significant loss of motion semantics and necessitates the labor-intensive intervention of animation artists for manual trajectory adjustments. Recent advancements have introduced self-contacts [23] and joint distance matrices [25] as the representation of motion semantics. Nevertheless, self-contacts are not applicable to non-contact semantics and require intricate vertex correspondence. The human-designed joint distance matrices primarily focus on joint relative relationships and still lack consideration of high-level semantic information. + +To address the intricate task of capturing and preserving motion semantics, we introduce a new perspective: the most general and comprehensive form of motion semantics is human-level natural language, reflecting the user's intu + +itive understanding of motion. However, the main challenge of human-level motion semantics representation lies in the scarcity of labelled data. It is difficult and expensive to label sufficient semantic textual descriptions for motion data. + +In this paper, we introduce the incorporation of robust, state-of-the-art vision-language models to provide semantic guidance to the motion retargeting network. 
In the absence of labelled semantic data, we leverage the capabilities of a vision-language model to serve as a semantic supervisor in an unsupervised manner, which can extract motion semantics in a more intuitive way, as illustrated in Fig. 1. This approach offers a solution to the challenge of the limited availability of labelled semantic datasets for motion retargeting. To establish a connection between the vision-language model and motion semantics extraction, we employ the differentiable skinning and rendering modules to translate 3D motions into image sequences. Subsequently, we adopt visual question answering with guiding questions to inquire about the most relevant motion semantics from the vision-language model. + +To guarantee the preservation of motion semantics during motion retargeting, we introduce a semantics consistency loss that enforces the semantic embeddings of the targeted motion to closely align with those of the source motion. For dense semantic supervision and computational efficiency, we utilize latent features extracted by the vision-language model as the semantic embeddings instead of textual descriptions. To alleviate the non-linearity of the semantics consistency loss, we introduce a two-stage training approach. We categorize motion information into two distinct levels: the skeletal level and the semantic level. Our approach involves pre-training the motion retargeting network at the skeletal level, which is then further refined and fine-tuned at the semantic level with the power of vision-language models. To the best of our knowledge, we are the first to leverage the extensive capability of vision-language models for the task of semantics-aware motion retargeting. + +To summarize, the contributions of our work include: + +- We introduce an innovative framework that leverages the expertise of vision-language models as a semantic supervisor to tackle the challenge of limited labelled semantic data for the task of motion retargeting. 
+- We propose to use differentiable skinning and rendering to translate from the motion domain to the image domain and perform guiding visual question answering to obtain human-level semantic representation. +- We design a semantics consistency loss to maintain motion semantics and introduce an effective two-stage training pipeline consisting of pre-training at the skeletal level and fine-tuning at the semantic level. +- Our model achieves state-of-the-art performance in the challenging task of semantics-aware motion retargeting, delivering exceptional performance marked by high + +quality motion and superior semantics consistency. + +# 2. Related Works + +Optimization-based Motion Retargeting. Motion retargeting is a technique to adapt existing motion data from a source character to a target character with different bone proportions, mesh skins, and skeletal structures. Early works formulate motion retargeting as a constrained optimization problem [4, 6, 11, 18]. Gleicher et al. [6] introduced a motion retargeting method, which identifies motion features as constraints and computes an adapted motion using a space-time constraint solver to preserve the desirable qualities. Lee et al. [11] proposed a method to adapt existing motion of a human-like character to have the desired features with specified constraints and combined a hierarchical curve fitting technique with inverse kinematics. Nonetheless, these methods necessitate the tedious and time-consuming process of formulating human-designed constraints for specific motion sequences. + +Learning-based Motion Retargeting. With the rise of deep learning, researchers have been developing learning-based motion retargeting methods in recent years [2, 9, 15, 22, 23, 25]. Villegas et al. [22] presented a recurrent neural network architecture, which incorporates a forward kinematics layer and cycle consistency loss for unsupervised motion retargetting. Aberman et al. 
[2] designed a skeleton-aware network with differentiable convolution, pooling, and unpooling operators to transform various homeomorphic skeletons into a primary skeleton for cross-structural motion retargeting. However, these methods tend to concentrate on trajectory-level motion retargeting with limited consideration for motion semantics, which often results in a notable loss of motion semantics and increase the heavy burden of manual adjustments to the trajectories. To address these problems, Zhang et al. [25] presented a residual retargeting network that uses a skeleton-aware module to preserve motion semantics and a shape-aware module to reduce interpenetration and contact missing. While this method successfully preserves joint relative relationships, it still falls short in addressing high-level motion semantics. + +Vision-Language Models. Vision-language models have empowered various vision-language tasks, including visual question answering and image captioning. Tevet et al. [20] introduced a human motion generation model that aligns the latent space with that of the Contrastive Language-Image Pre-training (CLIP) model. Li et al. [13] proposed a pretraining strategy from off-the-shelf frozen pre-trained image encoders and frozen large language models for vision-to-language generative learning. Zhu et al. [27] presented a vision-language model, which uses one projection layer to align a frozen visual encoder with a frozen advanced large language models (LLM). However, these efforts primarily concentrate on vision-language tasks, leaving the question + +![](images/c14d3f4806670441419f84b705ce55d23b7c4d74c2f021432b6dd6694aa6c582.jpg) +Stage I: Skeleton-aware Pre-training + +![](images/69ec7727da1edbc17c8a750293f6acfb0d272504d6e5f0d1f8352703faec249d.jpg) +Figure 2. Model Architecture. Our semantics-aware motion retargeting framework employs a two-stage pipeline. 
Initially, the retargeting network consisting of multiple spatial-temporal graph convolution layers is trained at the skeletal level to establish a base model. Subsequently, this model undergoes further refinement and fine-tuning at the semantic level by the alignment of latent semantic embeddings of the source and target, leveraging the extensive knowledge of vision-language models. The latent semantic embedding is extracted by guiding visual question answering. Additionally, the geometry constraints are also enforced during fine-tuning to avoid interpenetration. + +![](images/6bf8c1ef8f6296d6854894bb056aa820762d9293e2c39fd0d75a3383c649cc35.jpg) +Stage II: Semantics & Geometry Fine-tuning + +of how to effectively employ vision-language models to guide motion retargeting as an open and unexplored area. + +Human motion synthesis. Human motion synthesis is a domain related to motion retargeting, which aims to synthesize realistic and lifelike human motions from random noise or other inputs with generative networks. Guo et al. [7] proposed to generate human motion sequences based on action type. Guo et al. [8] presented a temporal variational autoencoder to synthesize human motions from text input. Tevet et al. [21] introduced a diffusion-based generative model for human motion generation. As comparison, we focus on the task of motion retargeting, where existing motion data is transferred from a source character to a target character. + +# 3. Method + +# 3.1. Overview + +We present a novel semantic-aware motion retargeting method, as illustrated in Fig 2. In contrast to previous methods that neglect motion semantics [2, 15, 22] or rely on human-designed joint-level representations [25], our approach integrates natural language descriptions from vision-language models to offer an explicit and comprehensive semantic representation of character motions, thereby maintaining the preservation of semantic consistency. + +Task definition. 
Given a source motion sequence, consisting of the skeleton motion and its associated skinning geometry, as well as a target character in the reference pose (e.g., + +T-posed), the objective of motion retargeting is to generate the target motion while preserving crucial motion characteristics, such as joint trajectory similarity and motion semantics, and satisfying geometry constraints. + +Graph representation. The skeleton motion sequence can be modelled as a sequence of graphs according to the skeleton hierarchy where each node corresponds to a joint and each edge represents a directed connection between joints. Assume that the motion sequence has $T$ frames in total and the animation characters have $N$ nodes and $M$ edges. In our approach, we consider motion data as node features $\mathbf{Q} \in \mathbb{R}^{T \times N \times 9}$ , which encompasses the 6D joint rotation representation [26] and 3D joint positions. Additionally, we utilize skeleton hierarchy information as edge features $\mathbf{E} \in \mathbb{R}^{M \times 3}$ , which consists of the 3D position offset between each joint and its parent joint. + +Two-stage training. The motion of animation characters can be divided into skeletal movements and skinned movements, represented by skeletal joints and skinned vertices respectively. The skinned movements can be derived from the skeletal movements through the linear blend skinning algorithm [12]. Therefore, motion retargeting at the skeletal level can effectively downscale the data and reduce the complexity of the problem. However, this simplification process can lead to the loss of motion semantics and violations of geometry constraints. To address these issues, we employ a two-stage pipeline. Initially, we pre-train a skeleton-aware network to ensure a general initialization for motion retard + +getting without considering motion semantics and geometry constraints. 
Subsequently, we fine-tune the pre-trained network for each source-target character pair with the vision-language model to maintain semantic consistency and enforce geometry constraints to prevent interpenetrations. + +# 3.2. Skeleton-aware Pre-training + +Retargeting network. We propose a retargeting network consisting of a graph motion encoder and a graph motion decoder for motion retargeting. The motion encoder $\mathcal{F}_{\theta}$ encodes the motion data $\mathbf{Q}_A$ of the source character A into the latent motion embedding $\mathbf{Z}_A$ . Then, the motion decoder $\mathcal{F}_{\phi}$ generates the joint angles $\mathbf{Q}_B$ of the target character B based on the latent features. Both the motion encoder and decoder are composed of multiple graph convolutions. More details are available in the supplementary materials. + +$$ +\mathbf {Z} _ {A} = \mathcal {F} _ {\theta} (\mathbf {Q} _ {A}, \mathbf {E} _ {A}) +$$ + +$$ +\mathbf {Q} _ {B} = \mathcal {F} _ {\phi} (\mathbf {Z} _ {A}, \mathbf {E} _ {B}) \tag {1} +$$ + +In the first phase, we train the motion encoder and decoder at the skeletal level to establish a robust initialization for motion retargeting. Following the unsupervised learning setting in [22], we train the network with the reconstruction loss, cycle consistency loss, adversarial loss, and joint relationship loss. The overall objective function for skeleton-aware pre-training is defined as follows: + +$$ +\mathcal {L} _ {s k e l} = \lambda_ {r} \mathcal {L} _ {r e c} + \lambda_ {c} \mathcal {L} _ {c y c} + \lambda_ {a} \mathcal {L} _ {a d v} + \lambda_ {j} \mathcal {L} _ {j d m} \tag {2} +$$ + +The reconstruction loss $\mathcal{L}_{rec}$ encourages the retargeted motion to match the source motion when the target character is the same as the source character. Let $\mathbf{Q}_{A,t}$ be the motion data of source character A at frame $t$ , and $\hat{\mathbf{Q}}_{A,t}^{rec}$ be the reconstructed motion. 
Then $\mathcal{L}_{rec}$ is defined as: + +$$ +\mathcal {L} _ {r e c} = \sum_ {t} \left| \left| \hat {\mathbf {Q}} _ {A, t} ^ {r e c} - \mathbf {Q} _ {A, t} \right| \right| _ {2} ^ {2} \tag {3} +$$ + +The cycle consistency loss $\mathcal{L}_{cyc}$ promotes the consistency of retargeted motion from the source character A to the target character B and then back to the source character A, ensuring it remains in line with the original motion. Let $\hat{\mathbf{Q}}_{A,t}^{cyc}$ represent the retargeted motion. Then $\mathcal{L}_{cyc}$ is defined as: + +$$ +\mathcal {L} _ {c y c} = \sum_ {t} \left| \left| \hat {\mathbf {Q}} _ {A, t} ^ {c y c} - \mathbf {Q} _ {A, t} \right| \right| _ {2} ^ {2} \tag {4} +$$ + +The adversarial loss $\mathcal{L}_{adv}$ is calculated by a discriminator network, which utilizes the unpaired data of the target character to learn how to distinguish whether the motions are real or fake. Let $\mathcal{F}_{\gamma}$ be the discriminator network, and $\mathbf{Q}_{B,t}$ be the retargeted motion at frame $t$ . Then it is defined as: + +$$ +\mathcal {L} _ {a d v} = \sum_ {t} \log \left(1 - \mathcal {F} _ {\gamma} \left(\mathbf {Q} _ {B, t}\right)\right) \tag {5} +$$ + +The joint relationship loss $\mathcal{L}_{jdm}$ is calculated by the joint distance matrix (JDM) $\mathbf{D} \in \mathbb{R}^{N \times N}$ , which represents the relative positional relationships of the joints. The element $d_{i,j}$ of $\mathbf{D}$ represents the Euclidean distance between joint $i$ and joint $j$ . We extract the joint distance matrix from the target character and compare it with the source character. Then $\mathcal{L}_{jdm}$ is defined as: + +$$ +\mathcal {L} _ {j d m} = \sum_ {t} \left| \left| \eta (\mathbf {D} _ {A, t}) - \eta (\mathbf {D} _ {B, t}) \right| \right| _ {2} ^ {2} \tag {6} +$$ + +where $\eta(.)$ is an L1 normalization performed on each row of the distance matrix. This normalization operation eliminates the difference in bone length to some extent. + +# 3.3. 
Semantics & Geometry Fine-tuning + +In the second phase, we fine-tune the pre-trained retargeting network for each source-target character pair to preserve motion semantics and satisfy geometry constraints. The motion semantics is maintained by the semantics consistency loss, which aligns the semantic embeddings extracted from a vision-language model for both the source and target. Additionally, the geometry constraint is satisfied by minimizing the interpenetration loss. The overall objective function for fine-tuning is outlined as follows: + +$$ +\mathcal {L} _ {\text {f i n e}} = \lambda_ {s} \mathcal {L} _ {\text {s e m}} + \lambda_ {p} \mathcal {L} _ {\text {p e n}} \tag {7} +$$ + +Differentiable skinning & rendering. To make the finetuning process differentiable for gradient back-propagation, we first use the differentiable linear blend skinning algorithm [12], denoted as $\mathcal{F}_{lbs}$ , to transform the target joint angles $\mathbf{Q}_B$ into skinned motions $\mathbf{V}_B$ , represented by 3D mesh vertices. Subsequently, we employ the differentiable projection function $\mathcal{F}_{proj}$ as introduced in [16] to convert the skinned motions into 2D images $\mathbf{I}_B$ . A limitation for the differentiable rendering process is that when projecting the 3D skinned mesh onto 2D images, the depth information is lost. To obtain a comprehensive semantic representation of the motion, we render the character from multiple perspectives and then combine the extracted features, following the Non-rigid Shape Fitting task in [16]. + +$$ +\mathbf {I} _ {A} = \mathcal {F} _ {p r o j} \left(\mathcal {F} _ {l b s} \left(\mathbf {Q} _ {A}\right)\right) +$$ + +$$ +\mathbf {I} _ {B} = \mathcal {F} _ {p r o j} \left(\mathcal {F} _ {l b s} (\mathbf {Q} _ {B})\right) +$$ + +Frozen vision-language model. To obtain an explicit and reliable semantic feature of the motion, we employ a frozen vision-language model as our semantic supervisor. 
Current 3D vision-language datasets [3, 28] mainly focus on the occupation or the segmentation of the object in a spatial scene like rooms, and thus the state-of-the-art 3D vision-language models [28] lack prior knowledge relevant to animation characters. In contrast, 2D vision-language models achieve better results in semantic tasks, such as image captioning, visual question answering and image-text + +![](images/d2c109697a3715ea97365be98876fb3fa3c49e189dc3a65adc55a802494945f6.jpg) +Figure 3. An example of guiding visual question answering. + +retrieval, and provides cleaner and richer semantics [24]. Therefore, we utilize a frozen 2D vision-language model to extract latent embeddings of motion semantics. The frozen 2D vision-language model employed in our work is BLIP-2 [14], which incorporates a lightweight querying transformer as a bridge between the off-the-shelf frozen pre-trained image encoder and the frozen large language model. + +Prompt design. Since the vision-language model has the capability to extract rich information from images, it is possible that the extracted features might contain redundant details, such as the appearance of the character. To guide the vision-language model to obtain semantic embedding relevant to character motions, we adopt a guiding visual question answering approach for motion semantics extraction, as depicted in Fig. 3. We believe that there is a strong correlation between motion semantics and hand movements. To acquire a more comprehensive description of the motion, we initially provide a guiding question to BLIP-2: "Where are the hands of the character?" Subsequently, we introduce a new question and combine it with the first answer as the input to BLIP-2: "[The answers to the first question generated by the vision-language model] What is the character in the image doing?" For more details, please refer to the supplementary materials. + +Latent semantic embedding. 
We opt to align the latent semantic embeddings of the source and target generated by the vision-language model rather than relying on textual descriptions, specifically leveraging the encoder output of the large language model. This approach enables us to acquire a more accurate and denser representation, while also mitigating computational costs and the non-linearity of the training objective caused by the large number of parameters of the vision-language model. Let $\mathbf{E}_A$ and $\mathbf{E}_B$ be the latent semantic embeddings of the source and target motions, $\mathcal{F}_{\omega}$ be the frozen pre-trained image encoder, $\mathcal{F}_{\sigma}$ be the frozen querying transformer, $\mathcal{F}_{\psi}$ be the encoder of the frozen large language model, and context be the question. + +$$ +\begin{aligned} \mathbf {E} _ {A} &= \mathcal {F} _ {\psi} \left(\mathcal {F} _ {\sigma} \left(\mathcal {F} _ {\omega} (\mathbf {I} _ {A}), \text {context}\right)\right) \\ \mathbf {E} _ {B} &= \mathcal {F} _ {\psi} \left(\mathcal {F} _ {\sigma} \left(\mathcal {F} _ {\omega} (\mathbf {I} _ {B}), \text {context}\right)\right) \end{aligned} \tag {9} +$$ + +Fine-tuning with semantics consistency. As illustrated in Fig. 2, our approach aligns the latent semantic embeddings of both the source and target motions in an unsupervised manner, ensuring a high degree of semantic consistency in the retargeted results. The semantics consistency loss $\mathcal{L}_{sem}$ is calculated using the mean square error and it is defined as follows: + +$$ +\mathcal {L} _ {\text {sem}} = \sum_ {t} \| \mathbf {E} _ {A, t} - \mathbf {E} _ {B, t} \| _ {2} ^ {2} \tag {10} +$$ + +Fine-tuning with geometry constraints. From our observations, most interpenetration problems occur between the limbs and the main body. 
To address this, we incorporate the signed distance field between the limb vertices and the body mesh as the interpenetration loss. First, we convert the skeleton motion output from the network into mesh vertices using the linear blend skinning method [12]. Then, the interpenetration loss is defined as follows: + +$$ +\mathcal {L} _ {\text {pen}} = \sum_ {t} \mathrm {ReLU} \left(- \Phi_ {b, t} (\mathbf {V} _ {l, t})\right) \tag {11} +$$ + +where $\Phi_b$ indicates the signed distance field function, and $\mathbf{V}_l$ denotes the vertices of the limbs. If a vertex is located inside the body, the value of the function is less than zero. Therefore, we use the $\mathrm{ReLU}$ function to penalize the inner vertices. + +# 4. Experiments + +# 4.1. Settings + +Datasets. We train and evaluate our method on the Mixamo dataset [1], an extensive repository of animations performed by various 3D virtual characters with distinct skeletons and geometry shapes. The training set we use to pretrain our skeleton aware module is the same as that used in [2], which contains 1646 motions performed by 7 characters. It's important to note that the Mixamo dataset does not provide clean ground truth data, since many of the motion sequences suffer from interpenetration issues and semantic information loss. To mitigate this, we have carefully selected a subset of motion sequences that are both semantically clean and free of interpenetration issues for fine-tuning and testing. Our fine-tuning process involves retargeting 15 clean motions including 3127 frames, originally performed by 3 source characters, namely "Y Bot", "X Bot", and "Ortiz", onto 3 target characters, including "Aj", "Kaya", and "Mousey". Then we evaluate the performance of our model on the task of retargeting 30 additional motions that are previously unseen in the training set and fine-tuning sets. More details can be found in the supplementary materials. + +Implementation details. 
The hyper-parameters $\lambda_r, \lambda_c, \lambda_a, \lambda_j, \lambda_p, \lambda_s$ for pre-training and fine-tuning loss functions are set to 10.0, 1.0, 0.1, 1.0, 1.0, 0.1. For semantics fine-tuning, we use BLIP-2 [14] with pre-trained FlanT5-XXL + +[5] large language model. To extract the semantic representation of the motion, we render animation from three perspectives, including the front view, left view and right view. The fine-tuning process takes 25 epochs with 5 clean motion sequences of the source character for each target character. During pre-training and fine-tuning, we use an Adam optimizer to optimize the retargeting network. Please refer to the supplementary materials for more details. + +Evaluation metrics. We evaluate the performance of our method across three key dimensions: skeleton, geometry, and semantics. At the skeletal level, we measure the Mean Square Error (MSE) between retargeted joint positions and the ground truth provided by Mixamo, analyzing both the global and the local joint positions. At the geometric level, we evaluate the interpenetration percentage (PEN). At the semantic level, we utilize the Image-Text Matching (ITM) score, Fréchet inception distance (FID) and semantics consistency loss (SCL) as metrics. The ITM score quantifies the visual-semantic similarity between the source textual description and the rendered retargeted motion. FID is calculated between the semantic embedding distribution of retargeted motion and source motion. More details are provided in the supplementary materials. + +# 4.2. Comparison with State of the Arts + +Quantitative. In this section, we conduct a comparative analysis of our method against the state-of-the-art approaches as illustrated in Tab. 1. The baseline methods include R2ET [25], SAN [2], NKN [22] and the Copy strategy. 
The Copy strategy achieves the lowest local MSE because the ground truth data in the Mixamo dataset are not entirely clean, and many of them are generated by copying rotations. As a result, this strategy comes at the cost of semantic loss and interpenetration issues. SAN [2] and NKN [22] focus on skeleton-level motion features, which results in a high interpenetration rate and relatively low semantics preservation. R2ET [25] treats motion semantics as the joint distance matrix and mesh distance field, which helps it obtain better motion semantics than SAN and Copy. Nevertheless, there is still a gap between the human-designed distance matrix and the human-level semantics. Notably, our model exhibits the best interpenetration rate and semantics preservation among all methods, showcasing the capability of the proposed method in producing high-quality retargeted motions with semantics consistency. + +Qualitative. In Fig. 4, we visualize the text descriptions of the motions and the qualitative comparison between the state-of-the-arts and our method. SAN [2] and Copy neglect the preservation of semantics and have severe interpenetration. R2ET [25] utilizes joint distance matrix as semantics representation and fails to capture high-level semantic information. For example, the salute motion retargeted by R2ET [25] appears more like a hand-up motion. As a comparison, + +
MethodMSE ↓MSElc ↓Pen.% ↓ITM ↑FID ↓SCL ↓
Source--4.430.796--
GT--9.060.58226.991.331
Copy-0.0059.030.58126.581.327
NKN [22]0.3260.2318.710.57527.791.414
SAN [2]0.4350.2559.740.56128.331.448
R2ET [25]0.4990.4967.620.6435.4690.405
Ours0.2840.2293.500.6800.4360.143
+ +Table 1. Quantitative comparison with the state-of-the-arts. $\mathrm{MSE}^{lc}$ denotes the local MSE. ITM indicates the image-text matching score. FID is Fréchet inception distance of motion semantics. SCL is the semantics consistency loss. + +
MethodMSE ↓MSElc ↓Pen.% ↓ITM ↑FID ↓SCL ↓
SMTtws0.2480.1298.370.5867.7270.769
SMTtwf7.7987.0830.440.43256.5313.29
SMTtwa0.3350.2885.360.6582.8260.266
SMTfwp0.4390.3681.220.5977.2410.583
SMTfwi5.4184.5764.410.55278.4618.96
SMTfwq0.7390.5174.560.6682.4970.191
SMTOurs0.2840.2293.500.6800.4360.143
+ +Table 2. Ablation study. $\mathrm{SMT}_{tws}$ is the network trained with only skeleton-aware pre-training. $\mathrm{SMT}_{twf}$ is the network trained with only semantics and geometry fine-tuning. $\mathrm{SMT}_{twa}$ is the network trained in one stage. $\mathrm{SMT}_{fwp}$ is the network fine-tuned with only the interpenetration loss. $\mathrm{SMT}_{fwi}$ is the network fine-tuned with image features. $\mathrm{SMT}_{fwq}$ is the network fine-tuned with the features of the querying transformer. + +our method is able to successfully preserve high-level motion semantics leveraging the vision-language model. We observe that our approach reaches the best results among all methods, achieving more reliable semantics preservation and lower interpenetration rates. It suggests that with semantics and geometry fine-tuning, our method could effectively solve interpenetration issues together with semantics preservation. + +# 4.3. Ablation Studies + +Skeleton-aware pre-training. The proposed method can be divided into two stage: pre-training and fine-tuning. To illustrate the importance of skeleton-aware pre-training, we evaluate the network trained with only the semantics consistency loss and the interpenetration loss in Tab. 2, denoted as $\mathrm{SMT}_{twf}$ . The network trained without skeleton-aware pretraining performs worst in MSE and semantics preservation. A reasonable explanation is that the semantics consistency loss is highly non-linear, so it is important to pre-train the network at the skeletal level to provide better initial values. We also visualize qualitative results in Fig. 5. + +Semantics & geometry fine-tuning. We also conduct ablation study to illustrate the importance of semantics and geometry fine-tuning in Tab. 2. We first evaluate the performance of the skeleton-aware model without fine-tuning, denoted as $\mathrm{SMT}_{tws}$ . 
Though it reaches the best global posi + +![](images/67d286d86772b680eeddbe7dbdd2a2bf855b305691d26c3b7e03dbdc18835f4f.jpg) +Figure 4. Qualitative comparison. The results demonstrate that our method can effectively preserve semantics while the baseline methods suffer from interpenetration or semantic information loss. From the first column to the last column are the source motion, the Copy strategy, NKN [22], SAN [2], R2ET [25], our method and text descriptions, respectively. + +![](images/2cefa79331f00b4de3501751f6fd2d2bc1d78bc2156f2a7c10e76f96b04c4e6d.jpg) +Figure 5. The qualitative comparison of ablation study between the network without fine-tuning (TWS), the network trained with only semantics and geometry fine-tuning (TWF), the network trained with all loss functions (TWA), the network fine-tuned with only the interpenetration loss (FWP) and our full model (All). + +tion MSE, it suffers from interpenetration and semantic information loss because of the low-quality motion data provided by Mixamo. We next evaluate the network fine-tuned with only the interpenetration loss, denoted as $\mathrm{SMT}_{fwp}$ . This version results in a significant boost in terms of penetration rate. However, the gradient of interpenetration loss is only relevant with the face normals of the geometry mesh without considering the semantic information conveyed in the motion. It indicates the importance of the semantic consistency loss that makes the network reach a better balance + +between interpenetration and semantics. We also try to train the network with all loss functions in one stage, denoted as $\mathrm{SMT}_{twa}$ . However, it is challenging for the model to acquire general knowledge of interpenetration and semantics that is suitable for every character with limited data. 
Therefore, training the model with skeleton-aware pre-training and fine-tuning it with semantics consistency and geometry constraints for each target character remains a more reasonable and data-efficient strategy. + +Latent semantic embedding. The vision-language model used for semantic extraction can be divided into three parts: the image encoder from CLIP [19], the querying transformer and the large language model. In Tab. 2, we compare the feature outputted by the image encoder, the querying transformer and the encoder of the large language model, denoted as $\mathrm{SMT}_{fwi}$ , $\mathrm{SMT}_{fwq}$ , and $\mathrm{SMT}_{Ours}$ , respectively. The results show that the image feature performs worse since it is greatly affected by the appearance of the character. It indicates that with the help of the large language model, the semantic representation better focuses on the semantic meaning of the motion instead of the character's visual appearance. Therefore, the encoder output of the large language model is more suitable for semantic embedding. More details can be found in the supplementary materials. + +Prompt design. To validate the importance of guiding visual question answering, we compare the textual descriptions generated by visual question answering with and without guiding questions as well as image captioning. The re + +Image Captioning +Visual Question Answering +![](images/06dd6b0b0ab33fa1a55868a80afc687cc42688a78a6088cc89698e1acbf6ac3d.jpg) +A 3d model of a boy wearing glasses and a hat. +Q: What is the character doing? +A: The character is praying. + +Image Captioning +Visual Question Answering +Figure 6. Text descriptions generated by different ways. The guiding visual question answering yields more comprehensive results. +![](images/89686dcf50707ed7e14930dbdac4e66d787a29b9ad437e6d61383caac35da9e3.jpg) +A 3d model of a robot running on a cheeked floor. +Q: What is the character doing? +A: The character is running on a checkered floor. + +
MethodQuality ↑Smoothness ↑Semantics ↑
Copy0.720.860.71
NKN [22]0.650.800.66
SAN [2]0.690.820.67
R2ET [25]0.800.610.85
Ours0.890.800.92
+ +Table 3. User study results. We collect 100 comparisons in three aspects. Our method gets highest scores in the overall quality as well as semantics preservation. + +sults in Fig. 6 indicate that using guiding questions for visual question answering yields the most comprehensive and reasonable text descriptions for motion semantics. Compared with image captioning that uses the vision-language model to generate text description directly from images, the answers from visual question answering task can be guided by the designed question to focus on motion semantics. + +# 4.4. User Study + +We conduct a user study to evaluate the performance of our method against the baseline methods. Human subjects are given 12 videos. Each video includes one source skinned motion and five anonymous skinned results. The retargeted results are randomly placed. We ask subjects to rate the results out of 1.0 in three aspects: overall quality, motion smoothness and semantics preservation. We collect a total of 100 comparisons. During the evaluation, users are required to extract semantic meaning from the source motion themselves and then evaluate the preservation of retargeted motions. In general, more than $92\%$ of subjects prefer the retargeting results of our method. + +# 4.5. Retargeting Motion from Human Videos + +In this section, we evaluate our motion retargeting approach from human videos in the human3.6M [10] dataset. Video retargeting involves two stages: human pose estimation from video and motion retargeting. However, inaccuracies in estimating body postures may result in semantic information loss and thus accumulation of errors in the entire + +Guiding Visual Question Answering +![](images/31a5234b3dd525a192b0a0e30eebda6fa48e4919aaca3bb3d5291bf467c490ce.jpg) +Q: Where are the hands of the character? 
+ +![](images/6b583f7206f29c0b1fc775e655fae1efd11ecf2e6f701ff2617c580822706bd8.jpg) + +![](images/8f56ffaf23669bc2d89939f221262149d7058f8c5b811f3796ffd00f378fcaf0.jpg) + +Guiding Visual Question Answering +Figure 7. We retarget from human motion clips in the human3.6M [10] dataset. The retargeted motions are free from interpenetration and preserve semantics well. +![](images/90c070f166b3ff245499e227a911f1815b1940b8b7385812cb83bc7354f8103a.jpg) +Q: Where are the hands of the character? A: Holding a ball. +Q: What is the character doing? +A: The character is trying to throw a ball with both hands on the right side of his body. + +![](images/6e07af2e5d7ced8a5d3c5c66f78180d8a2e50126bd1e2e0cee03ada486231a7d.jpg) + +![](images/1be675d4ea3fdae8b9bd00e2d57b8dc35130e1f7e72e0afc87a8a71c3a69667a.jpg) + +retargeting process. Therefore, we first get the estimated human pose from [17]. Then we utilize the vision-language model to extract the semantic embedding of the original video and calculate the semantic consistency loss to optimize the joint angles acquired from the retargeting process directly. In Fig. 7, we show our results of motion retargeting from human videos to Mixamo characters. + +# 5. Conclusions + +In this paper, we present a novel semantics-aware motion retargeting method that leverages the capabilities of vision-language models to extract semantic embeddings and facilitate the preservation of motion semantics. This approach offers a promising solution to the challenge of lacking labelled semantic data for motion. Our proposed method involves a two-stage process that integrates skeleton-level motion characteristics and semantics-level consistency along with geometry constraints. Experimental results demonstrate that our approach excels in generating high-quality retargeted motions with semantics consistency. + +Limitations. The main limitation is the performance of the vision-language model in extracting motion semantics. 
Without the support of motion semantic datasets of sufficient data size and quality, we rely on the model pre-trained on large image-text datasets. Although the model achieves some remarkable results in motion semantics extraction, there is still room for improvement. In addition, the projection of 3D motion into 2D images loses spatial information and affects the performance. + +Future work. Compared with 2D vision-language models, 3D vision-language models have the advantage of capturing spatial relationships directly. Therefore, fine-tuning 3D vision-language models to make them more suitable for the task of motion semantics extraction is worth exploring in our future work. + +Acknowledgements. This work was supported by the National Natural Science Foundation of China under Grant 62173293. + +# References + +[1] Adobe's mixamo. https://www.mixamo.com/. Accessed: 2023-02-08. +[2] Kfir Aberman, Peizhuo Li, Dani Lischinski, Olga Sorkine-Hornung, Daniel Cohen-Or, and Baoquan Chen. Skeleton-aware networks for deep motion retargeting. ACM Transactions on Graphics (TOG), 39(4):62-1, 2020. +[3] Daichi Azuma, Taiki Miyanishi, Shuhei Kurita, and Motoaki Kawanabe. Scanqa: 3d question answering for spatial scene understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. +[4] Kwang-Jin Choi and Hyeong-Seok Ko. Online motion retargeting. The Journal of Visualization and Computer Animation, 11(5):223-235, 2000. +[5] Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Yunxuan Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, et al. Scaling instruction-finetuned language models. arXiv preprint arXiv:2210.11416, 2022. +[6] Michael Gleicher. Retargetting motion to new characters. In Proceedings of the 25th annual conference on Computer graphics and interactive techniques, pages 33-42, 1998. +[7] Chuan Guo, Xinxin Zuo, Sen Wang, Shihao Zou, Qingyao Sun, Annan Deng, Minglun Gong, and Li Cheng. 
Action2motion: Conditioned generation of 3d human motions. In Proceedings of the 28th ACM International Conference on Multimedia, pages 2021-2029, 2020. +[8] Chuan Guo, Shihao Zou, Xinxin Zuo, Sen Wang, Wei Ji, Xingyu Li, and Li Cheng. Generating diverse and natural 3d human motions from text. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5152-5161, 2022. +[9] Lei Hu, Zihao Zhang, Chongyang Zhong, Boyuan Jiang, and Shihong Xia. Pose-aware attention network for flexible motion retargeting by body part. IEEE Transactions on Visualization and Computer Graphics, pages 1-17, 2023. +[10] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3.6m: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE Transactions on Pattern Analysis and Machine Intelligence, 36(7):1325-1339, 2014. +[11] Jehee Lee and Sung Yong Shin. A hierarchical approach to interactive motion editing for human-like figures. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, pages 39-48, 1999. +[12] John P Lewis, Matt Cordner, and Nickson Fong. Pose space deformation: a unified approach to shape interpolation and skeleton-driven deformation. In Proceedings of the 27th annual conference on Computer graphics and interactive techniques, pages 165-172, 2000. +[13] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597, 2023. +[14] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. BLIP-2: bootstrapping language-image pre-training with + +frozen image encoders and large language models. In ICML, 2023. +[15] Jongin Lim, Hyung Jin Chang, and Jin Young Choi. Pmnet: Learning of disentangled pose and movement for unsupervised motion retargeting. In BMVC, page 7, 2019. +[16] Shichen Liu, Tianye Li, Weikai Chen, and Hao Li. 
Soft rasterizer: A differentiable renderer for image-based 3d reasoning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7708-7717, 2019. +[17] Gyeongsik Moon, Hongsuk Choi, and Kyoung Mu Lee. Accurate 3d hand pose estimation for whole-body 3d human mesh estimation. In Computer Vision and Pattern Recognition Workshop (CVPRW), 2022. +[18] Zoran Popović and Andrew Witkin. Physically based motion transformation. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, pages 11-20, 1999. +[19] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. +[20] Guy Tevet, Brian Gordon, Amir Hertz, Amit H Bermano, and Daniel Cohen-Or. Motionclip: Exposing human motion generation to clip space. In European Conference on Computer Vision, pages 358–374. Springer, 2022. +[21] Guy Tevet, Sigal Raab, Brian Gordon, Yonatan Shafir, Daniel Cohen-Or, and Amit H Bermano. Human motion diffusion model. arXiv preprint arXiv:2209.14916, 2022. +[22] Ruben Villegas, Jimei Yang, Duygu Ceylan, and Honglak Lee. Neural kinematic networks for unsupervised motion retargeting. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8639-8648, 2018. +[23] Ruben Villegas, Duygu Ceylan, Aaron Hertzmann, Jimei Yang, and Jun Saito. Contact-aware retargeting of skinned motion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9720-9729, 2021. +[24] Zhengyuan Yang, Songyang Zhang, Liwei Wang, and Jiebo Luo. Sat: 2d semantics assisted training for 3d visual grounding. In ICCV, 2021. +[25] Jiaxu Zhang, Junwu Weng, Di Kang, Fang Zhao, Shaoli Huang, Xuefei Zhe, Linchao Bao, Ying Shan, Jue Wang, and Zhigang Tu. 
Skinned motion retargeting with residual perception of motion semantics & geometry. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13864-13872, 2023. +[26] Yi Zhou, Connelly Barnes, Jingwan Lu, Jimei Yang, and Hao Li. On the continuity of rotation representations in neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5745-5753, 2019. +[27] Deyao Zhu, Jun Chen, Xiaogian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023. + +[28] Ziyu Zhu, Xiaojian Ma, Yixin Chen, Zhidong Deng, Siyuan Huang, and Qing Li. 3d-vista: Pre-trained transformer for 3d vision and text alignment. ICCV, 2023. \ No newline at end of file diff --git a/2024/Semantics-aware Motion Retargeting with Vision-Language Models/images.zip b/2024/Semantics-aware Motion Retargeting with Vision-Language Models/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..ddc23fbececb6d5fc22e883112801daee47911c2 --- /dev/null +++ b/2024/Semantics-aware Motion Retargeting with Vision-Language Models/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:915fe94afd63cee6fef818b68cd20e4da9808e7742c51bc862c8687dcf66e8a6 +size 531452 diff --git a/2024/Semantics-aware Motion Retargeting with Vision-Language Models/layout.json b/2024/Semantics-aware Motion Retargeting with Vision-Language Models/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..4dbdb9ea63c04308abda07e29c85572e4e088745 --- /dev/null +++ b/2024/Semantics-aware Motion Retargeting with Vision-Language Models/layout.json @@ -0,0 +1,7808 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 87, + 103, + 506, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 103, + 506, + 121 + ], + "spans": [ + { + "bbox": [ + 87, + 103, + 506, 
+ 121 + ], + "type": "text", + "content": "Semantics-aware Motion Retargeting with Vision-Language Models" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 108, + 142, + 485, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 142, + 485, + 186 + ], + "spans": [ + { + "bbox": [ + 108, + 142, + 485, + 186 + ], + "type": "text", + "content": "Haodong Zhang" + }, + { + "bbox": [ + 108, + 142, + 485, + 186 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 108, + 142, + 485, + 186 + ], + "type": "text", + "content": " Zhike Chen" + }, + { + "bbox": [ + 108, + 142, + 485, + 186 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 108, + 142, + 485, + 186 + ], + "type": "text", + "content": " Haocheng Xu" + }, + { + "bbox": [ + 108, + 142, + 485, + 186 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 108, + 142, + 485, + 186 + ], + "type": "text", + "content": " Lei Hao" + }, + { + "bbox": [ + 108, + 142, + 485, + 186 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 108, + 142, + 485, + 186 + ], + "type": "text", + "content": " Xiaofei Wu" + }, + { + "bbox": [ + 108, + 142, + 485, + 186 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 108, + 142, + 485, + 186 + ], + "type": "text", + "content": " Songcen Xu" + }, + { + "bbox": [ + 108, + 142, + 485, + 186 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 108, + 142, + 485, + 186 + ], + "type": "text", + "content": " Zhensong Zhang" + }, + { + "bbox": [ + 108, + 142, + 485, + 186 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 108, + 142, + 485, + 186 + ], + "type": "text", + "content": " Yue Wang" + }, + { + "bbox": [ + 108, + 142, + 485, + 186 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 108, + 142, + 485, + 186 + ], + "type": "text", + "content": " Rong Xiong" + }, + 
{ + "bbox": [ + 108, + 142, + 485, + 186 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + }, + { + "bbox": [ + 108, + 142, + 485, + 186 + ], + "type": "text", + "content": " Zhejiang University " + }, + { + "bbox": [ + 108, + 142, + 485, + 186 + ], + "type": "inline_equation", + "content": "{}^{2}" + }, + { + "bbox": [ + 108, + 142, + 485, + 186 + ], + "type": "text", + "content": "Huawei Noah's Ark Lab" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "spans": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 238, + 290, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 238, + 290, + 478 + ], + "spans": [ + { + "bbox": [ + 46, + 238, + 290, + 478 + ], + "type": "text", + "content": "Capturing and preserving motion semantics is essential to motion retargeting between animation characters. However, most of the previous works neglect the semantic information or rely on human-designed joint-level representations. Here, we present a novel Semantics-aware Motion reTargeting (SMT) method with the advantage of vision-language models to extract and maintain meaningful motion semantics. We utilize a differentiable module to render 3D motions. Then the high-level motion semantics are incorporated into the motion retargeting process by feeding the vision-language model with the rendered images and aligning the extracted semantic embeddings. To ensure the preservation of fine-grained motion details and high-level semantics, we adopt a two-stage pipeline consisting of skeleton-aware pre-training and fine-tuning with semantics and geometry constraints. 
Experimental results show the effectiveness of the proposed method in producing high-quality motion retargeting results while accurately preserving motion semantics. Project page can be found at https://sites.google.com/view/smtnet." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 499, + 128, + 511 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 499, + 128, + 511 + ], + "spans": [ + { + "bbox": [ + 47, + 499, + 128, + 511 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 519, + 287, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 519, + 287, + 650 + ], + "spans": [ + { + "bbox": [ + 46, + 519, + 287, + 650 + ], + "type": "text", + "content": "3D animation characters have extensive application in animation production, virtual reality, and various other domains. These characters are animated using motion data, resulting in lifelike and immersive animations. Nevertheless, acquiring motion data for each character can be a costly endeavor. Therefore, the ability to retarget existing motion data for new characters holds immense importance. The goal of motion retargeting is to transfer existing motion data to new characters following motion feature extraction and integration processes, which ensure the preservation of the original motion's characteristics." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 651, + 287, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 651, + 287, + 687 + ], + "spans": [ + { + "bbox": [ + 46, + 651, + 287, + 687 + ], + "type": "text", + "content": "Semantics encompasses the meaningful and contextually relevant information conveyed in motion and plays a critical role in ensuring the realism and vividness of the anima" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 306, + 212, + 429, + 341 + ], + "blocks": [ + { + "bbox": [ + 306, + 212, + 429, + 341 + ], + "lines": [ + { + "bbox": [ + 306, + 212, + 429, + 341 + ], + "spans": [ + { + "bbox": [ + 306, + 212, + 429, + 341 + ], + "type": "image", + "image_path": "bd9e13161b93841733ced865f78598c1d6a977e14cdaba3f18310fb1838f555d.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 350, + 547, + 439 + ], + "lines": [ + { + "bbox": [ + 305, + 350, + 547, + 439 + ], + "spans": [ + { + "bbox": [ + 305, + 350, + 547, + 439 + ], + "type": "text", + "content": "Figure 1. Comparison with previous motion retargeting methods. (a) Previous works rely on human-designed joint distance matrix [25] or self-contacts between mesh vertices [23] to ensure semantics preservation. (b) Ours work enforces human-level motion semantics consistency with the extensive knowledge of vision-language models. (c) Comparison of motion quality and semantics preservation on the Mixamo dataset [1]. Our method achieves the best motion quality and semantics consistency." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 431, + 213, + 545, + 341 + ], + "blocks": [ + { + "bbox": [ + 431, + 213, + 545, + 341 + ], + "lines": [ + { + "bbox": [ + 431, + 213, + 545, + 341 + ], + "spans": [ + { + "bbox": [ + 431, + 213, + 545, + 341 + ], + "type": "image", + "image_path": "e549d6320b67a13aa9b070cb0d500c4433aa93e45dd2ca4d66e68f52b2a81134.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 459, + 546, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 459, + 546, + 662 + ], + "spans": [ + { + "bbox": [ + 304, + 459, + 546, + 662 + ], + "type": "text", + "content": "tion characters. Preservation of motion semantics can enhance the efficiency of motion retargeting by reducing the need for time-consuming manual adjustments and refinements. However, previous methods [2, 15, 22] are mainly based on retargeting of joint positions and make less use of the extraction of semantic information. They focus on trajectory-level motion retargeting with few attention to motion semantics. Consequently, this leads to a significant loss of motion semantics and necessitates the labor-intensive intervention of animation artists for manual trajectory adjustments. Recent advancements have introduced self-contacts [23] and joint distance matrices [25] as the representation of motion semantics. Nevertheless, self-contacts are not applicable to non-contact semantics and require intricate vertex correspondence. The human-designed joint distance matrices primarily focus on joint relative relationships and still lack consideration of high-level semantic information." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 665, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 665, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 305, + 665, + 547, + 715 + ], + "type": "text", + "content": "To address the intricate task of capturing and preserving motion semantics, we introduce a new perspective: the most general and comprehensive form of motion semantics is human-level natural language, reflecting the user's intu" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 693, + 212, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 693, + 212, + 703 + ], + "spans": [ + { + "bbox": [ + 58, + 693, + 212, + 703 + ], + "type": "text", + "content": "*These authors contributed equally to this work" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 59, + 703, + 200, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 703, + 200, + 712 + ], + "spans": [ + { + "bbox": [ + 59, + 703, + 200, + 712 + ], + "type": "text", + "content": "† Corresponding author: rxiong@zju.edu.cn" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2155" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": "itive understanding of motion. However, the main challenge of human-level motion semantics representation lies in the scarcity of labelled data. It is difficult and expensive to label sufficient semantic textual descriptions for motion data." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 121, + 288, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 121, + 288, + 312 + ], + "spans": [ + { + "bbox": [ + 46, + 121, + 288, + 312 + ], + "type": "text", + "content": "In this paper, we introduce the incorporation of robust, state-of-the-art vision-language models to provide semantic guidance to the motion retargeting network. 
In the absence of labelled semantic data, we leverage the capabilities of a vision-language model to serve as a semantic supervisor in an unsupervised manner, which can extract motion semantics in a more intuitive way, as illustrated in Fig. 1. This approach offers a solution to the challenge of the limited availability of labelled semantic datasets for motion retargeting. To establish a connection between the vision-language model and motion semantics extraction, we employ the differentiable skinning and rendering modules to translate 3D motions into image sequences. Subsequently, we adopt visual question answering with guiding questions to inquire about the most relevant motion semantics from the vision-language model." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 314, + 288, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 314, + 288, + 517 + ], + "spans": [ + { + "bbox": [ + 46, + 314, + 288, + 517 + ], + "type": "text", + "content": "To guarantee the preservation of motion semantics during motion retargeting, we introduce a semantics consistency loss that enforces the semantic embeddings of the targeted motion to closely align with those of the source motion. For dense semantic supervision and computational efficiency, we utilize latent features extracted by the vision-language model as the semantic embeddings instead of textual descriptions. To alleviate the non-linearity of the semantics consistency loss, we introduce a two-stage training approach. We categorize motion information into two distinct levels: the skeletal level and the semantic level. Our approach involves pre-training the motion retargeting network at the skeletal level, which is then further refined and fine-tuned at the semantic level with the power of vision-language models. To the best of our knowledge, we are the first to leverage the extensive capability of vision-language models for the task of semantics-aware motion retargeting." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 59, + 518, + 275, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 518, + 275, + 529 + ], + "spans": [ + { + "bbox": [ + 59, + 518, + 275, + 529 + ], + "type": "text", + "content": "To summarize, the contributions of our work include:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 534, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 47, + 534, + 287, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 534, + 287, + 582 + ], + "spans": [ + { + "bbox": [ + 47, + 534, + 287, + 582 + ], + "type": "text", + "content": "- We introduce an innovative framework that leverages the expertise of vision-language models as a semantic supervisor to tackle the challenge of limited labelled semantic data for the task of motion retargeting." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 582, + 287, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 582, + 287, + 629 + ], + "spans": [ + { + "bbox": [ + 47, + 582, + 287, + 629 + ], + "type": "text", + "content": "- We propose to use differentiable skinning and rendering to translate from the motion domain to the image domain and perform guiding visual question answering to obtain human-level semantic representation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 630, + 287, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 630, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 47, + 630, + 287, + 677 + ], + "type": "text", + "content": "- We design a semantics consistency loss to maintain motion semantics and introduce an effective two-stage training pipeline consisting of pre-training at the skeletal level and fine-tuning at the semantic level." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 678, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 678, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 678, + 287, + 713 + ], + "type": "text", + "content": "- Our model achieves state-of-the-art performance in the challenging task of semantics-aware motion retargeting, delivering exceptional performance marked by high" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 314, + 72, + 520, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 520, + 84 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 520, + 84 + ], + "type": "text", + "content": "quality motion and superior semantics consistency." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 95, + 397, + 107 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 95, + 397, + 107 + ], + "spans": [ + { + "bbox": [ + 306, + 95, + 397, + 107 + ], + "type": "text", + "content": "2. Related Works" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 114, + 545, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 114, + 545, + 306 + ], + "spans": [ + { + "bbox": [ + 304, + 114, + 545, + 306 + ], + "type": "text", + "content": "Optimization-based Motion Retargeting. Motion retargeting is a technique to adapt existing motion data from a source character to a target character with different bone proportions, mesh skins, and skeletal structures. Early works formulate motion retargeting as a constrained optimization problem [4, 6, 11, 18]. Gleicher et al. [6] introduced a motion retargeting method, which identifies motion features as constraints and computes an adapted motion using a space-time constraint solver to preserve the desirable qualities. Lee et al. 
[11] proposed a method to adapt existing motion of a human-like character to have the desired features with specified constraints and combined a hierarchical curve fitting technique with inverse kinematics. Nonetheless, these methods necessitate the tedious and time-consuming process of formulating human-designed constraints for specific motion sequences." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 307, + 545, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 307, + 545, + 557 + ], + "spans": [ + { + "bbox": [ + 304, + 307, + 545, + 557 + ], + "type": "text", + "content": "Learning-based Motion Retargeting. With the rise of deep learning, researchers have been developing learning-based motion retargeting methods in recent years [2, 9, 15, 22, 23, 25]. Villegas et al. [22] presented a recurrent neural network architecture, which incorporates a forward kinematics layer and cycle consistency loss for unsupervised motion retargetting. Aberman et al. [2] designed a skeleton-aware network with differentiable convolution, pooling, and unpooling operators to transform various homeomorphic skeletons into a primary skeleton for cross-structural motion retargeting. However, these methods tend to concentrate on trajectory-level motion retargeting with limited consideration for motion semantics, which often results in a notable loss of motion semantics and increase the heavy burden of manual adjustments to the trajectories. To address these problems, Zhang et al. [25] presented a residual retargeting network that uses a skeleton-aware module to preserve motion semantics and a shape-aware module to reduce interpenetration and contact missing. While this method successfully preserves joint relative relationships, it still falls short in addressing high-level motion semantics." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 558, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 558, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 558, + 545, + 713 + ], + "type": "text", + "content": "Vision-Language Models. Vision-language models have empowered various vision-language tasks, including visual question answering and image captioning. Tevet et al. [20] introduced a human motion generation model that aligns the latent space with that of the Contrastive Language-Image Pre-training (CLIP) model. Li et al. [13] proposed a pretraining strategy from off-the-shelf frozen pre-trained image encoders and frozen large language models for vision-to-language generative learning. Zhu et al. [27] presented a vision-language model, which uses one projection layer to align a frozen visual encoder with a frozen advanced large language models (LLM). However, these efforts primarily concentrate on vision-language tasks, leaving the question" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2156" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 62, + 71, + 532, + 213 + ], + "blocks": [ + { + "bbox": [ + 62, + 71, + 532, + 213 + ], + "lines": [ + { + "bbox": [ + 62, + 71, + 532, + 213 + ], + "spans": [ + { + "bbox": [ + 62, + 71, + 532, + 213 + ], + "type": "image", + "image_path": "c14d3f4806670441419f84b705ce55d23b7c4d74c2f021432b6dd6694aa6c582.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 61, + 215, + 174, + 224 + ], + "lines": [ + { + "bbox": [ + 61, + 215, + 174, + 224 + ], + "spans": [ + { + 
"bbox": [ + 61, + 215, + 174, + 224 + ], + "type": "text", + "content": "Stage I: Skeleton-aware Pre-training" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 61, + 225, + 212, + 292 + ], + "blocks": [ + { + "bbox": [ + 61, + 225, + 212, + 292 + ], + "lines": [ + { + "bbox": [ + 61, + 225, + 212, + 292 + ], + "spans": [ + { + "bbox": [ + 61, + 225, + 212, + 292 + ], + "type": "image", + "image_path": "69ec7727da1edbc17c8a750293f6acfb0d272504d6e5f0d1f8352703faec249d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 303, + 545, + 360 + ], + "lines": [ + { + "bbox": [ + 46, + 303, + 545, + 360 + ], + "spans": [ + { + "bbox": [ + 46, + 303, + 545, + 360 + ], + "type": "text", + "content": "Figure 2. Model Architecture. Our semantics-aware motion retargeting framework employs a two-stage pipeline. Initially, the retargeting network consisting of multiple spatial-temporal graph convolution layers is trained at the skeletal level to establish a base model. Subsequently, this model undergoes further refinement and fine-tuning at the semantic level by the alignment of latent semantic embeddings of the source and target, leveraging the extensive knowledge of vision-language models. The latent semantic embedding is extracted by guiding visual question answering. Additionally, the geometry constraints are also enforced during fine-tuning to avoid interpenetration." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 217, + 225, + 533, + 293 + ], + "blocks": [ + { + "bbox": [ + 277, + 216, + 414, + 225 + ], + "lines": [ + { + "bbox": [ + 277, + 216, + 414, + 225 + ], + "spans": [ + { + "bbox": [ + 277, + 216, + 414, + 225 + ], + "type": "text", + "content": "Stage II: Semantics & Geometry Fine-tuning" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 217, + 225, + 533, + 293 + ], + "lines": [ + { + "bbox": [ + 217, + 225, + 533, + 293 + ], + "spans": [ + { + "bbox": [ + 217, + 225, + 533, + 293 + ], + "type": "image", + "image_path": "6bf8c1ef8f6296d6854894bb056aa820762d9293e2c39fd0d75a3383c649cc35.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 371, + 287, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 371, + 287, + 395 + ], + "spans": [ + { + "bbox": [ + 46, + 371, + 287, + 395 + ], + "type": "text", + "content": "of how to effectively employ vision-language models to guide motion retargeting as an open and unexplored area." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 396, + 287, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 396, + 287, + 529 + ], + "spans": [ + { + "bbox": [ + 46, + 396, + 287, + 529 + ], + "type": "text", + "content": "Human motion synthesis. Human motion synthesis is a domain related to motion retargeting, which aims to synthesize realistic and lifelike human motions from random noise or other inputs with generative networks. Guo et al. [7] proposed to generate human motion sequences based on action type. Guo et al. [8] presented a temporal variational autoencoder to synthesize human motions from text input. Tevet et al. [21] introduced a diffusion-based generative model for human motion generation. 
As comparison, we focus on the task of motion retargeting, where existing motion data is transferred from a source character to a target character." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 541, + 102, + 553 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 541, + 102, + 553 + ], + "spans": [ + { + "bbox": [ + 47, + 541, + 102, + 553 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 561, + 115, + 573 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 561, + 115, + 573 + ], + "spans": [ + { + "bbox": [ + 47, + 561, + 115, + 573 + ], + "type": "text", + "content": "3.1. Overview" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 581, + 287, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 581, + 287, + 676 + ], + "spans": [ + { + "bbox": [ + 46, + 581, + 287, + 676 + ], + "type": "text", + "content": "We present a novel semantic-aware motion retargeting method, as illustrated in Fig 2. In contrast to previous methods that neglect motion semantics [2, 15, 22] or rely on human-designed joint-level representations [25], our approach integrates natural language descriptions from vision-language models to offer an explicit and comprehensive semantic representation of character motions, thereby maintaining the preservation of semantic consistency." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": "Task definition. 
Given a source motion sequence, consisting of the skeleton motion and its associated skinning geometry, as well as a target character in the reference pose (e.g.," + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 371, + 545, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 371, + 545, + 419 + ], + "spans": [ + { + "bbox": [ + 304, + 371, + 545, + 419 + ], + "type": "text", + "content": "T-posed), the objective of motion retargeting is to generate the target motion while preserving crucial motion characteristics, such as joint trajectory similarity and motion semantics, and satisfying geometry constraints." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 422, + 546, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 422, + 546, + 567 + ], + "spans": [ + { + "bbox": [ + 304, + 422, + 546, + 567 + ], + "type": "text", + "content": "Graph representation. The skeleton motion sequence can be modelled as a sequence of graphs according to the skeleton hierarchy where each node corresponds to a joint and each edge represents a directed connection between joints. Assume that the motion sequence has " + }, + { + "bbox": [ + 304, + 422, + 546, + 567 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 422, + 546, + 567 + ], + "type": "text", + "content": " frames in total and the animation characters have " + }, + { + "bbox": [ + 304, + 422, + 546, + 567 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 422, + 546, + 567 + ], + "type": "text", + "content": " nodes and " + }, + { + "bbox": [ + 304, + 422, + 546, + 567 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 422, + 546, + 567 + ], + "type": "text", + "content": " edges. 
In our approach, we consider motion data as node features " + }, + { + "bbox": [ + 304, + 422, + 546, + 567 + ], + "type": "inline_equation", + "content": "\\mathbf{Q} \\in \\mathbb{R}^{T \\times N \\times 9}" + }, + { + "bbox": [ + 304, + 422, + 546, + 567 + ], + "type": "text", + "content": ", which encompasses the 6D joint rotation representation [26] and 3D joint positions. Additionally, we utilize skeleton hierarchy information as edge features " + }, + { + "bbox": [ + 304, + 422, + 546, + 567 + ], + "type": "inline_equation", + "content": "\\mathbf{E} \\in \\mathbb{R}^{M \\times 3}" + }, + { + "bbox": [ + 304, + 422, + 546, + 567 + ], + "type": "text", + "content": ", which consists of the 3D position offset between each joint and its parent joint." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "text", + "content": "Two-stage training. The motion of animation characters can be divided into skeletal movements and skinned movements, represented by skeletal joints and skinned vertices respectively. The skinned movements can be derived from the skeletal movements through the linear blend skinning algorithm [12]. Therefore, motion retargeting at the skeletal level can effectively downscale the data and reduce the complexity of the problem. However, this simplification process can lead to the loss of motion semantics and violations of geometry constraints. To address these issues, we employ a two-stage pipeline. 
Initially, we pre-train a skeleton-aware network to ensure a general initialization for motion retard" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2157" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": "getting without considering motion semantics and geometry constraints. Subsequently, we fine-tune the pre-trained network for each source-target character pair with the vision-language model to maintain semantic consistency and enforce geometry constraints to prevent interpenetrations." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 138, + 203, + 152 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 138, + 203, + 152 + ], + "spans": [ + { + "bbox": [ + 47, + 138, + 203, + 152 + ], + "type": "text", + "content": "3.2. Skeleton-aware Pre-training" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 157, + 287, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 157, + 287, + 264 + ], + "spans": [ + { + "bbox": [ + 46, + 157, + 287, + 264 + ], + "type": "text", + "content": "Retargeting network. We propose a retargeting network consisting of a graph motion encoder and a graph motion decoder for motion retargeting. 
The motion encoder " + }, + { + "bbox": [ + 46, + 157, + 287, + 264 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\theta}" + }, + { + "bbox": [ + 46, + 157, + 287, + 264 + ], + "type": "text", + "content": " encodes the motion data " + }, + { + "bbox": [ + 46, + 157, + 287, + 264 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_A" + }, + { + "bbox": [ + 46, + 157, + 287, + 264 + ], + "type": "text", + "content": " of the source character A into the latent motion embedding " + }, + { + "bbox": [ + 46, + 157, + 287, + 264 + ], + "type": "inline_equation", + "content": "\\mathbf{Z}_A" + }, + { + "bbox": [ + 46, + 157, + 287, + 264 + ], + "type": "text", + "content": ". Then, the motion decoder " + }, + { + "bbox": [ + 46, + 157, + 287, + 264 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\phi}" + }, + { + "bbox": [ + 46, + 157, + 287, + 264 + ], + "type": "text", + "content": " generates the joint angles " + }, + { + "bbox": [ + 46, + 157, + 287, + 264 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_B" + }, + { + "bbox": [ + 46, + 157, + 287, + 264 + ], + "type": "text", + "content": " of the target character B based on the latent features. Both the motion encoder and decoder are composed of multiple graph convolutions. More details are available in the supplementary materials." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 127, + 274, + 208, + 287 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 274, + 208, + 287 + ], + "spans": [ + { + "bbox": [ + 127, + 274, + 208, + 287 + ], + "type": "interline_equation", + "content": "\\mathbf {Z} _ {A} = \\mathcal {F} _ {\\theta} (\\mathbf {Q} _ {A}, \\mathbf {E} _ {A})", + "image_path": "1919fe3d9ed0bd29a042719cc67e1911ba82636f63227b008c9bad9bcbdf3370.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 127, + 287, + 287, + 301 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 287, + 287, + 301 + ], + "spans": [ + { + "bbox": [ + 127, + 287, + 287, + 301 + ], + "type": "interline_equation", + "content": "\\mathbf {Q} _ {B} = \\mathcal {F} _ {\\phi} (\\mathbf {Z} _ {A}, \\mathbf {E} _ {B}) \\tag {1}", + "image_path": "0e577cbea2ea7811c96f070e8016d8a17249289ca724884fa5dc1d85e059cacd.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 304, + 287, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 304, + 287, + 388 + ], + "spans": [ + { + "bbox": [ + 46, + 304, + 287, + 388 + ], + "type": "text", + "content": "In the first phase, we train the motion encoder and decoder at the skeletal level to establish a robust initialization for motion retargeting. Following the unsupervised learning setting in [22], we train the network with the reconstruction loss, cycle consistency loss, adversarial loss, and joint relationship loss. 
The overall objective function for skeleton-aware pre-training is defined as follows:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 63, + 407, + 287, + 420 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 407, + 287, + 420 + ], + "spans": [ + { + "bbox": [ + 63, + 407, + 287, + 420 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {s k e l} = \\lambda_ {r} \\mathcal {L} _ {r e c} + \\lambda_ {c} \\mathcal {L} _ {c y c} + \\lambda_ {a} \\mathcal {L} _ {a d v} + \\lambda_ {j} \\mathcal {L} _ {j d m} \\tag {2}", + "image_path": "598dbc9915a094c5ddaf3281499c7d06215fb76ee0d4bb600f32b4e131f909be.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 426, + 287, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 426, + 287, + 487 + ], + "spans": [ + { + "bbox": [ + 46, + 426, + 287, + 487 + ], + "type": "text", + "content": "The reconstruction loss " + }, + { + "bbox": [ + 46, + 426, + 287, + 487 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{rec}" + }, + { + "bbox": [ + 46, + 426, + 287, + 487 + ], + "type": "text", + "content": " encourages the retargeted motion to match the source motion when the target character is the same as the source character. Let " + }, + { + "bbox": [ + 46, + 426, + 287, + 487 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_{A,t}" + }, + { + "bbox": [ + 46, + 426, + 287, + 487 + ], + "type": "text", + "content": " be the motion data of source character A at frame " + }, + { + "bbox": [ + 46, + 426, + 287, + 487 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 426, + 287, + 487 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 426, + 287, + 487 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{Q}}_{A,t}^{rec}" + }, + { + "bbox": [ + 46, + 426, + 287, + 487 + ], + "type": "text", + "content": " be the reconstructed motion. 
Then " + }, + { + "bbox": [ + 46, + 426, + 287, + 487 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{rec}" + }, + { + "bbox": [ + 46, + 426, + 287, + 487 + ], + "type": "text", + "content": " is defined as:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 492, + 287, + 520 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 492, + 287, + 520 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 287, + 520 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {r e c} = \\sum_ {t} \\left| \\left| \\hat {\\mathbf {Q}} _ {A, t} ^ {r e c} - \\mathbf {Q} _ {A, t} \\right| \\right| _ {2} ^ {2} \\tag {3}", + "image_path": "037cd8b61920e39bf5722851b113af5bde217d4ef145af8f8129bd27cfab1b30.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 526, + 287, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 526, + 287, + 588 + ], + "spans": [ + { + "bbox": [ + 46, + 526, + 287, + 588 + ], + "type": "text", + "content": "The cycle consistency loss " + }, + { + "bbox": [ + 46, + 526, + 287, + 588 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{cyc}" + }, + { + "bbox": [ + 46, + 526, + 287, + 588 + ], + "type": "text", + "content": " promotes the consistency of retargeted motion from the source character A to the target character B and then back to the source character A, ensuring it remains in line with the original motion. Let " + }, + { + "bbox": [ + 46, + 526, + 287, + 588 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{Q}}_{A,t}^{cyc}" + }, + { + "bbox": [ + 46, + 526, + 287, + 588 + ], + "type": "text", + "content": " represent the retargeted motion. 
Then " + }, + { + "bbox": [ + 46, + 526, + 287, + 588 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{cyc}" + }, + { + "bbox": [ + 46, + 526, + 287, + 588 + ], + "type": "text", + "content": " is defined as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 594, + 287, + 622 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 594, + 287, + 622 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 287, + 622 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {c y c} = \\sum_ {t} \\left| \\left| \\hat {\\mathbf {Q}} _ {A, t} ^ {c y c} - \\mathbf {Q} _ {A, t} \\right| \\right| _ {2} ^ {2} \\tag {4}", + "image_path": "49692772fed633d343b051ab83a79a7f8a1cc25c479bf086d8ec27360a602885.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 625, + 287, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 625, + 287, + 685 + ], + "spans": [ + { + "bbox": [ + 46, + 625, + 287, + 685 + ], + "type": "text", + "content": "The adversarial loss " + }, + { + "bbox": [ + 46, + 625, + 287, + 685 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{adv}" + }, + { + "bbox": [ + 46, + 625, + 287, + 685 + ], + "type": "text", + "content": " is calculated by a discriminator network, which utilizes the unpaired data of the target character to learn how to distinguish whether the motions are real or fake. 
Let " + }, + { + "bbox": [ + 46, + 625, + 287, + 685 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\gamma}" + }, + { + "bbox": [ + 46, + 625, + 287, + 685 + ], + "type": "text", + "content": " be the discriminator network, and " + }, + { + "bbox": [ + 46, + 625, + 287, + 685 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_{B,t}" + }, + { + "bbox": [ + 46, + 625, + 287, + 685 + ], + "type": "text", + "content": " be the retargeted motion at frame " + }, + { + "bbox": [ + 46, + 625, + 287, + 685 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 625, + 287, + 685 + ], + "type": "text", + "content": ". Then it is defined as:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 99, + 691, + 287, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 691, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 99, + 691, + 287, + 715 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {a d v} = \\sum_ {t} \\log \\left(1 - \\mathcal {F} _ {\\gamma} \\left(\\mathbf {Q} _ {B, t}\\right)\\right) \\tag {5}", + "image_path": "3b79a194dacaf25e05b88c7840b8ec52ea33090ff1427848e48944a8dc33d51e.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "text", + "content": "The joint relationship loss " + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{jdm}" + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "text", + "content": " is calculated by the joint distance matrix (JDM) " + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "\\mathbf{D} \\in \\mathbb{R}^{N \\times N}" + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "text", + "content": ", which represents 
the relative positional relationships of the joints. The element " + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "d_{i,j}" + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "\\mathbf{D}" + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "text", + "content": " represents the Euclidean distance between joint " + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "text", + "content": " and joint " + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "text", + "content": ". We extract the joint distance matrix from the target character and compare it with the source character. Then " + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{jdm}" + }, + { + "bbox": [ + 305, + 72, + 545, + 156 + ], + "type": "text", + "content": " is defined as:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 350, + 165, + 545, + 189 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 350, + 165, + 545, + 189 + ], + "spans": [ + { + "bbox": [ + 350, + 165, + 545, + 189 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {j d m} = \\sum_ {t} \\left| \\left| \\eta (\\mathbf {D} _ {A, t}) - \\eta (\\mathbf {D} _ {B, t}) \\right| \\right| _ {2} ^ {2} \\tag {6}", + "image_path": "26c89dc4bf2f31b37f96ccec8caa2eee48f3c566353e109125d1231d4b53db61.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 192, + 545, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 192, + 545, + 228 + ], + "spans": [ + { + "bbox": [ + 305, + 192, + 545, + 228 + ], + 
"type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 192, + 545, + 228 + ], + "type": "inline_equation", + "content": "\\eta(.)" + }, + { + "bbox": [ + 305, + 192, + 545, + 228 + ], + "type": "text", + "content": " is an L1 normalization performed on each row of the distance matrix. This normalization operation eliminates the difference in bone length to some extent." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 234, + 495, + 247 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 234, + 495, + 247 + ], + "spans": [ + { + "bbox": [ + 306, + 234, + 495, + 247 + ], + "type": "text", + "content": "3.3. Semantics & Geometry Fine-tuning" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 253, + 545, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 253, + 545, + 361 + ], + "spans": [ + { + "bbox": [ + 305, + 253, + 545, + 361 + ], + "type": "text", + "content": "In the second phase, we fine-tune the pre-trained retargeting network for each source-target character pair to preserve motion semantics and satisfy geometry constraints. The motion semantics is maintained by the semantics consistency loss, which aligns the semantic embeddings extracted from a vision-language model for both the source and target. Additionally, the geometry constraint is satisfied by minimizing the interpenetration loss. 
The overall objective function for fine-tuning is outlined as follows:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 367, + 372, + 545, + 385 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 372, + 545, + 385 + ], + "spans": [ + { + "bbox": [ + 367, + 372, + 545, + 385 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {f i n e}} = \\lambda_ {s} \\mathcal {L} _ {\\text {s e m}} + \\lambda_ {p} \\mathcal {L} _ {\\text {p e n}} \\tag {7}", + "image_path": "30aed3a2cc6df73d54d3929337649b737b41743c47e1a94e44230511b77285c7.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 387, + 545, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 387, + 545, + 555 + ], + "spans": [ + { + "bbox": [ + 305, + 387, + 545, + 555 + ], + "type": "text", + "content": "Differentiable skinning & rendering. To make the finetuning process differentiable for gradient back-propagation, we first use the differentiable linear blend skinning algorithm [12], denoted as " + }, + { + "bbox": [ + 305, + 387, + 545, + 555 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{lbs}" + }, + { + "bbox": [ + 305, + 387, + 545, + 555 + ], + "type": "text", + "content": ", to transform the target joint angles " + }, + { + "bbox": [ + 305, + 387, + 545, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_B" + }, + { + "bbox": [ + 305, + 387, + 545, + 555 + ], + "type": "text", + "content": " into skinned motions " + }, + { + "bbox": [ + 305, + 387, + 545, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_B" + }, + { + "bbox": [ + 305, + 387, + 545, + 555 + ], + "type": "text", + "content": ", represented by 3D mesh vertices. 
Subsequently, we employ the differentiable projection function " + }, + { + "bbox": [ + 305, + 387, + 545, + 555 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{proj}" + }, + { + "bbox": [ + 305, + 387, + 545, + 555 + ], + "type": "text", + "content": " as introduced in [16] to convert the skinned motions into 2D images " + }, + { + "bbox": [ + 305, + 387, + 545, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_B" + }, + { + "bbox": [ + 305, + 387, + 545, + 555 + ], + "type": "text", + "content": ". A limitation for the differentiable rendering process is that when projecting the 3D skinned mesh onto 2D images, the depth information is lost. To obtain a comprehensive semantic representation of the motion, we render the character from multiple perspectives and then combine the extracted features, following the Non-rigid Shape Fitting task in [16]." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 378, + 564, + 474, + 577 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 378, + 564, + 474, + 577 + ], + "spans": [ + { + "bbox": [ + 378, + 564, + 474, + 577 + ], + "type": "interline_equation", + "content": "\\mathbf {I} _ {A} = \\mathcal {F} _ {p r o j} \\left(\\mathcal {F} _ {l b s} \\left(\\mathbf {Q} _ {A}\\right)\\right)", + "image_path": "6b1cbe2b61b7b0d1dba3cdff005b50016d300cdda02870573087895c8a0813d1.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 378, + 576, + 474, + 592 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 378, + 576, + 474, + 592 + ], + "spans": [ + { + "bbox": [ + 378, + 576, + 474, + 592 + ], + "type": "interline_equation", + "content": "\\mathbf {I} _ {B} = \\mathcal {F} _ {p r o j} \\left(\\mathcal {F} _ {l b s} (\\mathbf {Q} _ {B})\\right)", + "image_path": "e7325427fd44436d8d0aa4891e1a1e88b1891e0b265baf71a88d4ac023542bfb.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 545, + 573, + 547, + 582 + ], + "type": "text", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 545, + 573, + 547, + 582 + ], + "spans": [ + { + "bbox": [ + 545, + 573, + 547, + 582 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 305, + 594, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 594, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 305, + 594, + 545, + 714 + ], + "type": "text", + "content": "Frozen vision-language model. To obtain an explicit and reliable semantic feature of the motion, we employ a frozen vision-language model as our semantic supervisor. Current 3D vision-language datasets [3, 28] mainly focus on the occupation or the segmentation of the object in a spatial scene like rooms, and thus the state-of-the-art 3D vision-language models [28] lack prior knowledge relevant to animation characters. In contrast, 2D vision-language models achieve better results in semantic tasks, such as image captioning, visual question answering and image-text" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "2158" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 71, + 289, + 199 + ], + "blocks": [ + { + "bbox": [ + 49, + 71, + 289, + 199 + ], + "lines": [ + { + "bbox": [ + 49, + 71, + 289, + 199 + ], + "spans": [ + { + "bbox": [ + 49, + 71, + 289, + 199 + ], + "type": "image", + "image_path": "d2c109697a3715ea97365be98876fb3fa3c49e189dc3a65adc55a802494945f6.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 57, + 209, + 277, + 221 + ], + "lines": [ + { + "bbox": [ + 57, + 209, + 277, + 221 + ], + "spans": [ + { + "bbox": [ + 
57, + 209, + 277, + 221 + ], + "type": "text", + "content": "Figure 3. An example of guiding visual question answering." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 233, + 287, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 233, + 287, + 317 + ], + "spans": [ + { + "bbox": [ + 46, + 233, + 287, + 317 + ], + "type": "text", + "content": "retrieval, and provides cleaner and richer semantics [24]. Therefore, we utilize a frozen 2D vision-language model to extract latent embeddings of motion semantics. The frozen 2D vision-language model employed in our work is BLIP-2 [14], which incorporates a lightweight querying transformer as a bridge between the off-the-shelf frozen pre-trained image encoder and the frozen large language model." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 317, + 287, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 317, + 287, + 521 + ], + "spans": [ + { + "bbox": [ + 46, + 317, + 287, + 521 + ], + "type": "text", + "content": "Prompt design. Since the vision-language model has the capability to extract rich information from images, it is possible that the extracted features might contain redundant details, such as the appearance of the character. To guide the vision-language model to obtain semantic embedding relevant to character motions, we adopt a guiding visual question answering approach for motion semantics extraction, as depicted in Fig. 3. We believe that there is a strong correlation between motion semantics and hand movements. 
To acquire a more comprehensive description of the motion, we initially provide a guiding question to BLIP-2: \"Where are the hands of the character?\" Subsequently, we introduce a new question and combine it with the first answer as the input to BLIP-2: \"[The answers to the first question generated by the vision-language model] What is the character in the image doing?\" For more details, please refer to the supplementary materials." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 521, + 288, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 521, + 288, + 677 + ], + "spans": [ + { + "bbox": [ + 46, + 521, + 288, + 677 + ], + "type": "text", + "content": "Latent semantic embedding. We opt to align the latent semantic embeddings of the source and target generated by the vision-language model rather than relying on textual descriptions, specifically leveraging the encoder output of the large language model. This approach enables us to acquire a more accurate and denser representation, while also mitigating computational costs and the non-linearity of the training objective caused by the large number of parameters of the vision-language model. 
Let " + }, + { + "bbox": [ + 46, + 521, + 288, + 677 + ], + "type": "inline_equation", + "content": "\\mathbf{E}_A" + }, + { + "bbox": [ + 46, + 521, + 288, + 677 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 521, + 288, + 677 + ], + "type": "inline_equation", + "content": "\\mathbf{E}_B" + }, + { + "bbox": [ + 46, + 521, + 288, + 677 + ], + "type": "text", + "content": " be the latent semantic embeddings of the source and target motions, " + }, + { + "bbox": [ + 46, + 521, + 288, + 677 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\omega}" + }, + { + "bbox": [ + 46, + 521, + 288, + 677 + ], + "type": "text", + "content": " be the frozen pre-trained image encoder, " + }, + { + "bbox": [ + 46, + 521, + 288, + 677 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\sigma}" + }, + { + "bbox": [ + 46, + 521, + 288, + 677 + ], + "type": "text", + "content": " be the frozen querying transformer, " + }, + { + "bbox": [ + 46, + 521, + 288, + 677 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\psi}" + }, + { + "bbox": [ + 46, + 521, + 288, + 677 + ], + "type": "text", + "content": " be the encoder of the frozen large language model, and context be the question." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 96, + 687, + 287, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 687, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 96, + 687, + 287, + 715 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\begin{array}{l} \\mathbf {E} _ {A} = \\mathcal {F} _ {\\psi} \\left(\\mathcal {F} _ {\\sigma} \\left(\\mathcal {F} _ {\\omega} (\\mathbf {I} _ {A}), \\text {c o n t e x t}\\right)\\right) \\\\ \\overline {{\\mathbf {E}}} = \\overline {{\\mathbf {F}}} \\left(\\overline {{\\mathbf {F}}} \\left(\\overline {{\\mathbf {F}}} (\\mathbf {I} _ {A}), \\text {c o n t e x t}\\right)\\right) \\end{array} \\tag {9} \\\\ \\mathbf {E} _ {B} = \\mathcal {F} _ {\\psi} (\\mathcal {F} _ {\\sigma} (\\mathcal {F} _ {\\omega} (\\mathbf {I} _ {B}), c o n t e x t)) \\\\ \\end{array}", + "image_path": "b34ab970d33aee8f7bb753524ed2943e6bbd35cbfe591157fb47a5c646835255.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 72, + 547, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 547, + 156 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 547, + 156 + ], + "type": "text", + "content": "Fine-tuning with semantics consistency. As illustrated in Fig. 2, our approach aligns the latent semantic embeddings of both the source and target motions in an unsupervised manner, ensuring a high degree of semantic consistency in the retargeted results. 
The semantics consistency loss " + }, + { + "bbox": [ + 305, + 72, + 547, + 156 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{sem}" + }, + { + "bbox": [ + 305, + 72, + 547, + 156 + ], + "type": "text", + "content": " is calculated using the mean square error and it is defined as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 365, + 165, + 545, + 190 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 165, + 545, + 190 + ], + "spans": [ + { + "bbox": [ + 365, + 165, + 545, + 190 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {s e m} = \\sum_ {t} \\| \\mathbf {E} _ {A, t} - \\mathbf {E} _ {B, t} \\| _ {2} ^ {2} \\tag {10}", + "image_path": "7692a0363b9d59358c9841579c9894c7f98c1f5466796dc79bbd9726a48541f7.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 194, + 545, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 194, + 545, + 291 + ], + "spans": [ + { + "bbox": [ + 305, + 194, + 545, + 291 + ], + "type": "text", + "content": "Fine-tuning with geometry constraints. From our observations, most interpenetration problems occur between the limbs and the main body. To address this, we incorporate the signed distance field between the limb vertices and the body mesh as the interpenetration loss. First, we convert the skeleton motion output from the network into mesh vertices using the linear blend skinning method [12]. 
Then, the interpenetration loss is defined as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 357, + 300, + 545, + 324 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 357, + 300, + 545, + 324 + ], + "spans": [ + { + "bbox": [ + 357, + 300, + 545, + 324 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {p e n} = \\sum_ {t} R e L U (- \\Phi_ {b, t} (\\mathbf {V} _ {l, t})) \\tag {11}", + "image_path": "00b7f8b3cc19efb240953f6ac1e7239067f6ca6047be97cb4449f7a89ba03e77.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 329, + 546, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 329, + 546, + 377 + ], + "spans": [ + { + "bbox": [ + 305, + 329, + 546, + 377 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 329, + 546, + 377 + ], + "type": "inline_equation", + "content": "\\Phi_b" + }, + { + "bbox": [ + 305, + 329, + 546, + 377 + ], + "type": "text", + "content": " indicates the signed distance field function, " + }, + { + "bbox": [ + 305, + 329, + 546, + 377 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_l" + }, + { + "bbox": [ + 305, + 329, + 546, + 377 + ], + "type": "text", + "content": " is the vertices of the limbs. If the vertex locates inside the body, the value of the function is less than zero. Therefore, we use the " + }, + { + "bbox": [ + 305, + 329, + 546, + 377 + ], + "type": "inline_equation", + "content": "ReLU" + }, + { + "bbox": [ + 305, + 329, + 546, + 377 + ], + "type": "text", + "content": " function to penalize the inner vertices." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 388, + 388, + 402 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 388, + 388, + 402 + ], + "spans": [ + { + "bbox": [ + 306, + 388, + 388, + 402 + ], + "type": "text", + "content": "4. 
Experiments" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 407, + 366, + 421 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 407, + 366, + 421 + ], + "spans": [ + { + "bbox": [ + 306, + 407, + 366, + 421 + ], + "type": "text", + "content": "4.1. Settings" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 426, + 545, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 426, + 545, + 666 + ], + "spans": [ + { + "bbox": [ + 304, + 426, + 545, + 666 + ], + "type": "text", + "content": "Datasets. We train and evaluate our method on the Mixamo dataset [1], an extensive repository of animations performed by various 3D virtual characters with distinct skeletons and geometry shapes. The training set we use to pretrain our skeleton aware module is the same as that used in [2], which contains 1646 motions performed by 7 characters. It's important to note that the Mixamo dataset does not provide clean ground truth data, since many of the motion sequences suffer from interpenetration issues and semantic information loss. To mitigate this, we have carefully selected a subset of motion sequences that are both semantically clean and free of interpenetration issues for fine-tuning and testing. Our fine-tuning process involves retargeting 15 clean motions including 3127 frames, originally performed by 3 source characters, namely \"Y Bot\", \"X Bot\", and \"Ortiz\", onto 3 target characters, including \"Aj\", \"Kaya\", and \"Mousey\". Then we evaluate the performance of our model on the task of retargeting 30 additional motions that are previously unseen in the training set and fine-tuning sets. More details could be found in the supplementary materials." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 666, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 666, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 305, + 666, + 547, + 714 + ], + "type": "text", + "content": "Implementation details. The hyper-parameters " + }, + { + "bbox": [ + 305, + 666, + 547, + 714 + ], + "type": "inline_equation", + "content": "\\lambda_r, \\lambda_c, \\lambda_a, \\lambda_j, \\lambda_p, \\lambda_s" + }, + { + "bbox": [ + 305, + 666, + 547, + 714 + ], + "type": "text", + "content": " for pre-training and fine-tuning loss functions are set to 10.0, 1.0, 0.1, 1.0, 1.0, 0.1. For semantics fine-tuning, we use BLIP-2 [14] with pre-trained FlanT5-XXL" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2159" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 286, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 286, + 167 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 286, + 167 + ], + "type": "text", + "content": "[5] large language model. To extract the semantic representation of the motion, we render animation from three perspectives, including the front view, left view and right view. The fine-tuning process takes 25 epochs with 5 clean motion sequences of the source character for each target character. During pre-training and fine-tuning, we use an Adam optimizer to optimize the retargeting network. Please refer to the supplementary materials for more details." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 168, + 288, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 168, + 288, + 350 + ], + "spans": [ + { + "bbox": [ + 46, + 168, + 288, + 350 + ], + "type": "text", + "content": "Evaluation metrics. We evaluate the performance of our method across three key dimensions: skeleton, geometry, and semantics. At the skeletal level, we measure the Mean Square Error (MSE) between retargeted joint positions and the ground truth provided by Mixamo, analyzing both the global and the local joint positions. At the geometric level, we evaluate the interpenetration percentage (PEN). At the semantic level, we utilize the Image-Text Matching (ITM) score, Fréchet inception distance (FID) and semantics consistency loss (SCL) as metrics. The ITM score quantifies the visual-semantic similarity between the source textual description and the rendered retargeted motion. FID is calculated between the semantic embedding distribution of retargeted motion and source motion. More details are provided in the supplementary materials." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 358, + 230, + 371 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 358, + 230, + 371 + ], + "spans": [ + { + "bbox": [ + 47, + 358, + 230, + 371 + ], + "type": "text", + "content": "4.2. Comparison with State of the Arts" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 378, + 287, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 378, + 287, + 617 + ], + "spans": [ + { + "bbox": [ + 46, + 378, + 287, + 617 + ], + "type": "text", + "content": "Quantitative. In this section, we conduct a comparative analysis of our method against the state-of-the-art approaches as illustrated in Tab. 1. The baseline methods include R2ET [25], SAN [2], NKN [22] and the Copy strategy. 
The Copy strategy achieves the lowest local MSE because the ground truth data in the Mixamo dataset are not entirely clean, and many of them are generated by copying rotations. As a result, this strategy comes at the cost of semantic loss and interpenetration issues. SAN [2] and NKN [22] focus on skeleton-level motion features, which results in a high interpenetration rate and relatively low semantics preservation. R2ET [25] treats motion semantics as the joint distance matrix and mesh distance field, which helps it obtain better motion semantics than SAN and Copy. Nevertheless, there is still a gap between the human-designed distance matrix and the human-level semantics. Notably, our model exhibits the best interpenetration rate and semantics preservation among all methods, showcasing the capability of the proposed method in producing high-quality retargeted motions with semantics consistency." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 618, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 618, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 618, + 287, + 715 + ], + "type": "text", + "content": "Qualitative. In Fig. 4, we visualize the text descriptions of the motions and the qualitative comparison between the state-of-the-arts and our method. SAN [2] and Copy neglect the preservation of semantics and have severe interpenetration. R2ET [25] utilizes joint distance matrix as semantics representation and fails to capture high-level semantic information. For example, the salute motion retargeted by R2ET [25] appears more like a hand-up motion. As a comparison," + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 305, + 70, + 545, + 164 + ], + "blocks": [ + { + "bbox": [ + 305, + 70, + 545, + 164 + ], + "lines": [ + { + "bbox": [ + 305, + 70, + 545, + 164 + ], + "spans": [ + { + "bbox": [ + 305, + 70, + 545, + 164 + ], + "type": "table", + "html": "
MethodMSE ↓MSElc ↓Pen.% ↓ITM ↑FID ↓SCL ↓
Source--4.430.796--
GT--9.060.58226.991.331
Copy-0.0059.030.58126.581.327
NKN [22]0.3260.2318.710.57527.791.414
SAN [2]0.4350.2559.740.56128.331.448
R2ET [25]0.4990.4967.620.6435.4690.405
Ours0.2840.2293.500.6800.4360.143
", + "image_path": "2b82f0a5a5983c28b764dadc23600be325d82a9c9e9245e1eafee3844373281c.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 306, + 217, + 545, + 309 + ], + "blocks": [ + { + "bbox": [ + 305, + 167, + 545, + 212 + ], + "lines": [ + { + "bbox": [ + 305, + 167, + 545, + 212 + ], + "spans": [ + { + "bbox": [ + 305, + 167, + 545, + 212 + ], + "type": "text", + "content": "Table 1. Quantitative comparison with the state-of-the-arts. " + }, + { + "bbox": [ + 305, + 167, + 545, + 212 + ], + "type": "inline_equation", + "content": "\\mathrm{MSE}^{lc}" + }, + { + "bbox": [ + 305, + 167, + 545, + 212 + ], + "type": "text", + "content": " denotes the local MSE. ITM indicates the image-text matching score. FID is Fréchet inception distance of motion semantics. SCL is the semantics consistency loss." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 306, + 217, + 545, + 309 + ], + "lines": [ + { + "bbox": [ + 306, + 217, + 545, + 309 + ], + "spans": [ + { + "bbox": [ + 306, + 217, + 545, + 309 + ], + "type": "table", + "html": "
MethodMSE ↓MSElc ↓Pen.% ↓ITM ↑FID ↓SCL ↓
SMTtws0.2480.1298.370.5867.7270.769
SMTtwf7.7987.0830.440.43256.5313.29
SMTtwa0.3350.2885.360.6582.8260.266
SMTfwp0.4390.3681.220.5977.2410.583
SMTfwi5.4184.5764.410.55278.4618.96
SMTfwq0.7390.5174.560.6682.4970.191
SMTOurs0.2840.2293.500.6800.4360.143
", + "image_path": "3f37e06acad8ecddeb20ed992af0b49d22e67417caca59727deb5a925f0362a9.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 312, + 545, + 389 + ], + "lines": [ + { + "bbox": [ + 305, + 312, + 545, + 389 + ], + "spans": [ + { + "bbox": [ + 305, + 312, + 545, + 389 + ], + "type": "text", + "content": "Table 2. Ablation study. " + }, + { + "bbox": [ + 305, + 312, + 545, + 389 + ], + "type": "inline_equation", + "content": "\\mathrm{SMT}_{tws}" + }, + { + "bbox": [ + 305, + 312, + 545, + 389 + ], + "type": "text", + "content": " is the network trained with only skeleton-aware pre-training. " + }, + { + "bbox": [ + 305, + 312, + 545, + 389 + ], + "type": "inline_equation", + "content": "\\mathrm{SMT}_{twf}" + }, + { + "bbox": [ + 305, + 312, + 545, + 389 + ], + "type": "text", + "content": " is the network trained with only semantics and geometry fine-tuning. " + }, + { + "bbox": [ + 305, + 312, + 545, + 389 + ], + "type": "inline_equation", + "content": "\\mathrm{SMT}_{twa}" + }, + { + "bbox": [ + 305, + 312, + 545, + 389 + ], + "type": "text", + "content": " is the network trained in one stage. " + }, + { + "bbox": [ + 305, + 312, + 545, + 389 + ], + "type": "inline_equation", + "content": "\\mathrm{SMT}_{fwp}" + }, + { + "bbox": [ + 305, + 312, + 545, + 389 + ], + "type": "text", + "content": " is the network fine-tuned with only the interpenetration loss. " + }, + { + "bbox": [ + 305, + 312, + 545, + 389 + ], + "type": "inline_equation", + "content": "\\mathrm{SMT}_{fwi}" + }, + { + "bbox": [ + 305, + 312, + 545, + 389 + ], + "type": "text", + "content": " is the network fine-tuned with image features. " + }, + { + "bbox": [ + 305, + 312, + 545, + 389 + ], + "type": "inline_equation", + "content": "\\mathrm{SMT}_{fwq}" + }, + { + "bbox": [ + 305, + 312, + 545, + 389 + ], + "type": "text", + "content": " is the network fine-tuned with the features of the querying transformer." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 397, + 545, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 397, + 545, + 494 + ], + "spans": [ + { + "bbox": [ + 304, + 397, + 545, + 494 + ], + "type": "text", + "content": "our method is able to successfully preserve high-level motion semantics leveraging the vision-language model. We observe that our approach reaches the best results among all methods, achieving more reliable semantics preservation and lower interpenetration rates. It suggests that with semantics and geometry fine-tuning, our method could effectively solve interpenetration issues together with semantics preservation." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 502, + 406, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 502, + 406, + 514 + ], + "spans": [ + { + "bbox": [ + 306, + 502, + 406, + 514 + ], + "type": "text", + "content": "4.3. Ablation Studies" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 521, + 545, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 545, + 653 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 545, + 653 + ], + "type": "text", + "content": "Skeleton-aware pre-training. The proposed method can be divided into two stage: pre-training and fine-tuning. To illustrate the importance of skeleton-aware pre-training, we evaluate the network trained with only the semantics consistency loss and the interpenetration loss in Tab. 2, denoted as " + }, + { + "bbox": [ + 304, + 521, + 545, + 653 + ], + "type": "inline_equation", + "content": "\\mathrm{SMT}_{twf}" + }, + { + "bbox": [ + 304, + 521, + 545, + 653 + ], + "type": "text", + "content": ". The network trained without skeleton-aware pretraining performs worst in MSE and semantics preservation. 
A reasonable explanation is that the semantics consistency loss is highly non-linear, so it is important to pre-train the network at the skeletal level to provide better initial values. We also visualize qualitative results in Fig. 5." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 653, + 545, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 653, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 653, + 545, + 715 + ], + "type": "text", + "content": "Semantics & geometry fine-tuning. We also conduct ablation study to illustrate the importance of semantics and geometry fine-tuning in Tab. 2. We first evaluate the performance of the skeleton-aware model without fine-tuning, denoted as " + }, + { + "bbox": [ + 304, + 653, + 545, + 715 + ], + "type": "inline_equation", + "content": "\\mathrm{SMT}_{tws}" + }, + { + "bbox": [ + 304, + 653, + 545, + 715 + ], + "type": "text", + "content": ". Though it reaches the best global posi" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2160" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 74, + 547, + 317 + ], + "blocks": [ + { + "bbox": [ + 47, + 74, + 547, + 317 + ], + "lines": [ + { + "bbox": [ + 47, + 74, + 547, + 317 + ], + "spans": [ + { + "bbox": [ + 47, + 74, + 547, + 317 + ], + "type": "image", + "image_path": "67d286d86772b680eeddbe7dbdd2a2bf855b305691d26c3b7e03dbdc18835f4f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 323, + 546, + 357 + ], + "lines": [ + { + "bbox": [ + 46, + 323, + 546, + 357 + ], + "spans": [ + { + "bbox": [ + 46, + 323, + 546, + 
357 + ], + "type": "text", + "content": "Figure 4. Qualitative comparison. The results demonstrate that our method can effectively preserve semantics while the baseline methods suffer from interpenetration or semantic information loss. From the first column to the last column are the source motion, the Copy strategy, NKN [22], SAN [2], R2ET [25], our method and text descriptions, respectively." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 58, + 365, + 274, + 529 + ], + "blocks": [ + { + "bbox": [ + 58, + 365, + 274, + 529 + ], + "lines": [ + { + "bbox": [ + 58, + 365, + 274, + 529 + ], + "spans": [ + { + "bbox": [ + 58, + 365, + 274, + 529 + ], + "type": "image", + "image_path": "2cefa79331f00b4de3501751f6fd2d2bc1d78bc2156f2a7c10e76f96b04c4e6d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 533, + 287, + 588 + ], + "lines": [ + { + "bbox": [ + 46, + 533, + 287, + 588 + ], + "spans": [ + { + "bbox": [ + 46, + 533, + 287, + 588 + ], + "type": "text", + "content": "Figure 5. The qualitative comparison of ablation study between the network without fine-tuning (TWS), the network trained with only semantics and geometry fine-tuning (TWF), the network trained with all loss functions (TWA), the network fine-tuned with only the interpenetration loss (FWP) and our full model (All)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 594, + 289, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 289, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 289, + 713 + ], + "type": "text", + "content": "tion MSE, it suffers from interpenetration and semantic information loss because of the low-quality motion data provided by Mixamo. 
We next evaluate the network fine-tuned with only the interpenetration loss, denoted as " + }, + { + "bbox": [ + 46, + 594, + 289, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{SMT}_{fwp}" + }, + { + "bbox": [ + 46, + 594, + 289, + 713 + ], + "type": "text", + "content": ". This version results in a significant boost in terms of penetration rate. However, the gradient of interpenetration loss is only relevant with the face normals of the geometry mesh without considering the semantic information conveyed in the motion. It indicates the importance of the semantic consistency loss that makes the network reach a better balance" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 367, + 545, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 367, + 545, + 475 + ], + "spans": [ + { + "bbox": [ + 304, + 367, + 545, + 475 + ], + "type": "text", + "content": "between interpenetration and semantics. We also try to train the network with all loss functions in one stage, denoted as " + }, + { + "bbox": [ + 304, + 367, + 545, + 475 + ], + "type": "inline_equation", + "content": "\\mathrm{SMT}_{twa}" + }, + { + "bbox": [ + 304, + 367, + 545, + 475 + ], + "type": "text", + "content": ". However, it is challenging for the model to acquire general knowledge of interpenetration and semantics that is suitable for every character with limited data. Therefore, training the model with skeleton-aware pre-training and fine-tuning it with semantics consistency and geometry constraints for each target character remains a more reasonable and data-efficient strategy." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 480, + 546, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 480, + 546, + 660 + ], + "spans": [ + { + "bbox": [ + 304, + 480, + 546, + 660 + ], + "type": "text", + "content": "Latent semantic embedding. 
The vision-language model used for semantic extraction can be divided into three parts: the image encoder from CLIP [19], the querying transformer and the large language model. In Tab. 2, we compare the feature outputted by the image encoder, the querying transformer and the encoder of the large language model, denoted as " + }, + { + "bbox": [ + 304, + 480, + 546, + 660 + ], + "type": "inline_equation", + "content": "\\mathrm{SMT}_{fwi}" + }, + { + "bbox": [ + 304, + 480, + 546, + 660 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 480, + 546, + 660 + ], + "type": "inline_equation", + "content": "\\mathrm{SMT}_{fwq}" + }, + { + "bbox": [ + 304, + 480, + 546, + 660 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 480, + 546, + 660 + ], + "type": "inline_equation", + "content": "\\mathrm{SMT}_{Ours}" + }, + { + "bbox": [ + 304, + 480, + 546, + 660 + ], + "type": "text", + "content": ", respectively. The results show that the image feature performs worse since it is greatly affected by the appearance of the character. It indicates that with the help of the large language model, the semantic representation better focuses on the semantic meaning of the motion instead of the character's visual appearance. Therefore, the encoder output of the large language model is more suitable for semantic embedding. More details can be found in the supplementary materials." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 665, + 545, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 715 + ], + "type": "text", + "content": "Prompt design. To validate the importance of guiding visual question answering, we compare the textual descriptions generated by visual question answering with and without guiding questions as well as image captioning. 
The re" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "text", + "content": "2161" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 71, + 113, + 135 + ], + "blocks": [ + { + "bbox": [ + 49, + 71, + 113, + 135 + ], + "lines": [ + { + "bbox": [ + 49, + 71, + 113, + 135 + ], + "spans": [ + { + "bbox": [ + 49, + 71, + 113, + 135 + ], + "type": "image", + "image_path": "06dd6b0b0ab33fa1a55868a80afc687cc42688a78a6088cc89698e1acbf6ac3d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 118, + 77, + 154, + 83 + ], + "lines": [ + { + "bbox": [ + 118, + 77, + 154, + 83 + ], + "spans": [ + { + "bbox": [ + 118, + 77, + 154, + 83 + ], + "type": "text", + "content": "Image Captioning" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 118, + 84, + 176, + 97 + ], + "lines": [ + { + "bbox": [ + 118, + 84, + 176, + 97 + ], + "spans": [ + { + "bbox": [ + 118, + 84, + 176, + 97 + ], + "type": "text", + "content": "A 3d model of a boy wearing glasses and a hat." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 118, + 110, + 171, + 116 + ], + "lines": [ + { + "bbox": [ + 118, + 110, + 171, + 116 + ], + "spans": [ + { + "bbox": [ + 118, + 110, + 171, + 116 + ], + "type": "text", + "content": "Visual Question Answering" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 118, + 117, + 183, + 123 + ], + "lines": [ + { + "bbox": [ + 118, + 117, + 183, + 123 + ], + "spans": [ + { + "bbox": [ + 118, + 117, + 183, + 123 + ], + "type": "text", + "content": "Q: What is the character doing?" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 118, + 123, + 174, + 130 + ], + "lines": [ + { + "bbox": [ + 118, + 123, + 174, + 130 + ], + "spans": [ + { + "bbox": [ + 118, + 123, + 174, + 130 + ], + "type": "text", + "content": "A: The character is praying." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 48, + 140, + 113, + 205 + ], + "blocks": [ + { + "bbox": [ + 48, + 140, + 113, + 205 + ], + "lines": [ + { + "bbox": [ + 48, + 140, + 113, + 205 + ], + "spans": [ + { + "bbox": [ + 48, + 140, + 113, + 205 + ], + "type": "image", + "image_path": "89686dcf50707ed7e14930dbdac4e66d787a29b9ad437e6d61383caac35da9e3.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 118, + 145, + 154, + 152 + ], + "lines": [ + { + "bbox": [ + 118, + 145, + 154, + 152 + ], + "spans": [ + { + "bbox": [ + 118, + 145, + 154, + 152 + ], + "type": "text", + "content": "Image Captioning" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 118, + 152, + 187, + 165 + ], + "lines": [ + { + "bbox": [ + 118, + 152, + 187, + 165 + ], + "spans": [ + { + "bbox": [ + 118, + 152, + 187, + 165 + ], + "type": "text", + "content": "A 3d model of a robot running on a cheeked 
floor." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 118, + 176, + 171, + 182 + ], + "lines": [ + { + "bbox": [ + 118, + 176, + 171, + 182 + ], + "spans": [ + { + "bbox": [ + 118, + 176, + 171, + 182 + ], + "type": "text", + "content": "Visual Question Answering" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 118, + 183, + 184, + 203 + ], + "lines": [ + { + "bbox": [ + 118, + 183, + 184, + 203 + ], + "spans": [ + { + "bbox": [ + 118, + 183, + 184, + 203 + ], + "type": "text", + "content": "Q: What is the character doing? \nA: The character is running on a checkered floor." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 47, + 211, + 287, + 233 + ], + "lines": [ + { + "bbox": [ + 47, + 211, + 287, + 233 + ], + "spans": [ + { + "bbox": [ + 47, + 211, + 287, + 233 + ], + "type": "text", + "content": "Figure 6. Text descriptions generated by different ways. The guiding visual question answering yields more comprehensive results." + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "table", + "bbox": [ + 50, + 239, + 280, + 323 + ], + "blocks": [ + { + "bbox": [ + 50, + 239, + 280, + 323 + ], + "lines": [ + { + "bbox": [ + 50, + 239, + 280, + 323 + ], + "spans": [ + { + "bbox": [ + 50, + 239, + 280, + 323 + ], + "type": "table", + "html": "
MethodQuality ↑Smoothness ↑Semantics ↑
Copy0.720.860.71
NKN [22]0.650.800.66
SAN [2]0.690.820.67
R2ET [25]0.800.610.85
Ours0.890.800.92
", + "image_path": "2ac2256d22a7e955f6a30ba90a569076e2396410284e0fcb87ca4c00023820b5.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 47, + 326, + 287, + 359 + ], + "lines": [ + { + "bbox": [ + 47, + 326, + 287, + 359 + ], + "spans": [ + { + "bbox": [ + 47, + 326, + 287, + 359 + ], + "type": "text", + "content": "Table 3. User study results. We collect 100 comparisons in three aspects. Our method gets highest scores in the overall quality as well as semantics preservation." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 20 + }, + { + "bbox": [ + 46, + 363, + 287, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 363, + 287, + 447 + ], + "spans": [ + { + "bbox": [ + 46, + 363, + 287, + 447 + ], + "type": "text", + "content": "sults in Fig. 6 indicate that using guiding questions for visual question answering yields the most comprehensive and reasonable text descriptions for motion semantics. Compared with image captioning that uses the vision-language model to generate text description directly from images, the answers from visual question answering task can be guided by the designed question to focus on motion semantics." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 47, + 454, + 122, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 454, + 122, + 467 + ], + "spans": [ + { + "bbox": [ + 47, + 454, + 122, + 467 + ], + "type": "text", + "content": "4.4. User Study" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 46, + 472, + 287, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 472, + 287, + 616 + ], + "spans": [ + { + "bbox": [ + 46, + 472, + 287, + 616 + ], + "type": "text", + "content": "We conduct a user study to evaluate the performance of our method against the baseline methods. Human subjects are given 12 videos. 
Each video includes one source skinned motion and five anonymous skinned results. The retargeted results are randomly placed. We ask subjects to rate the results out of 1.0 in three aspects: overall quality, motion smoothness and semantics preservation. We collect a total of 100 comparisons. During the evaluation, users are required to extract semantic meaning from the source motion themselves and then evaluate the preservation of retargeted motions. In general, more than " + }, + { + "bbox": [ + 46, + 472, + 287, + 616 + ], + "type": "inline_equation", + "content": "92\\%" + }, + { + "bbox": [ + 46, + 472, + 287, + 616 + ], + "type": "text", + "content": " of subjects prefer the retargeting results of our method." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 47, + 623, + 260, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 623, + 260, + 635 + ], + "spans": [ + { + "bbox": [ + 47, + 623, + 260, + 635 + ], + "type": "text", + "content": "4.5. Retargeting Motion from Human Videos" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 46, + 642, + 287, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 287, + 712 + ], + "type": "text", + "content": "In this section, we evaluate our motion retargeting approach from human videos in the human3.6M [10] dataset. Video retargeting involves two stages: human pose estimation from video and motion retargeting. 
However, inaccuracies in estimating body postures may result in semantic information loss and thus accumulation of errors in the entire" + } + ] + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 318, + 71, + 388, + 140 + ], + "blocks": [ + { + "bbox": [ + 198, + 84, + 268, + 90 + ], + "lines": [ + { + "bbox": [ + 198, + 84, + 268, + 90 + ], + "spans": [ + { + "bbox": [ + 198, + 84, + 268, + 90 + ], + "type": "text", + "content": "Guiding Visual Question Answering" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 198, + 90, + 283, + 95 + ], + "lines": [ + { + "bbox": [ + 198, + 90, + 283, + 95 + ], + "spans": [ + { + "bbox": [ + 198, + 90, + 283, + 95 + ], + "type": "text", + "content": "Q: Where are the hands of the character?" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 318, + 71, + 388, + 140 + ], + "lines": [ + { + "bbox": [ + 318, + 71, + 388, + 140 + ], + "spans": [ + { + "bbox": [ + 318, + 71, + 388, + 140 + ], + "type": "image", + "image_path": "31a5234b3dd525a192b0a0e30eebda6fa48e4919aaca3bb3d5291bf467c490ce.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 390, + 71, + 459, + 140 + ], + "blocks": [ + { + "bbox": [ + 390, + 71, + 459, + 140 + ], + "lines": [ + { + "bbox": [ + 390, + 71, + 459, + 140 + ], + "spans": [ + { + "bbox": [ + 390, + 71, + 459, + 140 + ], + "type": "image", + "image_path": "6b583f7206f29c0b1fc775e655fae1efd11ecf2e6f701ff2617c580822706bd8.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 462, + 71, + 531, + 140 + ], + "blocks": [ + { + "bbox": [ + 462, + 71, + 531, + 140 + ], + "lines": [ + { + "bbox": [ + 462, + 71, + 531, + 140 + ], + "spans": [ + { + "bbox": [ + 462, + 71, + 531, + 140 + ], + "type": "image", + "image_path": 
"8f56ffaf23669bc2d89939f221262149d7058f8c5b811f3796ffd00f378fcaf0.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 318, + 142, + 388, + 212 + ], + "blocks": [ + { + "bbox": [ + 198, + 152, + 268, + 158 + ], + "lines": [ + { + "bbox": [ + 198, + 152, + 268, + 158 + ], + "spans": [ + { + "bbox": [ + 198, + 152, + 268, + 158 + ], + "type": "text", + "content": "Guiding Visual Question Answering" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 198, + 159, + 283, + 170 + ], + "lines": [ + { + "bbox": [ + 198, + 159, + 283, + 170 + ], + "spans": [ + { + "bbox": [ + 198, + 159, + 283, + 170 + ], + "type": "text", + "content": "Q: Where are the hands of the character? A: Holding a ball." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 198, + 170, + 264, + 175 + ], + "lines": [ + { + "bbox": [ + 198, + 170, + 264, + 175 + ], + "spans": [ + { + "bbox": [ + 198, + 170, + 264, + 175 + ], + "type": "text", + "content": "Q: What is the character doing?" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 198, + 175, + 280, + 193 + ], + "lines": [ + { + "bbox": [ + 198, + 175, + 280, + 193 + ], + "spans": [ + { + "bbox": [ + 198, + 175, + 280, + 193 + ], + "type": "text", + "content": "A: The character is trying to throw a ball with both hands on the right side of his body." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 318, + 142, + 388, + 212 + ], + "lines": [ + { + "bbox": [ + 318, + 142, + 388, + 212 + ], + "spans": [ + { + "bbox": [ + 318, + 142, + 388, + 212 + ], + "type": "image", + "image_path": "90c070f166b3ff245499e227a911f1815b1940b8b7385812cb83bc7354f8103a.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 216, + 545, + 249 + ], + "lines": [ + { + "bbox": [ + 306, + 216, + 545, + 249 + ], + "spans": [ + { + "bbox": [ + 306, + 216, + 545, + 249 + ], + "type": "text", + "content": "Figure 7. We retarget from human motion clips in the human3.6M [10] dataset. The retargeted motions are free from interpenetration and preserve semantics well." + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_caption" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 390, + 143, + 459, + 212 + ], + "blocks": [ + { + "bbox": [ + 390, + 143, + 459, + 212 + ], + "lines": [ + { + "bbox": [ + 390, + 143, + 459, + 212 + ], + "spans": [ + { + "bbox": [ + 390, + 143, + 459, + 212 + ], + "type": "image", + "image_path": "6e07af2e5d7ced8a5d3c5c66f78180d8a2e50126bd1e2e0cee03ada486231a7d.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 462, + 143, + 531, + 212 + ], + "blocks": [ + { + "bbox": [ + 462, + 143, + 531, + 212 + ], + "lines": [ + { + "bbox": [ + 462, + 143, + 531, + 212 + ], + "spans": [ + { + "bbox": [ + 462, + 143, + 531, + 212 + ], + "type": "image", + "image_path": "1be675d4ea3fdae8b9bd00e2d57b8dc35130e1f7e72e0afc87a8a71c3a69667a.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + } + ], + "index": 32 + }, + { + "bbox": [ + 305, + 252, + 545, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 252, + 545, + 335 + ], + "spans": [ + { + "bbox": [ + 305, + 252, + 545, + 335 + 
], + "type": "text", + "content": "retargeting process. Therefore, we first get the estimated human pose from [17]. Then we utilize the vision-language model to extract the semantic embedding of the original video and calculate the semantic consistency loss to optimize the joint angles acquired from the retargeting process directly. In Fig. 7, we show our results of motion retargeting from human videos to Mixamo characters." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 306, + 346, + 383, + 358 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 346, + 383, + 358 + ], + "spans": [ + { + "bbox": [ + 306, + 346, + 383, + 358 + ], + "type": "text", + "content": "5. Conclusions" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 304, + 366, + 545, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 366, + 545, + 498 + ], + "spans": [ + { + "bbox": [ + 304, + 366, + 545, + 498 + ], + "type": "text", + "content": "In this paper, we present a novel semantics-aware motion retargeting method that leverages the capabilities of vision-language models to extract semantic embeddings and facilitate the preservation of motion semantics. This approach offers a promising solution to the challenge of lacking labelled semantic data for motion. Our proposed method involves a two-stage process that integrates skeleton-level motion characteristics and semantics-level consistency along with geometry constraints. Experimental results demonstrate that our approach excels in generating high-quality retargeted motions with semantics consistency." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 304, + 498, + 545, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 498, + 545, + 605 + ], + "spans": [ + { + "bbox": [ + 304, + 498, + 545, + 605 + ], + "type": "text", + "content": "Limitations. The main limitation is the performance of the vision-language model in extracting motion semantics. 
Without the support of motion semantic datasets of sufficient data size and quality, we rely on the model pre-trained on large image-text datasets. Although the model achieves some remarkable results in motion semantics extraction, there is still room for improvement. In addition, the projection of 3D motion into 2D images loses spatial information and affects the performance." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 304, + 606, + 545, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 606, + 545, + 676 + ], + "spans": [ + { + "bbox": [ + 304, + 606, + 545, + 676 + ], + "type": "text", + "content": "Future work. Compared with 2D vision-language models, 3D vision-language models have the advantage of capturing spatial relationships directly. Therefore, fine-tuning 3D vision-language models to make them more suitable for the task of motion semantics extraction is worth exploring in our future work." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 304, + 677, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 712 + ], + "type": "text", + "content": "Acknowledgements. This work was supported by the National Nature Science Foundation of China under Grant 62173293." 
+ } + ] + } + ], + "index": 39 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "2162" + } + ] + } + ], + "index": 40 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 112 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 112 + ], + "type": "text", + "content": "[1] Adobe's mixamo. https://www MIXamo.com/. Accessed: 2023-02-08." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 114, + 287, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 114, + 287, + 158 + ], + "spans": [ + { + "bbox": [ + 53, + 114, + 287, + 158 + ], + "type": "text", + "content": "[2] Kfir Aberman, Peizhuo Li, Dani Lischinski, Olga Sorkine-Hornung, Daniel Cohen-Or, and Baoquan Chen. Skeleton-aware networks for deep motion retargeting. ACM Transactions on Graphics (TOG), 39(4):62-1, 2020." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 159, + 288, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 159, + 288, + 204 + ], + "spans": [ + { + "bbox": [ + 53, + 159, + 288, + 204 + ], + "type": "text", + "content": "[3] Daichi Azuma, Taiki Miyanishi, Shuhei Kurita, and Motoaki Kawanabe. Scanqa: 3d question answering for spatial scene understanding. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 205, + 287, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 205, + 287, + 237 + ], + "spans": [ + { + "bbox": [ + 53, + 205, + 287, + 237 + ], + "type": "text", + "content": "[4] Kwang-Jin Choi and Hyeong-Seok Ko. Online motion retargeting. The Journal of Visualization and Computer Animation, 11(5):223-235, 2000." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 239, + 288, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 239, + 288, + 294 + ], + "spans": [ + { + "bbox": [ + 53, + 239, + 288, + 294 + ], + "type": "text", + "content": "[5] Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Yunxuan Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, et al. Scaling instruction-finetuned language models. arXiv preprint arXiv:2210.11416, 2022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 296, + 288, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 296, + 288, + 329 + ], + "spans": [ + { + "bbox": [ + 53, + 296, + 288, + 329 + ], + "type": "text", + "content": "[6] Michael Gleicher. Retargetting motion to new characters. In Proceedings of the 25th annual conference on Computer graphics and interactive techniques, pages 33-42, 1998." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 331, + 287, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 331, + 287, + 385 + ], + "spans": [ + { + "bbox": [ + 53, + 331, + 287, + 385 + ], + "type": "text", + "content": "[7] Chuan Guo, Xinxin Zuo, Sen Wang, Shihao Zou, Qingyao Sun, Annan Deng, Minglun Gong, and Li Cheng. Action2motion: Conditioned generation of 3d human motions. In Proceedings of the 28th ACM International Conference on Multimedia, pages 2021-2029, 2020." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 387, + 287, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 387, + 287, + 441 + ], + "spans": [ + { + "bbox": [ + 53, + 387, + 287, + 441 + ], + "type": "text", + "content": "[8] Chuan Guo, Shihao Zou, Xinxin Zuo, Sen Wang, Wei Ji, Xingyu Li, and Li Cheng. Generating diverse and natural 3d human motions from text. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5152-5161, 2022." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 443, + 287, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 443, + 287, + 487 + ], + "spans": [ + { + "bbox": [ + 53, + 443, + 287, + 487 + ], + "type": "text", + "content": "[9] Lei Hu, Zihao Zhang, Chongyang Zhong, Boyuan Jiang, and Shihong Xia. Pose-aware attention network for flexible motion retargeting by body part. IEEE Transactions on Visualization and Computer Graphics, pages 1-17, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 488, + 287, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 488, + 287, + 543 + ], + "spans": [ + { + "bbox": [ + 48, + 488, + 287, + 543 + ], + "type": "text", + "content": "[10] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3.6m: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE Transactions on Pattern Analysis and Machine Intelligence, 36(7):1325-1339, 2014." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 544, + 287, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 544, + 287, + 588 + ], + "spans": [ + { + "bbox": [ + 48, + 544, + 287, + 588 + ], + "type": "text", + "content": "[11] Jehee Lee and Sung Yong Shin. A hierarchical approach to interactive motion editing for human-like figures. 
In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, pages 39-48, 1999." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 590, + 287, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 287, + 644 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 287, + 644 + ], + "type": "text", + "content": "[12] John P Lewis, Matt Cordner, and Nickson Fong. Pose space deformation: a unified approach to shape interpolation and skeleton-driven deformation. In Proceedings of the 27th annual conference on Computer graphics and interactive techniques, pages 165-172, 2000." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 647, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 287, + 689 + ], + "type": "text", + "content": "[13] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "type": "text", + "content": "[14] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. BLIP-2: bootstrapping language-image pre-training with" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 714 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "frozen image encoders and large language models. In ICML, 2023." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 96, + 545, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 96, + 545, + 129 + ], + "spans": [ + { + "bbox": [ + 308, + 96, + 545, + 129 + ], + "type": "text", + "content": "[15] Jongin Lim, Hyung Jin Chang, and Jin Young Choi. Pmnet: Learning of disentangled pose and movement for unsupervised motion retargeting. In BMVC, page 7, 2019." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 130, + 545, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 130, + 545, + 175 + ], + "spans": [ + { + "bbox": [ + 308, + 130, + 545, + 175 + ], + "type": "text", + "content": "[16] Shichen Liu, Tianye Li, Weikai Chen, and Hao Li. Soft rasterizer: A differentiable renderer for image-based 3d reasoning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7708-7717, 2019." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 175, + 545, + 219 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 175, + 545, + 219 + ], + "spans": [ + { + "bbox": [ + 308, + 175, + 545, + 219 + ], + "type": "text", + "content": "[17] Gyeongsik Moon, Hongsuk Choi, and Kyoung Mu Lee. Accurate 3d hand pose estimation for whole-body 3d human mesh estimation. In Computer Vision and Pattern Recognition Workshop (CVPRW), 2022." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 220, + 545, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 220, + 545, + 263 + ], + "spans": [ + { + "bbox": [ + 308, + 220, + 545, + 263 + ], + "type": "text", + "content": "[18] Zoran Popović and Andrew Witkin. Physically based motion transformation. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, pages 11-20, 1999." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 265, + 545, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 265, + 545, + 331 + ], + "spans": [ + { + "bbox": [ + 308, + 265, + 545, + 331 + ], + "type": "text", + "content": "[19] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 332, + 545, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 332, + 545, + 376 + ], + "spans": [ + { + "bbox": [ + 308, + 332, + 545, + 376 + ], + "type": "text", + "content": "[20] Guy Tevet, Brian Gordon, Amir Hertz, Amit H Bermano, and Daniel Cohen-Or. Motionclip: Exposing human motion generation to clip space. In European Conference on Computer Vision, pages 358–374. Springer, 2022." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 377, + 545, + 410 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 377, + 545, + 410 + ], + "spans": [ + { + "bbox": [ + 308, + 377, + 545, + 410 + ], + "type": "text", + "content": "[21] Guy Tevet, Sigal Raab, Brian Gordon, Yonatan Shafir, Daniel Cohen-Or, and Amit H Bermano. Human motion diffusion model. arXiv preprint arXiv:2209.14916, 2022." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 411, + 545, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 411, + 545, + 464 + ], + "spans": [ + { + "bbox": [ + 308, + 411, + 545, + 464 + ], + "type": "text", + "content": "[22] Ruben Villegas, Jimei Yang, Duygu Ceylan, and Honglak Lee. Neural kinematic networks for unsupervised motion retargeting. 
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8639-8648, 2018." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 468, + 545, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 468, + 545, + 511 + ], + "spans": [ + { + "bbox": [ + 308, + 468, + 545, + 511 + ], + "type": "text", + "content": "[23] Ruben Villegas, Duygu Ceylan, Aaron Hertzmann, Jimei Yang, and Jun Saito. Contact-aware retargeting of skinned motion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9720-9729, 2021." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 513, + 545, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 513, + 545, + 545 + ], + "spans": [ + { + "bbox": [ + 308, + 513, + 545, + 545 + ], + "type": "text", + "content": "[24] Zhengyuan Yang, Songyang Zhang, Liwei Wang, and Jiebo Luo. Sat: 2d semantics assisted training for 3d visual grounding. In ICCV, 2021." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 547, + 545, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 547, + 545, + 612 + ], + "spans": [ + { + "bbox": [ + 308, + 547, + 545, + 612 + ], + "type": "text", + "content": "[25] Jiaxu Zhang, Junwu Weng, Di Kang, Fang Zhao, Shaoli Huang, Xuefei Zhe, Linchao Bao, Ying Shan, Jue Wang, and Zhigang Tu. Skinned motion retargeting with residual perception of motion semantics & geometry. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13864-13872, 2023." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 613, + 545, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 613, + 545, + 667 + ], + "spans": [ + { + "bbox": [ + 308, + 613, + 545, + 667 + ], + "type": "text", + "content": "[26] Yi Zhou, Connelly Barnes, Jingwan Lu, Jimei Yang, and Hao Li. 
On the continuity of rotation representations in neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5745-5753, 2019." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 670, + 545, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 670, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 308, + 670, + 545, + 714 + ], + "type": "text", + "content": "[27] Deyao Zhu, Jun Chen, Xiaogian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023." + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2163" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 107 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 288, + 107 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 288, + 107 + ], + "type": "text", + "content": "[28] Ziyu Zhu, Xiaojian Ma, Yixin Chen, Zhidong Deng, Siyuan Huang, and Qing Li. 3d-vista: Pre-trained transformer for 3d vision and text alignment. ICCV, 2023." + } + ] + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 316, + 757 + ], + "type": "text", + "content": "2164" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file