diff --git "a/2025/Semantic Discrepancy-aware Detector for Image Forgery Identification/layout.json" "b/2025/Semantic Discrepancy-aware Detector for Image Forgery Identification/layout.json" new file mode 100644--- /dev/null +++ "b/2025/Semantic Discrepancy-aware Detector for Image Forgery Identification/layout.json" @@ -0,0 +1,9276 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 85, + 103, + 524, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 103, + 524, + 120 + ], + "spans": [ + { + "bbox": [ + 85, + 103, + 524, + 120 + ], + "type": "text", + "content": "Semantic Discrepancy-aware Detector for Image Forgery Identification" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 153, + 143, + 458, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 143, + 458, + 172 + ], + "spans": [ + { + "bbox": [ + 153, + 143, + 458, + 172 + ], + "type": "text", + "content": "Ziye Wang Minghang Yu Chunyan Xu* Zhen Cui Nanjing University of Science and Technology, Nanjing, China" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 157, + 174, + 449, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 174, + 449, + 185 + ], + "spans": [ + { + "bbox": [ + 157, + 174, + 449, + 185 + ], + "type": "text", + "content": "{wzynjust,mhyu,cyx}@njust.edu.cn,zhen.cui@bnu.edu.cn" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 152, + 213, + 200, + 224 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 213, + 200, + 224 + ], + "spans": [ + { + "bbox": [ + 152, + 213, + 200, + 224 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 239, + 296, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 239, + 296, + 550 + ], + "spans": [ + { + "bbox": [ + 55, + 239, + 296, + 550 + ], + "type": "text", + "content": "With the rapid advancement of image generation techniques, robust forgery detection has become increasingly imperative to ensure the trustworthiness of digital media. Recent research indicates that the learned semantic concepts of pre-trained models are critical for identifying fake images. However, the misalignment between the forgery and semantic concept spaces hinders the model's forgery detection performance. To address this problem, we propose a novel Semantic Discrepancy-aware Detector (SDD) that leverages reconstruction learning to align the two spaces at a fine-grained visual level. By exploiting the conceptual knowledge embedded in the pre-trained vision-language model, we specifically design a semantic token sampling module to mitigate the space shifts caused by features irrelevant to both forgery traces and semantic concepts. A concept-level forgery discrepancy learning module, based on reconstruction, enhances the interaction between semantic concepts and forgery traces, effectively capturing discrepancies under the concepts' guidance. Finally, the low-level forgery feature enhancement integrates the learned concept-level forgery discrepancies to minimize redundant forgery information. Experiments conducted on two standard image forgery datasets demonstrate the efficacy of the proposed SDD, which achieves superior results compared to existing methods. The code is available at https://github.com/wzy111111/SSD." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 574, + 135, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 574, + 135, + 588 + ], + "spans": [ + { + "bbox": [ + 56, + 574, + 135, + 588 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 595, + 295, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 595, + 295, + 677 + ], + "spans": [ + { + "bbox": [ + 55, + 595, + 295, + 677 + ], + "type": "text", + "content": "With the thriving of generative AI technologies, like Generative Adversarial Networks (GANs) [14] and diffusion models [2], the images generated by these models can easily create confusion by passing off the spurious as genuine. Therefore, it is crucial to develop a universal method for detecting fake images to mitigate the widespread dissemination of disinformation." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 680, + 295, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 680, + 295, + 693 + ], + "spans": [ + { + "bbox": [ + 67, + 680, + 295, + 693 + ], + "type": "text", + "content": "Pioneering research [26, 32] has shown that projecting" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 326, + 211, + 544, + 266 + ], + "blocks": [ + { + "bbox": [ + 326, + 211, + 544, + 266 + ], + "lines": [ + { + "bbox": [ + 326, + 211, + 544, + 266 + ], + "spans": [ + { + "bbox": [ + 326, + 211, + 544, + 266 + ], + "type": "image", + "image_path": "ebad5eb90f3ca9bb87c11fd2b66f803687de28196141364d2508083b6c397c5a.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 327, + 267, + 541, + 430 + ], + "blocks": [ + { + "bbox": [ + 327, + 267, + 541, + 430 + ], + "lines": [ + { + "bbox": [ + 327, + 267, + 541, + 430 + ], + "spans": [ + { + "bbox": [ + 327, + 267, + 541, + 430 + ], + "type": "image", + "image_path": "81954e406961bea5aa5a7ed67c1b28aa41cf3ad135b9f7908fb60b5698e8f433.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 440, + 555, + 561 + ], + "lines": [ + { + "bbox": [ + 313, + 440, + 555, + 561 + ], + "spans": [ + { + "bbox": [ + 313, + 440, + 555, + 561 + ], + "type": "text", + "content": "Figure 1. The phenomenon of misalignment between semantic concept space and forgery space. Since " + }, + { + "bbox": [ + 313, + 440, + 555, + 561 + ], + "type": "inline_equation", + "content": "\\cos \\theta" + }, + { + "bbox": [ + 313, + 440, + 555, + 561 + ], + "type": "text", + "content": " can reflect the similarity of image descriptions, we model the feature space in polar coordinates. As the semantic concept space in [32] is frozen, fake samples sharing similar concepts with real ones can be easily misclassified. With forgery-adaptive space like [26], the model can correctly distinguish between them based on re-learned forgery features. Nevertheless, due to the semantic concept bias introduced by coarse text prompts, the target samples may be projected into an inaccurate semantic concept dimension, causing them to drift away from the real source samples along the fake dimension." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 569, + 553, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 569, + 553, + 640 + ], + "spans": [ + { + "bbox": [ + 313, + 569, + 553, + 640 + ], + "type": "text", + "content": "images in a joint embedding space of texts and images can effectively capture discrepancies between fake and real images. In contrast, methods [6, 13, 44, 50] overlooking the interplay between forgery traces and semantic concepts perform poorly when confronted with unseen generative models." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 642, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 642, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 642, + 555, + 713 + ], + "type": "text", + "content": "To investigate the visual semantic concepts of pretrained models, we conduct a statistical analysis of the output features from CNNSpot [50] and CLIP-ViT [32] (See Appendix A for more details). Under different categories, CNNSpot exhibits a synchronized difference between real and fake features in its training space. However, when tran" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 146, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 146, + 0, + 494, + 37 + ], + "type": "text", + "content": "This ICCV paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 703, + 143, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 703, + 143, + 712 + ], + "spans": [ + { + "bbox": [ + 67, + 703, + 143, + 712 + ], + "type": "text", + "content": "*Corresponding author" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "18388" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 67, + 70, + 285, + 186 + ], + "blocks": [ + { + "bbox": [ + 67, + 70, + 285, + 186 + ], + "lines": [ + { + "bbox": [ + 67, + 70, + 285, + 186 + ], + "spans": [ + { + "bbox": [ + 67, + 70, + 285, + 186 + ], + "type": "image", + "image_path": "3211c3b1e370bfa02685bd229d7de6b14fbe69599dd492db2cb33e55aba0b7cb.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 193, + 296, + 261 + ], + "lines": [ + { + "bbox": [ + 55, + 193, + 296, + 261 + ], + "spans": [ + { + "bbox": [ + 55, + 193, + 296, + 261 + ], + "type": "text", + "content": "Figure 2. Different paradigms of image forgery identification with pre-trained vision-language model. (a) Fine-tune the frozen model only by fully connected (FC) layers [32]. 
(b) Prompt-based designs are tuned on text prompts and contrastive objectives [26]. (c) Our paradigm incorporating visual clues can capture fine-grained forgery traces by reconstruction learning." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 268, + 296, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 268, + 296, + 328 + ], + "spans": [ + { + "bbox": [ + 55, + 268, + 296, + 328 + ], + "type": "text", + "content": "sitioning to the CLIP's space, these differences become inconsistent. From this, we infer a nuanced relationship between semantic concepts and forgery traces: Different semantic concepts may guide the model to uncover distinct forgery traces." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 329, + 296, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 329, + 296, + 484 + ], + "spans": [ + { + "bbox": [ + 55, + 329, + 296, + 484 + ], + "type": "text", + "content": "Intuitively, relying on a frozen pre-trained vision-language model like UnivFD [32] is beneficial, but this tends to overlook fine-grained forgery details. Although FatFormer [26] achieves a substantial enhancement in generalization by employing the forgery-aware adaptive transformer, we observe that soft prompts based on simple [CLASS] embeddings have an intrinsic limitation in their semantic description granularity (See Appendix B for more details). The constrained breadth of the conveyed concepts may lead the detection toward incorrect predictions. This limitation highlights a misalignment between the visual semantic concept space and the target forgery space, as illustrated in Fig. 1." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 485, + 296, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 485, + 296, + 579 + ], + "spans": [ + { + "bbox": [ + 55, + 485, + 296, + 579 + ], + "type": "text", + "content": "To address this, one empirical approach is to design more detailed text descriptions, but this method struggles to describe all visual forgery details due to the limited length of texts and brings more computational overhead. Drawing from the aforementioned findings and analysis, we make a first attempt to align the CLIP's visual semantic concept space with the forgery space by reconstructing semantic features." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 582, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 582, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 582, + 296, + 715 + ], + "type": "text", + "content": "We develop a vision-based paradigm, as outlined in Fig. 2. First, employing a pre-trained model only with nearest neighbor or linear probing (e.g. UnivFD [32], Fig. 2 (a)) is suboptimal for image forgery detection. Second, modifying the pre-trained model with task-specific prompts (e.g. FatFormer [26], Fig. 2 (b)) may bias the model towards particular semantic concepts. These studies pave the way for exploring the pre-trained space with rich semantic concepts. 
Inspired by image reconstruction [43, 53], our paradigm amplifies the concept-level forgery discrepancies of forged images, which empowers the model to detect sus" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 72, + 553, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 95 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 95 + ], + "type": "text", + "content": "picious forgery traces with the assistance of semantic concepts." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 96, + 555, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 96, + 555, + 443 + ], + "spans": [ + { + "bbox": [ + 313, + 96, + 555, + 443 + ], + "type": "text", + "content": "In this work, we present a novel Semantic Discrepancy-aware Detector (SDD) to accurately align the semantic concept space and the forgery space. Firstly, to mitigate interference from features unrelated to learned semantic concepts and forgery traces, we divide the real images into non-overlapping blocks and feed them to the frozen CLIP [36] to obtain diverse semantic patch tokens. These tokens, acting as visual clues, smoothly align the semantic concept space and the forgery space. It is noteworthy that these tokens sampled by JS divergence are universally representative of the real semantic distribution. Then, the visual clues are fused into a concept-level forgery discrepancy module. Unlike FatFormer, LoRA layers are incorporated into the image encoder. The goal is to preserve the completeness and diversity of the learned semantic concepts of CLIP, while the forgery features sharing similar semantic concepts should be highlighted. During reconstruction, we only narrow the reconstruction gap for real samples to reinforce the reconstructed discrepancies of the synthetic images. Finally, we present low-level forgery feature enhancement to let the reconstruction difference map guide the extraction of highly generalizable forgery features while introducing minimal additional parameters. The main challenge is how to capture forgery features with strong semantic concept correlation, as well as features with high forgery relevance but weak semantic concept ties, to ensure the model converges to powerful features. Motivated by this, we apply convolutional modules and adaptive weight parameters to avoid over-relying on semantic concepts." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 443, + 556, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 443, + 556, + 525 + ], + "spans": [ + { + "bbox": [ + 313, + 443, + 556, + 525 + ], + "type": "text", + "content": "We thoroughly evaluate the generalization performance of our model on the UnivFD benchmark [32] and the SynRIS benchmark [5]. Surprisingly, our method achieves superior performance with an " + }, + { + "bbox": [ + 313, + 443, + 556, + 525 + ], + "type": "inline_equation", + "content": "ap_{m}" + }, + { + "bbox": [ + 313, + 443, + 556, + 525 + ], + "type": "text", + "content": " of 98.51% and an " + }, + { + "bbox": [ + 313, + 443, + 556, + 525 + ], + "type": "inline_equation", + "content": "acc_{m}" + }, + { + "bbox": [ + 313, + 443, + 556, + 525 + ], + "type": "text", + "content": " of 93.61% on the UnivFD benchmark [32] and an average AUROC of 95.1% on the SynRIS benchmark [5]. 
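For reference, the reported metrics — mean average precision (ap_m), mean accuracy (acc_m), and AUROC — are standard detection metrics that can be computed per test generator; the sketch below is illustrative (variable names and the 0.5 threshold are assumptions, not from the paper's released code).

```python
# Illustrative sketch (not the paper's code): per-generator detection metrics.
import numpy as np
from sklearn.metrics import average_precision_score, accuracy_score, roc_auc_score

def detection_metrics(y_true, y_score, threshold=0.5):
    """y_true: 1 = fake, 0 = real; y_score: predicted probability of being fake."""
    ap = average_precision_score(y_true, y_score)
    acc = accuracy_score(y_true, (np.asarray(y_score) >= threshold).astype(int))
    auroc = roc_auc_score(y_true, y_score)
    return ap, acc, auroc

# ap_m / acc_m would then be the averages of per-generator AP / accuracy.
```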
In summary, our contributions are as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 527, + 553, + 647 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 313, + 527, + 553, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 527, + 553, + 562 + ], + "spans": [ + { + "bbox": [ + 313, + 527, + 553, + 562 + ], + "type": "text", + "content": "- We propose a robust model (SDD) for forgery detection, specifically designed to align the visual concept space and forgery space in terms of visual information." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 563, + 553, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 563, + 553, + 609 + ], + "spans": [ + { + "bbox": [ + 313, + 563, + 553, + 609 + ], + "type": "text", + "content": "- We sample semantic tokens to mitigate the space shifts and align the two spaces through reconstruction learning. Additionally, we strengthen low-level forgery features to enhance the model's robustness." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 611, + 553, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 611, + 553, + 647 + ], + "spans": [ + { + "bbox": [ + 313, + 611, + 553, + 647 + ], + "type": "text", + "content": "- Our method achieves superior performance on two benchmarks, demonstrating its advantage over existing approaches." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 313, + 657, + 400, + 670 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 657, + 400, + 670 + ], + "spans": [ + { + "bbox": [ + 313, + 657, + 400, + 670 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 677, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 677, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 677, + 555, + 715 + ], + "type": "text", + "content": "AI-generated Image Detection. Extensive efforts have been devoted to enhancing the performance of AI-generated image detection. Early works like [25, 44, 45] tend to mine" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "18389" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 58, + 70, + 555, + 305 + ], + "blocks": [ + { + "bbox": [ + 58, + 70, + 555, + 305 + ], + "lines": [ + { + "bbox": [ + 58, + 70, + 555, + 305 + ], + "spans": [ + { + "bbox": [ + 58, + 70, + 555, + 305 + ], + "type": "image", + "image_path": "94dd20070c1e79972c6f8254b876b80beacef33b3db35c97c2297de23b51f24e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 313, + 555, + 381 + ], + "lines": [ + { + "bbox": [ + 55, + 313, + 555, + 381 + ], + "spans": [ + { + "bbox": [ + 55, + 313, + 555, + 381 + ], + "type": "text", + "content": "Figure 3. The architecture of SDD. First, we sample semantic tokens from real images to learn features related to both concepts and forgery. Next, 
the input images are mapped into a joint space of visual semantic concepts and forgery, which are transformed into learnable features " + }, + { + "bbox": [ + 55, + 313, + 555, + 381 + ], + "type": "inline_equation", + "content": "V_{H}" + }, + { + "bbox": [ + 55, + 313, + 555, + 381 + ], + "type": "text", + "content": " . Then, we use a transformer-based encoder and decoder to get reconstructed features " + }, + { + "bbox": [ + 55, + 313, + 555, + 381 + ], + "type": "inline_equation", + "content": "\mathcal{R}_f" + }, + { + "bbox": [ + 55, + 313, + 555, + 381 + ], + "type": "text", + "content": " . A reconstruction difference map " + }, + { + "bbox": [ + 55, + 313, + 555, + 381 + ], + "type": "inline_equation", + "content": "\mathcal{D}_S" + }, + { + "bbox": [ + 55, + 313, + 555, + 381 + ], + "type": "text", + "content": " is obtained and goes through the multi-scale convolutional network to refine forgery features. Finally, we concatenate the CLIP's CLS token with this output along the same dimension for classification. The whole system is trained by jointly minimizing the binary cross-entropy loss " + }, + { + "bbox": [ + 55, + 313, + 555, + 381 + ], + "type": "inline_equation", + "content": "L_{bce}" + }, + { + "bbox": [ + 55, + 313, + 555, + 381 + ], + "type": "text", + "content": " , the reconstruction loss " + }, + { + "bbox": [ + 55, + 313, + 555, + 381 + ], + "type": "inline_equation", + "content": "L_{r}" + }, + { + "bbox": [ + 55, + 313, + 555, + 381 + ], + "type": "text", + "content": " , and the triplet loss " + }, + { + "bbox": [ + 55, + 313, + 555, + 381 + ], + "type": "inline_equation", + "content": "L_{tri}" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 392, + 296, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 392, + 296, + 691 + ], + "spans": [ + { + "bbox": [ + 56, + 392, + 296, + 691 + ], + "type": "text", + "content": "the common forgery traces between all real and fake images, such as noise patterns, texture statistics, and frequency signals. As an illustration, Liu et al. [24] designed a network that learns the consistent noise patterns in images for fake detection. Liu et al. [28] proposed to leverage the Gram matrix to discover the global anomalous texture of fake images. An effective approach [13] demonstrated that frequency representation is an important factor in improving fake detection performance. However, these cues are narrowly tied to specific low-level features, which leads to overfitting. Cutting-edge research [26, 32] shifted attention toward the semantic properties of images. Ojha et al. [32] showed that projecting images into the feature space of a pre-trained vision-language model enables strong generalization ability. To build generalized forgery representations, Liu et al. [26] constructed a forgery-adaptive space with a forgery-aware adapter. The above research [5, 26, 32] has suggested that concept attributes are vital in the image forgery detection task. Assuming that diffusion-based models leave distinct forgery traces that are characteristic of specific concept distributions, we aim to extract robust forgery features guided by semantic concepts, rather than suppressing them. Therefore, even \"useless\" information can be useful by providing significant certainty about the content of the image."
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 692, + 295, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 692, + 295, + 704 + ], + "spans": [ + { + "bbox": [ + 55, + 692, + 295, + 704 + ], + "type": "text", + "content": "Reconstruction Learning. Reconstruction learning has" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 313, + 392, + 556, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 392, + 556, + 609 + ], + "spans": [ + { + "bbox": [ + 313, + 392, + 556, + 609 + ], + "type": "text", + "content": "great potential in unsupervised representation learning [16, 27]. Some works [5, 39] utilized reconstruction learning to reveal the nuances between real and fake images. For example, Wang et al. [51] found that reconstructing images by DDIM exposes an error between real images and their reconstructed replica. The new synthetic image detection method[5] used text-conditioned inversion maps to learn internal representations, which is conducive to predicting whether an image is fake. Ricker et al. [39] offered a simple detection approach by applying AE to measure the reconstruction error. Notably, these works are committed to reconstructing the distributions of both real and fake samples by leveraging generative models. Unlike previous works, we focus solely on reconstructing real images in the finetuned CLIP space in light of the authenticity and richness of semantic concepts. The distribution of real samples, learned from pre-trained vision-language model, helps to define an optimal boundary, thus alleviating overfitting." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 620, + 397, + 634 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 620, + 397, + 634 + ], + "spans": [ + { + "bbox": [ + 313, + 620, + 397, + 634 + ], + "type": "text", + "content": "3. Methodology" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 642, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 642, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 642, + 556, + 715 + ], + "type": "text", + "content": "Our goal is to align forgery and visual semantic concept spaces using reconstruction techniques for robust and generalizable synthetic image detection. To achieve this, we introduce a fine-grained model named Semantic Discrepancy-aware Detector (SDD). Building on prior works, we harness the generalization capability of vision-language mod" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "18390" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 54, + 72, + 294, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 72, + 294, + 156 + ], + "spans": [ + { + "bbox": [ + 54, + 72, + 294, + 156 + ], + "type": "text", + "content": "els. Semantic concept space: The ideal joint embedding space of images and texts with four properties: semantic alignment, modality invariance, locality consistency, and structure preservation. Forgery space: The ideal space covers forgery traces. 
Notably, we derive the semantic concept space via a vision-language model pretrained solely on real images; thus we treat the two spaces as independent." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 157, + 295, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 157, + 295, + 300 + ], + "spans": [ + { + "bbox": [ + 55, + 157, + 295, + 300 + ], + "type": "text", + "content": "First, the Semantic Tokens Sampling (STS) module utilizes Jensen-Shannon (JS) divergence to sample semantic patch tokens, which serve as a transitional bridge, facilitating the model to accurately establish the association between real and forged images. Next, the Concept-level Forgery Discrepancy Learning (CFDL) module employs reconstruction learning to explore the forgery discrepancies within the visual semantic concept space, which focuses on identifying subtle variations between reconstructed forgery features. Finally, the reconstruction difference map is trained with the Low-level Forgery Feature Enhancement module, which aims to refine forgery features with more visual details." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 307, + 205, + 319 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 307, + 205, + 319 + ], + "spans": [ + { + "bbox": [ + 55, + 307, + 205, + 319 + ], + "type": "text", + "content": "3.1. Semantic Tokens Sampling" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 324, + 296, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 324, + 296, + 491 + ], + "spans": [ + { + "bbox": [ + 55, + 324, + 296, + 491 + ], + "type": "text", + "content": "Initially, we considered directly aligning the visual semantic concept space and forgery space by leveraging fine-grained reconstruction learning to model real and fake semantic distributions. However, this strategy would treat the differences in features unrelated to semantics and forgery as crucial factors for identifying an image's authenticity. To eliminate these redundant features, we sample real semantic image patch tokens as visual clues to bridge the real and forged semantic domains. This module enables the model to focus on concept-related forgery traces and highlight the distinctions between real and fake images. Concretely, the image encoder of CLIP ViT-L/14 is adapted to transform a real image " + }, + { + "bbox": [ + 55, + 324, + 296, + 491 + ], + "type": "inline_equation", + "content": "x_{r}" + }, + { + "bbox": [ + 55, + 324, + 296, + 491 + ], + "type": "text", + "content": " into a set of features " + }, + { + "bbox": [ + 55, + 324, + 296, + 491 + ], + "type": "inline_equation", + "content": "f_{r}" + }, + { + "bbox": [ + 55, + 324, + 296, + 491 + ], + "type": "text", + "content": ", without the image CLS token. 
We define the transformation as:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 149, + 501, + 295, + 514 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 501, + 295, + 514 + ], + "spans": [ + { + "bbox": [ + 149, + 501, + 295, + 514 + ], + "type": "interline_equation", + "content": "f _ {r} = \phi (x _ {r}), \tag {1}", + "image_path": "f927597f238a779d31e5a487eb53bd3da19e4faf6d892f20d6521945abd9926c.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 521, + 295, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 521, + 295, + 582 + ], + "spans": [ + { + "bbox": [ + 55, + 521, + 295, + 582 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 521, + 295, + 582 + ], + "type": "inline_equation", + "content": "\phi (\cdot)" + }, + { + "bbox": [ + 55, + 521, + 295, + 582 + ], + "type": "text", + "content": " is the visual encoder of CLIP ViT-L/14, " + }, + { + "bbox": [ + 55, + 521, + 295, + 582 + ], + "type": "inline_equation", + "content": "x_{r}\in" + }, + { + "bbox": [ + 55, + 521, + 295, + 582 + ], + "type": "inline_equation", + "content": "\mathbb{I}_r^{H\times W\times 3}" + }, + { + "bbox": [ + 55, + 521, + 295, + 582 + ], + "type": "text", + "content": " represents a real image characterized by a height of " + }, + { + "bbox": [ + 55, + 521, + 295, + 582 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 55, + 521, + 295, + 582 + ], + "type": "text", + "content": " and a width of " + }, + { + "bbox": [ + 55, + 521, + 295, + 582 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 55, + 521, + 295, + 582 + ], + "type": "text", + "content": ". Besides, " + }, + { + "bbox": [ + 55, + 521, + 295, + 582 + ], + "type": "inline_equation", + "content": "f_{r}\in \mathbb{R}^{N\times D}" + }, + { + "bbox": [ + 55, + 521, + 295, + 582 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 55, + 521, + 295, + 582 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 55, + 521, + 295, + 582 + ], + "type": "text", + "content": " is the number of tokens and " + }, + { + "bbox": [ + 55, + 521, + 295, + 582 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 55, + 521, + 295, + 582 + ], + "type": "text", + "content": " denotes the dimension of each patch token." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 582, + 295, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 582, + 295, + 677 + ], + "spans": [ + { + "bbox": [ + 55, + 582, + 295, + 677 + ], + "type": "text", + "content": "Since integrating all real patch tokens into the image reconstruction module is computationally intensive and memory-consuming, it is necessary to select a suitable subset of these tokens. From a distribution perspective, the Jensen-Shannon (JS) divergence, derived from the Kullback-Leibler divergence [48], is a symmetric and finite metric that can effectively measure the similarity between tokens by quantifying differences in their distributions." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "text", + "content": "To calculate the JS divergence between two tokens, both are converted into probability distributions using the softmax function. 
Let " + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "inline_equation", + "content": "f_{s} \\in \\mathbb{F}_{r}^{M \\times D}" + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "text", + "content": " be the selected" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "text", + "content": "semantic patch tokens with the num of tokens " + }, + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "inline_equation", + "content": "M = 1 / \\delta" + }, + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "text", + "content": " and dimension " + }, + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "text", + "content": " in terms of sampling rate " + }, + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "inline_equation", + "content": "0 \\leq \\delta \\leq 1" + }, + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "text", + "content": " is user-defined). Once the initial token " + }, + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "inline_equation", + "content": "\\tilde{r}" + }, + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "text", + "content": " are determined, the JS divergence between " + }, + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "inline_equation", + "content": "\\tilde{r}" + }, + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "text", + "content": " and other tokens " + }, + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "text", + "content": " falls within the range [0, 1]. Subsequently, the sampling interval is split into " + }, + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "text", + "content": " equal segments with one token selected from each segment. 
As a consequence, the semantic tokens sampling module can be formulated as:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 332, + 170, + 433, + 201 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 332, + 170, + 433, + 201 + ], + "spans": [ + { + "bbox": [ + 332, + 170, + 433, + 201 + ], + "type": "interline_equation", + "content": "\begin{array}{l} f _ {s} = \mathcal {S} \left(\mathbb {R} ^ {N \times D}, \delta\right) \\ = A _ {c} ^ {N _ {a} \times M} \times \mathbb {R} ^ {N \times D}, \end{array}", + "image_path": "b1673ef70479f64400aa62e1a90168013dfe36d23dbc75d23f8eb56c258378aa.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 324, + 203, + 553, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 203, + 553, + 230 + ], + "spans": [ + { + "bbox": [ + 324, + 203, + 553, + 230 + ], + "type": "text", + "content": "s.t. JS(softmax " + }, + { + "bbox": [ + 324, + 203, + 553, + 230 + ], + "type": "inline_equation", + "content": "(\tilde{r})" + }, + { + "bbox": [ + 324, + 203, + 553, + 230 + ], + "type": "text", + "content": ", softmax " + }, + { + "bbox": [ + 324, + 203, + 553, + 230 + ], + "type": "inline_equation", + "content": "(r)) = \frac{i}{M}" + }, + { + "bbox": [ + 324, + 203, + 553, + 230 + ], + "type": "text", + "content": ", if " + }, + { + "bbox": [ + 324, + 203, + 553, + 230 + ], + "type": "inline_equation", + "content": "a_{ij} = 1" + }, + { + "bbox": [ + 324, + 203, + 553, + 230 + ], + "type": "text", + "content": " (2)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 343, + 228, + 485, + 262 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 343, + 228, + 485, + 262 + ], + "spans": [ + { + "bbox": [ + 343, + 228, + 485, + 262 + ], + "type": "interline_equation", + "content": "\sum_ {i = 1} ^ {N _ {a}} \sum_ {j = 1} ^ {M} a _ {i j} = M; \sum_ {j = 1} ^ {M} a _ {i j} = 1 \text { or } 0,", + "image_path": "22131de446311ff01b584ce196be7d780869ef135d597925e22a2e48d3b6a0c3.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 341, + 264, + 465, + 277 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 341, + 264, + 465, + 277 + ], + "spans": [ + { + "bbox": [ + 341, + 264, + 465, + 277 + ], + "type": "interline_equation", + "content": "i = 1, \dots , N _ {a}; j = 1, \dots , M,", + "image_path": "45ffcb7cbdb7365aeb1d69b12ccd78f601b1f42cd109261d8ec31750b7a6add6.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "spans": [ + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "type": "inline_equation", + "content": "S(\cdot, \cdot)" + }, + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "type": "text", + "content": " represents the sampling process. 
" + }, + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "type": "inline_equation", + "content": "A_{c}^{N_{a} \\times M}" + }, + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "type": "text", + "content": " is a constraint matrix of size " + }, + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "type": "inline_equation", + "content": "N_{a} \\times M" + }, + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "type": "text", + "content": " whose element " + }, + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "type": "inline_equation", + "content": "a_{ij}" + }, + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "type": "text", + "content": " is constrained to the binary pattern of " + }, + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "type": "inline_equation", + "content": "\\{0, 1\\}" + }, + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "type": "text", + "content": ". Here JS " + }, + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "type": "inline_equation", + "content": "(\\cdot)" + }, + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "type": "text", + "content": " refers to the Jensen-Shannon divergence, " + }, + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "type": "inline_equation", + "content": "N_{a}" + }, + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "type": "text", + "content": " denotes the total number of real image patch tokens sampled from the training dataset of UnivFD and " + }, + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "type": "text", + "content": " represents the required subset size. The softmax " + }, + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "type": "inline_equation", + "content": "(\\cdot)" + }, + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "type": "text", + "content": " is the softmax function. The sampling tokens help the reconstruction module avoid becoming biased towards any particular forgery-unrelated distribution. Meanwhile, it avoids the semantic bias often introduced by text prompts, since the tokens are evenly distributed in a unified CLIP space." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 433, + 542, + 446 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 433, + 542, + 446 + ], + "spans": [ + { + "bbox": [ + 313, + 433, + 542, + 446 + ], + "type": "text", + "content": "3.2. Concept-level Forged Discrepancy Learning" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 450, + 555, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 450, + 555, + 618 + ], + "spans": [ + { + "bbox": [ + 313, + 450, + 555, + 618 + ], + "type": "text", + "content": "A few words alone can hardly paint a picture. We argue that the fine-grained visual details can uncover more forgery traces concealed in the images. As such, we mix sampling tokens with extracted features and capitalize on reconstruction learning to compensate for the omission of forgery traces brought by coarse prompts. As previous work [26] has demonstrated that the pre-trained vision-language model necessitates fine-tuning to adapt to the forgery detection task. Therefore, we integrate LoRA [17] with the CLIP-ViT model to capture discriminative forgery features by making use of the bread semantic concepts. This method, denoted as LoRA-CLIP [54], is more streamlined and flexible. 
Given an input image " + }, + { + "bbox": [ + 313, + 450, + 555, + 618 + ], + "type": "inline_equation", + "content": "\mathcal{I} \in \mathbb{I}^{H \times W \times 3}" + }, + { + "bbox": [ + 313, + 450, + 555, + 618 + ], + "type": "text", + "content": ", we can get high-level visual features " + }, + { + "bbox": [ + 313, + 450, + 555, + 618 + ], + "type": "inline_equation", + "content": "V_{H}" + }, + { + "bbox": [ + 313, + 450, + 555, + 618 + ], + "type": "text", + "content": ", as follows:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 396, + 623, + 553, + 636 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 396, + 623, + 553, + 636 + ], + "spans": [ + { + "bbox": [ + 396, + 623, + 553, + 636 + ], + "type": "interline_equation", + "content": "V _ {H} = \mathcal {F} _ {LoRA} (\mathcal {I}). \tag {3}", + "image_path": "314740cadca648129e1efc3dc3692b99cf4e100e085964874ef8ba845ddc990a.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 641, + 555, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 641, + 555, + 712 + ], + "spans": [ + { + "bbox": [ + 313, + 641, + 555, + 712 + ], + "type": "text", + "content": "Here " + }, + { + "bbox": [ + 313, + 641, + 555, + 712 + ], + "type": "inline_equation", + "content": "\mathcal{F}_{LoRA}" + }, + { + "bbox": [ + 313, + 641, + 555, + 712 + ], + "type": "text", + "content": " refers to the CLIP image encoder fine-tuned by LoRA. The reconstruction module of CFDL encompasses two submodules, i.e., a transformer-based encoder and a decoder. Thanks to the transformer's capability of long-range relationship modeling, we capitalize on the multi-head attention (MHA) mechanism, the core mechanism of" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "18391" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 295, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 295, + 96 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 295, + 96 + ], + "type": "text", + "content": "the transformer, to obtain a more discriminative distribution by utilizing contextual information, defined as:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 104, + 295, + 179 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 104, + 295, + 179 + ], + "spans": [ + { + "bbox": [ + 66, + 104, + 295, + 179 + ], + "type": "interline_equation", + "content": "\left\{ \begin{array}{r l} \operatorname {head} _ {i} & = \operatorname {Attn} \left(Q W _ {i} ^ {Q}, K W _ {i} ^ {K}, V W _ {i} ^ {V}\right) \\ & = \operatorname {Softmax} \left(\frac {Q W _ {i} ^ {Q} \left(K W _ {i} ^ {K}\right) ^ {\top}}{\sqrt {d}}\right) V W _ {i} ^ {V}, \\ \operatorname {MHA} (Q, K, V) & = \operatorname {Concat} \left(\operatorname {head} _ {1}, \dots , \operatorname {head} _ {h}\right) W ^ {O}, \end{array} \right. 
\\tag {4}", + "image_path": "c175dbe07601fc84121dccf61a36fc2ea1d94595dba67b1068d5f85ac84ce077.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 180, + 296, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 180, + 296, + 263 + ], + "spans": [ + { + "bbox": [ + 55, + 180, + 296, + 263 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 180, + 296, + 263 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 55, + 180, + 296, + 263 + ], + "type": "text", + "content": " (Query), " + }, + { + "bbox": [ + 55, + 180, + 296, + 263 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 55, + 180, + 296, + 263 + ], + "type": "text", + "content": " (Key), " + }, + { + "bbox": [ + 55, + 180, + 296, + 263 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 55, + 180, + 296, + 263 + ], + "type": "text", + "content": " (Value) refer to the input, " + }, + { + "bbox": [ + 55, + 180, + 296, + 263 + ], + "type": "inline_equation", + "content": "W_{i}^{Q}, W_{i}^{K}, W_{i}^{V}" + }, + { + "bbox": [ + 55, + 180, + 296, + 263 + ], + "type": "text", + "content": " separately denote the corresponding weights of linear projection, " + }, + { + "bbox": [ + 55, + 180, + 296, + 263 + ], + "type": "inline_equation", + "content": "\\mathrm{Attn}(\\cdot)" + }, + { + "bbox": [ + 55, + 180, + 296, + 263 + ], + "type": "text", + "content": " denotes the function of the scaled dot product, Softmax is the softmax function, " + }, + { + "bbox": [ + 55, + 180, + 296, + 263 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 55, + 180, + 296, + 263 + ], + "type": "text", + "content": " refers to the dimension of input, Concat (" + }, + { + "bbox": [ + 55, + 180, + 296, + 263 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 55, + 180, + 296, + 263 + ], + "type": "text", + "content": ") represents the concatenation used to stitch the discrete attention outputs of head " + }, + { + "bbox": [ + 55, + 180, + 296, + 263 + ], + "type": "inline_equation", + "content": "1 \\sim h" + }, + { + "bbox": [ + 55, + 180, + 296, + 263 + ], + "type": "text", + "content": " together." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 264, + 296, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 264, + 296, + 312 + ], + "spans": [ + { + "bbox": [ + 55, + 264, + 296, + 312 + ], + "type": "text", + "content": "To amplify the discrepancy between a fake image and its reconstructed counterpart, the sampled visual clues are employed for the initial processing by the encoder. 
The encoder's process can be formulated as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 110, + 322, + 294, + 335 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 322, + 294, + 335 + ], + "spans": [ + { + "bbox": [ + 110, + 322, + 294, + 335 + ], + "type": "interline_equation", + "content": "R _ {1} = \operatorname {LN} \left(\mathrm {MHA} \left(f _ {s}, V _ {H}, V _ {H}\right)\right), \tag {5}", + "image_path": "df6b3ec424ab3677b5fc6a432a0f5df2401f7f07dfd5de79679080faf3fd1550.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 337, + 294, + 350 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 337, + 294, + 350 + ], + "spans": [ + { + "bbox": [ + 111, + 337, + 294, + 350 + ], + "type": "interline_equation", + "content": "R _ {2} = \operatorname {LN} \left(\mathrm {MHA} \left(R _ {1}, V _ {H}, V _ {H}\right)\right), \tag {6}", + "image_path": "e8a28d88ab4d834415bcb0a67ba5a44365295d5ea0021622d7905beab46893db.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 359, + 296, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 359, + 296, + 418 + ], + "spans": [ + { + "bbox": [ + 55, + 359, + 296, + 418 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 359, + 296, + 418 + ], + "type": "inline_equation", + "content": "\mathrm{LN}(\cdot)" + }, + { + "bbox": [ + 55, + 359, + 296, + 418 + ], + "type": "text", + "content": " denotes the Layer Normalization. Then, the encoder's outputs used as queries are injected into the decoder to get the final reconstructed features. The decoder mirrors the encoder process and performs the following operations:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 429, + 294, + 441 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 429, + 294, + 441 + ], + "spans": [ + { + "bbox": [ + 111, + 429, + 294, + 441 + ], + "type": "interline_equation", + "content": "R _ {3} = \mathrm {LN} (\mathrm {MHA} (R _ {2}, R _ {2}, R _ {2})), \tag {7}", + "image_path": "aa92fde74c974d56acdb710d5503650cf873df09f827e5f18f01f71635b0971c.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 113, + 444, + 294, + 456 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 444, + 294, + 456 + ], + "spans": [ + { + "bbox": [ + 113, + 444, + 294, + 456 + ], + "type": "interline_equation", + "content": "R _ {e} = \mathrm {LN} (\mathrm {MHA} \left(R _ {1}, R _ {3}, R _ {3}\right)). 
\\tag {8}", + "image_path": "41c58f4ee86a40e85c45405934de624b27c459aacdb4aa89d0900ca8f56d104b.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 466, + 296, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 466, + 296, + 512 + ], + "spans": [ + { + "bbox": [ + 55, + 466, + 296, + 512 + ], + "type": "text", + "content": "During the reconstruction process, we just calculate the reconstruction loss " + }, + { + "bbox": [ + 55, + 466, + 296, + 512 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_r" + }, + { + "bbox": [ + 55, + 466, + 296, + 512 + ], + "type": "text", + "content": " between the real input features and their reconstructed counterparts " + }, + { + "bbox": [ + 55, + 466, + 296, + 512 + ], + "type": "inline_equation", + "content": "\\mathcal{R}_e" + }, + { + "bbox": [ + 55, + 466, + 296, + 512 + ], + "type": "text", + "content": " within a mini-batch as follows:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 116, + 511, + 295, + 543 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 511, + 295, + 543 + ], + "spans": [ + { + "bbox": [ + 116, + 511, + 295, + 543 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {r} = \\frac {1}{B} \\sum_ {i = 0} ^ {B} \\operatorname {M S E} \\left(R _ {e}, V _ {H}\\right), \\tag {9}", + "image_path": "be3a4806567dafc287eefb6cd45fc30d8fbe79ce80ec399304f6c863e40b472b.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 548, + 296, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 548, + 296, + 621 + ], + "spans": [ + { + "bbox": [ + 55, + 548, + 296, + 621 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 548, + 296, + 621 + ], + "type": "inline_equation", + "content": "\\mathrm{MSE}(\\cdot ,\\cdot)" + }, + { + "bbox": [ + 55, + 548, + 296, + 621 + ], + "type": "text", + "content": " is mean squared error. Facilitating " + }, + { + "bbox": [ + 55, + 548, + 296, + 621 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_r" + }, + { + "bbox": [ + 55, + 548, + 296, + 621 + ], + "type": "text", + "content": " encourages preserving the completeness and richness of the visual semantic concept space and highlighting the concept-related forgery features. 
Given the reconstructed features " + }, + { + "bbox": [ + 55, + 548, + 296, + 621 + ], + "type": "inline_equation", + "content": "R_{f}" + }, + { + "bbox": [ + 55, + 548, + 296, + 621 + ], + "type": "text", + "content": " and the original features " + }, + { + "bbox": [ + 55, + 548, + 296, + 621 + ], + "type": "inline_equation", + "content": "f_{r}" + }, + { + "bbox": [ + 55, + 548, + 296, + 621 + ], + "type": "text", + "content": ", the reconstruction difference map can be formally expressed as:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 139, + 630, + 295, + 643 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 630, + 295, + 643 + ], + "spans": [ + { + "bbox": [ + 139, + 630, + 295, + 643 + ], + "type": "interline_equation", + "content": "\mathcal {D} _ {s} = \left| R _ {f} - f _ {r} \right|, \tag {10}", + "image_path": "fdbca3dba04bcb0edfb29513e8d25a6eab435c19726612566d32df084f949629.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 652, + 239, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 652, + 239, + 664 + ], + "spans": [ + { + "bbox": [ + 55, + 652, + 239, + 664 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 652, + 239, + 664 + ], + "type": "inline_equation", + "content": "|\cdot |" + }, + { + "bbox": [ + 55, + 652, + 239, + 664 + ], + "type": "text", + "content": " denotes the absolute value function." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 55, + 672, + 270, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 672, + 270, + 685 + ], + "spans": [ + { + "bbox": [ + 55, + 672, + 270, + 685 + ], + "type": "text", + "content": "3.3. Low-level Forgery Feature Enhancement" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "content": "Existing methods based on pre-trained vision-language models [26, 32] overlook the importance of concept" + } + ] + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 358, + 72, + 512, + 196 + ], + "blocks": [ + { + "bbox": [ + 358, + 72, + 512, + 196 + ], + "lines": [ + { + "bbox": [ + 358, + 72, + 512, + 196 + ], + "spans": [ + { + "bbox": [ + 358, + 72, + 512, + 196 + ], + "type": "image", + "image_path": "de568ffeb00d073298b5f7dc1c0e06b896ff4b86f32ec8d1e4c74be043a2f038.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 202, + 553, + 236 + ], + "lines": [ + { + "bbox": [ + 313, + 202, + 553, + 236 + ], + "spans": [ + { + "bbox": [ + 313, + 202, + 553, + 236 + ], + "type": "text", + "content": "Figure 4. The curve of the exponential inverse. In the \"fast\" interval, the value drops sharply. In the \"low\" interval, the curve flattens out, showing a decay towards 0." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 243, + 555, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 243, + 555, + 447 + ], + "spans": [ + { + "bbox": [ + 313, + 243, + 555, + 447 + ], + "type": "text", + "content": "weakly-related features. We believe that a thorough alignment between the visual semantic concept space and the forgery space should include the exploration of concept weakly-related forgery features. 
To eliminate redundant forgery features, we propose a novel feature enhancement module that refines low-level forgery features. Empowered by the reconstruction difference map, our detector extracts multi-scale features that are markedly more robust and effective. As shown in Fig. 3, the enhancer follows the typical architecture of a convolutional network. It involves the repeated application of convolutions, each followed by a batch normalization (BN) and a rectified linear unit (ReLU). For a given stage " + }, + { + "bbox": [ + 313, + 243, + 555, + 447 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 313, + 243, + 555, + 447 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 243, + 555, + 447 + ], + "type": "inline_equation", + "content": "F(n)" + }, + { + "bbox": [ + 313, + 243, + 555, + 447 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 313, + 243, + 555, + 447 + ], + "type": "inline_equation", + "content": "n = 1,2,3" + }, + { + "bbox": [ + 313, + 243, + 555, + 447 + ], + "type": "text", + "content": ") corresponds to its output features. Then, we deconvolve the semantic difference map " + }, + { + "bbox": [ + 313, + 243, + 555, + 447 + ], + "type": "inline_equation", + "content": "D_{s}" + }, + { + "bbox": [ + 313, + 243, + 555, + 447 + ], + "type": "text", + "content": " to the same shape as " + }, + { + "bbox": [ + 313, + 243, + 555, + 447 + ], + "type": "inline_equation", + "content": "F(n)" + }, + { + "bbox": [ + 313, + 243, + 555, + 447 + ], + "type": "text", + "content": " and perform pixel-wise multiplication with " + }, + { + "bbox": [ + 313, + 243, + 555, + 447 + ], + "type": "inline_equation", + "content": "F(n)" + }, + { + "bbox": [ + 313, + 243, + 555, + 447 + ], + "type": "text", + "content": " to get " + }, + { + "bbox": [ + 313, + 243, + 555, + 447 + ], + "type": "inline_equation", + "content": "F'(n)" + }, + { + "bbox": [ + 313, + 243, + 555, + 447 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 371, + 456, + 553, + 471 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 371, + 456, + 553, + 471 + ], + "spans": [ + { + "bbox": [ + 371, + 456, + 553, + 471 + ], + "type": "interline_equation", + "content": "F^{\\prime}(n) = F(n) \\otimes \\operatorname{deconv}(D_{s}), \\tag{11}", + "image_path": "08999378248378ab40d4ad095d40cc5d59f086f722d8b22410f8d387efc09933.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 480, + 554, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 480, + 554, + 552 + ], + "spans": [ + { + "bbox": [ + 313, + 480, + 554, + 552 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 480, + 554, + 552 + ], + "type": "inline_equation", + "content": "\\otimes" + }, + { + "bbox": [ + 313, + 480, + 554, + 552 + ], + "type": "text", + "content": " is the element-wise multiplication, deconv " + }, + { + "bbox": [ + 313, + 480, + 554, + 552 + ], + "type": "inline_equation", + "content": "(\\cdot)" + }, + { + "bbox": [ + 313, + 480, + 554, + 552 + ], + "type": "text", + "content": " represents the deconvolution operation, and " + }, + { + "bbox": [ + 313, + 480, + 554, + 552 + ], + "type": "inline_equation", + "content": "F'(n)" + }, + { + "bbox": [ + 313, + 480, + 554, + 552 + ], + "type": "text", + "content": " is the low-level feature aggregated with semantic information. 
To further enhance the reliability of the extracted features, we compute an adaptive weight coefficient " + }, + { + "bbox": [ + 313, + 480, + 554, + 552 + ], + "type": "inline_equation", + "content": "\\frac{1}{e_n}" + }, + { + "bbox": [ + 313, + 480, + 554, + 552 + ], + "type": "text", + "content": " to indicate the importance of " + }, + { + "bbox": [ + 313, + 480, + 554, + 552 + ], + "type": "inline_equation", + "content": "D_{s}" + }, + { + "bbox": [ + 313, + 480, + 554, + 552 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 313, + 480, + 554, + 552 + ], + "type": "inline_equation", + "content": "F(n)" + }, + { + "bbox": [ + 313, + 480, + 554, + 552 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 391, + 560, + 553, + 586 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 391, + 560, + 553, + 586 + ], + "spans": [ + { + "bbox": [ + 391, + 560, + 553, + 586 + ], + "type": "interline_equation", + "content": "\\frac{1}{e_{n}} = \\frac{1}{e^{\\left| F^{\\prime}(n) - F(n) \\right|}}. \\tag{12}", + "image_path": "2a8f6ac00182802802a88d0885ef01cecad3c4c78b9b298c6bbe26df378045.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "text", + "content": "Here we explain the role of the exponential inverse through Fig. 4. As " + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "text", + "content": " grows large, the curve of " + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "inline_equation", + "content": "e^{-x}" + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "text", + "content": " becomes flatter. Therefore, in the \"fast\" interval, forgery features with a significant divergence from the semantic difference map will be assigned smaller weights, which encourages the network to capture concept strongly-related features. However, in the \"low\" interval, features strongly associated with forgery can avoid being misguided by semantic concepts, which indicates that the order of importance is reversed. Next, we obtain the attended output features " + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "inline_equation", + "content": "F_{low}" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "18392" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 58, + 70, + 555, + 205 + ], + "blocks": [ + { + "bbox": [ + 58, + 70, + 555, + 205 + ], + "lines": [ + { + "bbox": [ + 58, + 70, + 555, + 205 + ], + "spans": [ + { + "bbox": [ + 58, + 70, + 555, + 205 + ], + "type": "table", + "html": "
Methods | Ref | GAN | Deep fakes | Low level | Perceptual loss | Guided | LDM | Glide | Dalle | mAP
Pro-GAN | Cycle-GAN | Big-GAN | Style-GAN | Gau-GAN | Star-GAN | SITD | SAN | CRN | IMLE | 200 Steps | 200 w/ CFG | 100 Steps | 100 27 | 50 27 | 100 10
CNN-Spot | CVPR2020 | 100.0 | 93.47 | 84.50 | 99.54 | 89.49 | 98.15 | 89.02 | 73.75 | 59.47 | 98.24 | 98.40 | 73.72 | 70.62 | 71.00 | 70.54 | 80.65 | 84.91 | 82.07 | 70.59 | 83.58
PatchFor | ECCV2020 | 80.88 | 72.84 | 71.66 | 85.75 | 65.99 | 69.25 | 76.55 | 76.19 | 76.34 | 74.52 | 68.52 | 75.03 | 87.10 | 86.72 | 86.40 | 85.37 | 83.73 | 78.38 | 75.67 | 77.73
Co-occurrence | Elect.Imag. | 99.74 | 80.95 | 50.61 | 98.63 | 53.11 | 67.99 | 59.14 | 68.98 | 60.42 | 73.06 | 87.21 | 70.20 | 91.21 | 89.02 | 92.39 | 89.32 | 88.35 | 82.79 | 80.96 | 78.11
Freq-spec | WIFS2019 | 55.39 | 100.0 | 75.08 | 55.11 | 66.08 | 100.0 | 45.18 | 47.46 | 57.12 | 53.61 | 50.98 | 57.72 | 77.72 | 77.25 | 76.47 | 68.58 | 64.58 | 61.92 | 67.77 | 66.21
Dire | ICCV2023 | 100.0 | 83.59 | 81.50 | 96.50 | 81.70 | 99.88 | 95.73 | 62.51 | 69.98 | 97.31 | 98.62 | 79.53 | 75.52 | 73.42 | 76.45 | 86.28 | 89.00 | 88.34 | 51.35 | 83.54
UnivFD | CVPR2023 | 100.0 | 99.46 | 99.59 | 97.24 | 99.98 | 99.60 | 82.45 | 61.32 | 79.02 | 96.72 | 99.00 | 87.77 | 99.14 | 92.15 | 99.17 | 94.74 | 95.34 | 94.57 | 97.15 | 93.38
NPR | CVPR2024 | 100.0 | 99.50 | 96.50 | 99.80 | 96.80 | 100.0 | 92.20 | 73.10 | 78.70 | 87.20 | 64.80 | 65.80 | 99.80 | 99.80 | 99.80 | 99.70 | 99.80 | 99.80 | 98.60 | 92.19
FatFormer | CVPR2024 | 100.0 | 100.0 | 99.98 | 99.75 | 100.0 | 100.0 | 97.99 | 97.94 | 81.23 | 99.84 | 99.93 | 91.92 | 99.83 | 99.22 | 99.89 | 99.27 | 99.50 | 99.33 | 99.84 | 98.18
Ours | - | 100.0 | 99.77 | 99.93 | 99.48 | 99.98 | 99.97 | 97.23 | 97.91 | 93.10 | 99.79 | 99.96 | 92.06 | 99.88 | 98.95 | 99.92 | 98.06 | 98.29 | 97.73 | 99.81 | 98.51
", + "image_path": "87d0f709d3c5beebf16fa5d4102d64bc12877c31ade452d4291d5c56b5d7b629.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 57, + 257, + 555, + 393 + ], + "blocks": [ + { + "bbox": [ + 55, + 213, + 555, + 247 + ], + "lines": [ + { + "bbox": [ + 55, + 213, + 555, + 247 + ], + "spans": [ + { + "bbox": [ + 55, + 213, + 555, + 247 + ], + "type": "text", + "content": "Table 1. Average precision comparisons with different methods on the UnivFD dataset. We replicate the results of CNNSpot, Patchfor, Co-occurrence, Freq-spec, and UnivFD from the paper[32]. In addition, we obtained the results for Dire, NPR and FatFormer using either the official pre-trained models or our re-implemented versions. Red and underline indicates the best and the second best result, respectively." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 57, + 257, + 555, + 393 + ], + "lines": [ + { + "bbox": [ + 57, + 257, + 555, + 393 + ], + "spans": [ + { + "bbox": [ + 57, + 257, + 555, + 393 + ], + "type": "table", + "html": "
Methods | Ref | GAN | Deep fakes | Low level | Perceptual loss | Guided | LDM | Glide | Dalle | Avg-acc
Pro-GAN | Cycle-GAN | Big-GAN | Style-GAN | Gau-GAN | Star-GAN | SITD | SAN | CRN | IMLE | 200 Steps | 200 w/ CFG | 100 Steps | 100 27 | 50 27 | 100 10
CNN-Spot | CVPR2020 | 99.99 | 85.20 | 70.20 | 85.70 | 78.95 | 91.70 | 53.47 | 66.67 | 48.69 | 86.31 | 86.26 | 60.07 | 54.03 | 54.96 | 54.14 | 60.78 | 63.80 | 65.66 | 55.58 | 69.58
PatchFor | ECCV2020 | 75.03 | 68.97 | 68.47 | 79.16 | 64.23 | 63.94 | 75.54 | 75.14 | 75.28 | 72.33 | 55.30 | 67.41 | 76.50 | 76.10 | 75.77 | 74.81 | 73.28 | 68.52 | 67.91 | 71.24
Co-occurrence | Elect.Imag. | 97.70 | 63.15 | 53.75 | 92.50 | 51.10 | 54.70 | 57.10 | 63.06 | 55.85 | 65.65 | 65.80 | 60.50 | 70.70 | 70.55 | 71.00 | 70.25 | 69.60 | 69.90 | 67.55 | 66.86
Freq-spec | WIFS2019 | 49.90 | 99.90 | 50.50 | 49.90 | 50.30 | 99.70 | 50.10 | 50.00 | 48.00 | 50.60 | 50.10 | 50.90 | 50.40 | 50.40 | 50.30 | 51.70 | 51.40 | 50.40 | 50.00 | 55.45
Dire | ICCV2023 | 99.86 | 73.47 | 60.68 | 72.39 | 65.15 | 93.60 | 88.86 | 52.78 | 56.39 | 90.07 | 94.05 | 61.05 | 59.35 | 59.95 | 60.65 | 69.30 | 72.70 | 71.00 | 52.75 | 71.19
UnivFD | CVPR2023 | 100.0 | 98.50 | 94.50 | 82.00 | 99.50 | 97.00 | 66.60 | 63.00 | 57.50 | 59.50 | 72.00 | 70.03 | 94.19 | 73.76 | 94.36 | 79.07 | 79.85 | 78.14 | 86.78 | 81.38
NPR | CVPR2024 | 99.80 | 92.00 | 89.50 | 96.30 | 87.60 | 99.70 | 79.40 | 61.40 | 70.60 | 74.50 | 57.10 | 55.23 | 97.40 | 98.70 | 97.90 | 97.00 | 97.90 | 97.00 | 88.80 | 86.20
FatFormer | CVPR2024 | 99.89 | 99.36 | 99.50 | 97.12 | 99.43 | 99.75 | 93.25 | 81.39 | 68.04 | 69.47 | 69.47 | 76.00 | 98.55 | 94.85 | 98.60 | 94.30 | 94.60 | 94.15 | 98.70 | 90.86
Ours | - | 99.88 | 95.76 | 96.70 | 98.08 | 98.46 | 99.17 | 91.82 | 83.61 | 77.45 | 95.40 | 96.47 | 79.55 | 98.05 | 94.60 | 98.25 | 92.20 | 93.35 | 91.80 | 98.00 | 93.61
", + "image_path": "2ce8a5a892e90254564779c552ba0bbf2b349de34e09a0b5faf6cd58b3ff19c3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 161, + 401, + 447, + 412 + ], + "lines": [ + { + "bbox": [ + 161, + 401, + 447, + 412 + ], + "spans": [ + { + "bbox": [ + 161, + 401, + 447, + 412 + ], + "type": "text", + "content": "Table 2. Accuracy comparisons with different methods on the UnivFD dataset." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 420, + 167, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 420, + 167, + 431 + ], + "spans": [ + { + "bbox": [ + 55, + 420, + 167, + 431 + ], + "type": "text", + "content": "by the residual connection:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 116, + 437, + 294, + 464 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 437, + 294, + 464 + ], + "spans": [ + { + "bbox": [ + 116, + 437, + 294, + 464 + ], + "type": "interline_equation", + "content": "F _ {l o w} (n) = F ^ {\\prime} (n) + \\frac {F ^ {\\prime} (n)}{e _ {n}}. \\tag {13}", + "image_path": "991a2b93fcf68991da2ea441485030fb0ba0c01a67a96ef081cd220d9243405b.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 463, + 295, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 463, + 295, + 500 + ], + "spans": [ + { + "bbox": [ + 55, + 463, + 295, + 500 + ], + "type": "text", + "content": "For optimizing the anchor features " + }, + { + "bbox": [ + 55, + 463, + 295, + 500 + ], + "type": "inline_equation", + "content": "\\tilde{f}_a" + }, + { + "bbox": [ + 55, + 463, + 295, + 500 + ], + "type": "text", + "content": " of the enhancer, the following triplet loss [35] is employed to bring positive samples " + }, + { + "bbox": [ + 55, + 463, + 295, + 500 + ], + "type": "inline_equation", + "content": "f_{p}" + }, + { + "bbox": [ + 55, + 463, + 295, + 500 + ], + "type": "text", + "content": " closer while pushing negative samples " + }, + { + "bbox": [ + 55, + 463, + 295, + 500 + ], + "type": "inline_equation", + "content": "f_{n}" + }, + { + "bbox": [ + 55, + 463, + 295, + 500 + ], + "type": "text", + "content": " apart:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 78, + 505, + 294, + 520 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 505, + 294, + 520 + ], + "spans": [ + { + "bbox": [ + 78, + 505, + 294, + 520 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {t r i} = \\max \\left(0, d \\left(f _ {p}, f _ {a}\\right) - d \\left(f _ {n}, f _ {a}\\right) + \\alpha\\right), \\tag {14}", + "image_path": "53d151e6cf2f196bee8193296abb356b2ebb4787ee0a1cf02479e20ffd8cb315.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 525, + 295, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 525, + 295, + 548 + ], + "spans": [ + { + "bbox": [ + 55, + 525, + 295, + 548 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 525, + 295, + 548 + ], + "type": "inline_equation", + "content": "d(\\cdot)" + }, + { + "bbox": [ + 55, + 525, + 295, + 548 + ], + "type": "text", + "content": " represents the Euclidean distance between samples and " + }, + { + "bbox": [ + 55, + 525, + 295, + 548 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 55, + 525, + 295, + 548 + ], + "type": "text", + "content": " is the margin." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 548, + 296, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 548, + 296, + 620 + ], + "spans": [ + { + "bbox": [ + 55, + 548, + 296, + 620 + ], + "type": "text", + "content": "On top of that, we concatenate the LoRA-CLIP's CLS token " + }, + { + "bbox": [ + 55, + 548, + 296, + 620 + ], + "type": "inline_equation", + "content": "T_{CLS}" + }, + { + "bbox": [ + 55, + 548, + 296, + 620 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 55, + 548, + 296, + 620 + ], + "type": "inline_equation", + "content": "F_{low}" + }, + { + "bbox": [ + 55, + 548, + 296, + 620 + ], + "type": "text", + "content": " along the same dimension to yield the refined representation " + }, + { + "bbox": [ + 55, + 548, + 296, + 620 + ], + "type": "inline_equation", + "content": "F_{out}" + }, + { + "bbox": [ + 55, + 548, + 296, + 620 + ], + "type": "text", + "content": ". This ensures that forged features exhibit distinctiveness across different semantic identities while preserving their uniformity within similar semantic identities. The process is formulated as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 627, + 295, + 640 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 627, + 295, + 640 + ], + "spans": [ + { + "bbox": [ + 132, + 627, + 295, + 640 + ], + "type": "interline_equation", + "content": "F_{out} = F_{low} \\| T_{CLS}, \\tag{15}", + "image_path": "7197ac7b52a1f65c6eedd5a96fe6685331b72954d4d14567d12.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 646, + 296, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 646, + 296, + 693 + ], + "spans": [ + { + "bbox": [ + 55, + 646, + 296, + 693 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 646, + 296, + 693 + ], + "type": "inline_equation", + "content": "F_{out}" + }, + { + "bbox": [ + 55, + 646, + 296, + 693 + ], + "type": "text", + "content": " is fed into a linear classifier to perform binary classification. 
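As a sketch of Eq. (15) and the classification head, assuming a single linear layer whose input width is the concatenated feature dimension (the exact widths are not specified here):

```python
import torch
import torch.nn as nn

class ClassifierHead(nn.Module):
    """Sketch of Eq. (15): concatenate the enhanced low-level features F_low
    with the LoRA-CLIP CLS token T_CLS along the feature dimension, then map
    the fused representation F_out to a single real/fake logit."""

    def __init__(self, low_dim: int, cls_dim: int):
        super().__init__()
        self.fc = nn.Linear(low_dim + cls_dim, 1)  # linear binary classifier

    def forward(self, f_low: torch.Tensor, t_cls: torch.Tensor) -> torch.Tensor:
        f_out = torch.cat([f_low, t_cls], dim=-1)  # Eq. (15): F_out = F_low || T_CLS
        return self.fc(f_out)                      # logits for the BCE loss
```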
Eventually, the total loss function " + }, + { + "bbox": [ + 55, + 646, + 296, + 693 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 55, + 646, + 296, + 693 + ], + "type": "text", + "content": " of the proposed framework can be defined as:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 116, + 701, + 295, + 714 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 701, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 116, + 701, + 295, + 714 + ], + "type": "interline_equation", + "content": "\\mathcal{L} = \\mathcal{L}_{bce} + \\lambda_{1} \\mathcal{L}_{tri} + \\lambda_{2} \\mathcal{L}_{r}, \\tag{16}", + "image_path": "eeda4d37e8bdf767295153547708777738fe9e4ef89b079c76865f10857cc5f0.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 419, + 555, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 419, + 555, + 456 + ], + "spans": [ + { + "bbox": [ + 313, + 419, + 555, + 456 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 419, + 555, + 456 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{bce}" + }, + { + "bbox": [ + 313, + 419, + 555, + 456 + ], + "type": "text", + "content": " denotes the binary cross-entropy loss and " + }, + { + "bbox": [ + 313, + 419, + 555, + 456 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{tri}" + }, + { + "bbox": [ + 313, + 419, + 555, + 456 + ], + "type": "text", + "content": " is the triplet loss. " + }, + { + "bbox": [ + 313, + 419, + 555, + 456 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{tri}" + }, + { + "bbox": [ + 313, + 419, + 555, + 456 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 419, + 555, + 456 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_r" + }, + { + "bbox": [ + 313, + 419, + 555, + 456 + ], + "type": "text", + "content": " are scaled by the hyperparameters " + }, + { + "bbox": [ + 313, + 419, + 555, + 456 + ], + "type": "inline_equation", + "content": "\\lambda_{1}" + }, + { + "bbox": [ + 313, + 419, + 555, + 456 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 419, + 555, + 456 + ], + "type": "inline_equation", + "content": "\\lambda_{2}" + }, + { + "bbox": [ + 313, + 419, + 555, + 456 + ], + "type": "text", + "content": ", respectively." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 470, + 395, + 483 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 470, + 395, + 483 + ], + "spans": [ + { + "bbox": [ + 313, + 470, + 395, + 483 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 491, + 425, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 491, + 425, + 504 + ], + "spans": [ + { + "bbox": [ + 313, + 491, + 425, + 504 + ], + "type": "text", + "content": "4.1. Experiment Setups" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 510, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 510, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 510, + 555, + 715 + ], + "type": "text", + "content": "Datasets: We follow the protocol described in [32], using ProGAN's real and fake images as training data. Additionally, we adopt the protocol from [5], where the training data is composed of fake Stable Diffusion v1 images [52] and random real LAION images [41]. 
The UnivFD dataset [32] covers a broad range of generative models, primarily including GANs and diffusion models, such as ProGAN [18], StyleGAN [19], BigGAN [4], CycleGAN [59], StarGAN [10], GauGAN [46], CRN [9], IMLE [22], SAN [11], SITD [7], DeepFakes [20], Guided [12], Glide [31], LDM [40] and DALL-E [37]. The SynRIS dataset [5] is designed to avoid bias toward any specific topic, theme, or style and contains high-fidelity images generated by text-to-image models, such as Kandinsky2 [38], Kandinsky3 [1], PixArt- " + }, + { + "bbox": [ + 313, + 510, + 555, + 715 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 313, + 510, + 555, + 715 + ], + "type": "text", + "content": " [8], SDXL-DPO [49], SDXL [34], SegMoE [23], SSD-1B [42], Stable-Cascade [33], Segmind-Vega [15], Würstchen2 [33], Midjourney [29], DALL-E 3 [3]
Methods | CNN-Spot | Freq-spec | Dire | UnivFD | NPR | FatFormer | FakeInversion | PatchFor | Ours
Kandinsky2 | 60.0 | 57.0 | 71.6 | 56.2 | 97.5 | 75.6 | 69.9 | 53.5 | 97.13
Kandinsky3 | 65.9 | 45.7 | 74.9 | 61.4 | 93.7 | 80.1 | 74.3 | 51.4 | 94.8
PixArt-α | 62.7 | 56.4 | 81.5 | 64.7 | 89.5 | 75.3 | 73.0 | 49.6 | 90.2
SDXL-DPO | 84.3 | 69.8 | 69.9 | 70.2 | 97.6 | 86.0 | 88.1 | 54.5 | 95.5
Segmind-Vega | 74.2 | 65.3 | 81.0 | 62.3 | 97.1 | 82.4 | 81.1 | 53.1 | 97.0
SDXL | 81.4 | 61.2 | 86.2 | 66.3 | 96.0 | 85.1 | 80.7 | 63.9 | 97.6
Seg-MoE | 66.3 | 54.6 | 71.9 | 62.0 | 93.6 | 70.8 | 71.3 | 49.8 | 97.6
SSD-1B | 72.6 | 67.8 | 79.8 | 62.8 | 99.6 | 70.1 | 79.4 | 61.2 | 99.8
Stable-Cascade | 70.5 | 62.1 | 74.1 | 68.2 | 97.6 | 81.6 | 74.9 | 57.4 | 99.2
Würstchen2 | 61.0 | 63.3 | 74.2 | 69.7 | 90.9 | 72.9 | 70.5 | 47.2 | 98.2
Midjourney | 63.0 | 50.9 | 72.4 | 59.2 | 58.5 | 73.6 | 66.4 | 53.7 | 90.0
Playground | 58.2 | 52.3 | 67.9 | 58.7 | 93.1 | 81.4 | 62.5 | 54.1 | 92.8
DALL-E 3 | 71.6 | 59.9 | 80.8 | 48.0 | 69.1 | 79.2 | 75.9 | 50.1 | 85.9
Average | 68.6 | 58.9 | 75.9 | 62.3 | 90.3 | 78.0 | 74.5 | 53.8 | 95.1
", + "image_path": "21d2a7a74f868328d782afdbbed51e059299eb60369d9ada291a02909bf435ec.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 55, + 312, + 295, + 369 + ], + "lines": [ + { + "bbox": [ + 55, + 312, + 295, + 369 + ], + "spans": [ + { + "bbox": [ + 55, + 312, + 295, + 369 + ], + "type": "text", + "content": "Table 3. AUROC comparisons with different methods on the SynRIS dataset. We retrieve the results of CNNSpot, UnivFD, and FakeInversion from [5] and obtain the results for Dire, NPR, Fat-Former, and PatchFor using re-implemented models. Red and underline indicate the best and the second best result, respectively." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 377, + 145, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 377, + 145, + 389 + ], + "spans": [ + { + "bbox": [ + 55, + 377, + 145, + 389 + ], + "type": "text", + "content": "and playground [21]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 389, + 295, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 389, + 295, + 424 + ], + "spans": [ + { + "bbox": [ + 55, + 389, + 295, + 424 + ], + "type": "text", + "content": "Metrics: As standard evaluation metrics, the average precision (AP), the accuracy (ACC) and AUCROC are considered to measure the effectiveness of different methods." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 426, + 295, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 426, + 295, + 628 + ], + "spans": [ + { + "bbox": [ + 55, + 426, + 295, + 628 + ], + "type": "text", + "content": "Baselines: In our experiments, we perform thorough comparisons with state-of-the-art methods, as follows: 1) CNNSpot [50]: The method relies only on one CNN generator. 2) PatchFor [6]: The method performs detection on a patch level. 3) Co-occurrence [30]: The method converts input images into co-occurrence matrices for classification. 4) Freq-spec [57]: The method employs the frequency spectrum of images. 5) Dire [51]: The method exploits the error between an input image and its reconstruction counterpart. 6) UnivFD [32]: The method uses the pre-trained language-vision model to determine the authenticity of images. 7) NPR [45]: The method captures the generalized artifacts according to the local interdependence among image pixels. 8) FatFormer [26]: The method is aimed at extracting forgery-adaptive features based on UnivFD. 9) Fakeinversion [5]: The method employs text-conditioned inversion maps extracted from Stable Diffusion." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 630, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 630, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 630, + 295, + 714 + ], + "type": "text", + "content": "Implement details: Our training and testing settings are adapted from the approach outlined in the previous study [26] with several key modifications. Specifically, early stopping was employed during model training, with an initial learning rate of " + }, + { + "bbox": [ + 55, + 630, + 295, + 714 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 55, + 630, + 295, + 714 + ], + "type": "text", + "content": " and a batch size of 32. 
Additionally, the LoRA layers are configured with hyperparameters " + }, + { + "bbox": [ + 55, + 630, + 295, + 714 + ], + "type": "inline_equation", + "content": "lora_{r} = 6" + }, + { + "bbox": [ + 55, + 630, + 295, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 630, + 295, + 714 + ], + "type": "inline_equation", + "content": "lora_{\\alpha} = 6" + }, + { + "bbox": [ + 55, + 630, + 295, + 714 + ], + "type": "text", + "content": ", and a dropout rate of 0.8," + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 324, + 71, + 545, + 137 + ], + "blocks": [ + { + "bbox": [ + 324, + 71, + 545, + 137 + ], + "lines": [ + { + "bbox": [ + 324, + 71, + 545, + 137 + ], + "spans": [ + { + "bbox": [ + 324, + 71, + 545, + 137 + ], + "type": "table", + "html": "
# | STS module | CFDL module | Feature enhancement | UnivFD Dataset: ap_m | acc_m
1 | 97.37 | 81.64
2 | 97.41 | 90.17
3 | 97.39 | 89.98
4 | 98.52 | 93.61
", + "image_path": "b8e851f0a6c0a3b01c9ead4418bfc9f62fb6a7e0eaf71b764adcc5459768e91e.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 313, + 148, + 553, + 192 + ], + "lines": [ + { + "bbox": [ + 313, + 148, + 553, + 192 + ], + "spans": [ + { + "bbox": [ + 313, + 148, + 553, + 192 + ], + "type": "text", + "content": "Table 4. Ablation study of the proposed modules on the UnivFD Dataset. We show the mean accuracy " + }, + { + "bbox": [ + 313, + 148, + 553, + 192 + ], + "type": "inline_equation", + "content": "(acc_m)" + }, + { + "bbox": [ + 313, + 148, + 553, + 192 + ], + "type": "text", + "content": " and average precision " + }, + { + "bbox": [ + 313, + 148, + 553, + 192 + ], + "type": "inline_equation", + "content": "(ap_m)" + }, + { + "bbox": [ + 313, + 148, + 553, + 192 + ], + "type": "text", + "content": ". Red and underline indicate the best and the second-best result, respectively." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 198, + 553, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 198, + 553, + 221 + ], + "spans": [ + { + "bbox": [ + 313, + 198, + 553, + 221 + ], + "type": "text", + "content": "while " + }, + { + "bbox": [ + 313, + 198, + 553, + 221 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 313, + 198, + 553, + 221 + ], + "type": "text", + "content": " is set to 8.0. The proposed method is implemented using Pytorch on 2 Nvidia GeForce RTX A6000 GPUs." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 228, + 435, + 241 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 228, + 435, + 241 + ], + "spans": [ + { + "bbox": [ + 313, + 228, + 435, + 241 + ], + "type": "text", + "content": "4.2. Comparison Results" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 245, + 553, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 245, + 553, + 317 + ], + "spans": [ + { + "bbox": [ + 313, + 245, + 553, + 317 + ], + "type": "text", + "content": "The UnivFD dataset includes a diverse range of models, allowing for a comprehensive evaluation of our method across both GAN and diffusion generative models. In addition, the SynRIS dataset provides images generated by cutting-edge generative models. The overall experimental results are presented in Tab. 1, Tab. 2, and Tab. 3." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 319, + 554, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 319, + 554, + 473 + ], + "spans": [ + { + "bbox": [ + 313, + 319, + 554, + 473 + ], + "type": "text", + "content": "Results on UnivFD dataset. Results show that our proposed method achieves superior performance compared to the UnivFD and FatFormer. 
Notably, without the biased interpretation introduced by coarse-grained text prompts, SDD surpasses the latest state-of-the-art method FatFormer in mean AP " + }, + { + "bbox": [ + 313, + 319, + 554, + 473 + ], + "type": "inline_equation", + "content": "(ap_{m})" + }, + { + "bbox": [ + 313, + 319, + 554, + 473 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 313, + 319, + 554, + 473 + ], + "type": "inline_equation", + "content": "0.34\\%" + }, + { + "bbox": [ + 313, + 319, + 554, + 473 + ], + "type": "text", + "content": " and in mean accuracy " + }, + { + "bbox": [ + 313, + 319, + 554, + 473 + ], + "type": "inline_equation", + "content": "(acc_{m})" + }, + { + "bbox": [ + 313, + 319, + 554, + 473 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 313, + 319, + 554, + 473 + ], + "type": "inline_equation", + "content": "2.75\\%" + }, + { + "bbox": [ + 313, + 319, + 554, + 473 + ], + "type": "text", + "content": ". Moreover, compared with methods based on relatively monotonous forgery features in Tab. 1 and Tab. 2, our approach outperforms all of them by a large margin. The above evidence indicates that an effective combination of visual concepts and forgery features helps the model extract sufficient forgery patterns and eliminate superfluous features." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 475, + 554, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 475, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 475, + 554, + 713 + ], + "type": "text", + "content": "Results on SynRIS dataset. As shown in Tab. 3, when confronted with high-fidelity images generated by text-to-image models, methods leveraging pre-trained vision-language models, such as UnivFD and FatFormer, lose their competitiveness. In contrast, NPR, which focuses on neighboring pixel relationships, retains its edge. We assume that current generative methods grasp the relationships between visual information and semantic concepts in images but cannot refine local forgery details at the pixel level. Considering that excessive reliance on concepts misses abnormal pixel arrangements and focusing on monotonous forgery patterns can cause overfitting, our detector, which emphasizes low-level features with visual concepts, is trained on lower-fidelity fake images generated by Stable Diffusion [52] to capture concept-specific lacunae. We follow the evaluation protocol from SynRIS [5]. In comparison, our detector achieves an impressive mean AUROC of " + }, + { + "bbox": [ + 313, + 475, + 554, + 713 + ], + "type": "inline_equation", + "content": "95.1\\%" + }, + { + "bbox": [ + 313, + 475, + 554, + 713 + ], + "type": "text", + "content": ", surpassing the state-of-the-art method by " + }, + { + "bbox": [ + 313, + 475, + 554, + 713 + ], + "type": "inline_equation", + "content": "4.8\\%" + }, + { + "bbox": [ + 313, + 475, + 554, + 713 + ], + "type": "text", + "content": ". This demonstrates its superior ability to tackle the challenges posed by evolving generative models." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "18394" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 77, + 72, + 190, + 169 + ], + "blocks": [ + { + "bbox": [ + 77, + 72, + 190, + 169 + ], + "lines": [ + { + "bbox": [ + 77, + 72, + 190, + 169 + ], + "spans": [ + { + "bbox": [ + 77, + 72, + 190, + 169 + ], + "type": "image", + "image_path": "0fc9b0eeef54d2a107d0b1c0f6d6c4e979f1491fe980fcbbc07675e0143222a1.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 179, + 559, + 202 + ], + "lines": [ + { + "bbox": [ + 55, + 179, + 559, + 202 + ], + "spans": [ + { + "bbox": [ + 55, + 179, + 559, + 202 + ], + "type": "text", + "content": "Figure 5. Performance of functions on adaptive weights. Figure 6. T-SNE visualization of real and fake images [47]. The feature space is based on our classifier. Each randomly samples 500 real and 500 fake images." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 211, + 69, + 291, + 171 + ], + "blocks": [ + { + "bbox": [ + 211, + 69, + 291, + 171 + ], + "lines": [ + { + "bbox": [ + 211, + 69, + 291, + 171 + ], + "spans": [ + { + "bbox": [ + 211, + 69, + 291, + 171 + ], + "type": "image", + "image_path": "9ba89e1496d6b16bdccd57c575440594fea729573cfd64ec69d877d04d24eeca.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 301, + 70, + 378, + 171 + ], + "blocks": [ + { + "bbox": [ + 301, + 70, + 378, + 171 + ], + "lines": [ + { + "bbox": [ + 301, + 70, + 378, + 171 + ], + "spans": [ + { + "bbox": [ + 301, + 70, + 378, + 171 + ], + "type": "image", + "image_path": "53ea1b5f353a96c9738269ec99c6192074f98e4c9590ea3b568427e6f48b7455.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 389, + 70, + 465, + 171 + ], + "blocks": [ + { + "bbox": [ + 389, + 70, + 465, + 171 + ], + "lines": [ + { + "bbox": [ + 389, + 70, + 465, + 171 + ], + "spans": [ + { + "bbox": [ + 389, + 70, + 465, + 171 + ], + "type": "image", + "image_path": "12508e5e81b25cf63bb1033d94fba7fce7241302bce0d94e1bdf758f953e09b0.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 478, + 70, + 555, + 171 + ], + "blocks": [ + { + "bbox": [ + 478, + 70, + 555, + 171 + ], + "lines": [ + { + "bbox": [ + 478, + 70, + 555, + 171 + ], + "spans": [ + { + "bbox": [ + 478, + 70, + 555, + 171 + ], + "type": "image", + "image_path": "88bdb2dfb8a557c7e1f64a77e634075a41c2959d745cddd5ef7d47a86cb6ada7.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 56, + 207, + 294, + 330 + ], + "blocks": [ + { + "bbox": [ + 56, + 207, + 294, + 330 + ], + "lines": [ + { + "bbox": [ + 56, + 207, + 294, + 330 + ], + "spans": [ + { + "bbox": [ + 56, + 207, + 294, + 330 + ], + "type": "image", + "image_path": "3f926a12a11783a8f3703d8b1e19a10696a9a49ef46cb72f9c96a4c3486c118f.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, 
+ "type": "image_body" + }, + { + "bbox": [ + 55, + 336, + 295, + 359 + ], + "lines": [ + { + "bbox": [ + 55, + 336, + 295, + 359 + ], + "spans": [ + { + "bbox": [ + 55, + 336, + 295, + 359 + ], + "type": "text", + "content": "Figure 7. The showcase of attention on images which are input into our model." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 366, + 147, + 379 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 366, + 147, + 379 + ], + "spans": [ + { + "bbox": [ + 55, + 366, + 147, + 379 + ], + "type": "text", + "content": "4.3. Ablation study" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 385, + 296, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 385, + 296, + 433 + ], + "spans": [ + { + "bbox": [ + 55, + 385, + 296, + 433 + ], + "type": "text", + "content": "We perform comprehensive ablation studies on the UnivFD dataset under the original experimental configurations, reporting the mean accuracy " + }, + { + "bbox": [ + 55, + 385, + 296, + 433 + ], + "type": "inline_equation", + "content": "(acc_{m})" + }, + { + "bbox": [ + 55, + 385, + 296, + 433 + ], + "type": "text", + "content": " and mean average precision " + }, + { + "bbox": [ + 55, + 385, + 296, + 433 + ], + "type": "inline_equation", + "content": "(ap_{m})" + }, + { + "bbox": [ + 55, + 385, + 296, + 433 + ], + "type": "text", + "content": " as the primary evaluation metrics." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 435, + 296, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 435, + 296, + 567 + ], + "spans": [ + { + "bbox": [ + 55, + 435, + 296, + 567 + ], + "type": "text", + "content": "Effect of Each Component: We study the effects of removing STS module, CFDL module, and feature enhancement in our method. The results, presented in Tab. 4, demonstrate that these components are essential for improving performance in generalization on unseen models. This empirical finding suggests that the CFDL effectively captures forgery discrepancies associated with semantic concepts, while the enhancer module plays a crucial role in identifying robust forgery artifacts. The collaboration of all modules enhances the model's ability to distinguish between real and fake images." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 568, + 295, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 568, + 295, + 674 + ], + "spans": [ + { + "bbox": [ + 55, + 568, + 295, + 674 + ], + "type": "text", + "content": "Effect of function on adaptive weights: To check how well the proposed function works in the SDD, we select two conventional functions for comparison: " + }, + { + "bbox": [ + 55, + 568, + 295, + 674 + ], + "type": "inline_equation", + "content": "f(x) = |x|" + }, + { + "bbox": [ + 55, + 568, + 295, + 674 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 568, + 295, + 674 + ], + "type": "inline_equation", + "content": "f(x) = x^2" + }, + { + "bbox": [ + 55, + 568, + 295, + 674 + ], + "type": "text", + "content": ". The corresponding results are presented in Fig. 5. 
We find that our proposed function yields improvements in both " + }, + { + "bbox": [ + 55, + 568, + 295, + 674 + ], + "type": "inline_equation", + "content": "ap_m" + }, + { + "bbox": [ + 55, + 568, + 295, + 674 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 568, + 295, + 674 + ], + "type": "inline_equation", + "content": "acc_m" + }, + { + "bbox": [ + 55, + 568, + 295, + 674 + ], + "type": "text", + "content": " compared to the selected functions. These results demonstrate that our proposed function is conducive to capturing robust and distinctive forgery features." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 677, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 677, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 296, + 713 + ], + "type": "text", + "content": "Visualization of learned latent space: As shown in Fig. 6, the input images can be distinctly categorized into two clusters: real and fake. Nevertheless, why does the" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 210, + 555, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 210, + 555, + 341 + ], + "spans": [ + { + "bbox": [ + 313, + 210, + 555, + 341 + ], + "type": "text", + "content": "divided boundary of ProGAN appear ambiguous in contrast to other models? Additionally, why do the real clusters of CycleGAN and StyleGAN separate from each other? We attribute these to the influence of visual semantic concepts. Intuitively, with the supervision of visual semantic concepts, the learned boundary of ProGAN is more complex and nuanced, rather than just simple straight lines or curves. Similarly, the images generated by StyleGAN and CycleGAN are projected into the corresponding semantic concept distribution and then separated from the real images based on the visual semantic concepts." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 342, + 556, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 342, + 556, + 474 + ], + "spans": [ + { + "bbox": [ + 313, + 342, + 556, + 474 + ], + "type": "text", + "content": "Visualization of attention on images: We apply Class Activation Mapping (CAM) [58] to visualize the learned representations. From our perspective, Fig. 7 illustrates that with the aid of semantic information, our model can focus on different regions of fake images, including the background, local object regions, and marginal details. This suggests that our fine-grained model is capable of capturing intricate discrepancies generalized to unseen models. Notably, the real images nearly always show no forgery discrepancy regions, which demonstrates the effectiveness of the reconstruction loss in the forgery detection task." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 483, + 388, + 496 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 483, + 388, + 496 + ], + "spans": [ + { + "bbox": [ + 313, + 483, + 388, + 496 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 504, + 556, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 504, + 556, + 707 + ], + "spans": [ + { + "bbox": [ + 313, + 504, + 556, + 707 + ], + "type": "text", + "content": "In this paper, we propose a novel method, SDD, for generalizable forgery image detection. 
The findings show that our method establishes a new state-of-the-art in detecting images generated by generative models from different periods, which underscores its robustness and superior generalization capability. To the best of our knowledge, our approach is the first within the pre-trained vision-language paradigm to rely solely on visual information, without text prompts. Based on experimental results, we conclude that leveraging sampled tokens and reconstruction techniques effectively aligns the visual semantic concept space with the forgery space. Additionally, refining low-level forgery features under the supervision of visual semantic concepts enhances the performance of forgery detection. Although SDD performs well across various generative methods, there is still room for improvement as generative technologies continue to advance. Future research will explore this further."
1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 239, + 296, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 239, + 296, + 294 + ], + "spans": [ + { + "bbox": [ + 62, + 239, + 296, + 294 + ], + "type": "text", + "content": "[3] James Betker, Gabriel Goh, Li Jing, † TimBrooks, Jianfeng Wang, Linjie Li, † LongOuyang, † JuntangZhuang, † JoyceLee, † YufeiGuo, † WesamManassra, † PrafullaDhariwal, † CaseyChu, † YunxinJiao, and Aditya Ramesh. Improving image generation with better captions. 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 296, + 295, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 296, + 295, + 327 + ], + "spans": [ + { + "bbox": [ + 62, + 296, + 295, + 327 + ], + "type": "text", + "content": "[4] Andrew Brock, Jeff Donahue, and Karen Simonyan. Large scale gan training for high fidelity natural image synthesis. ArXiv, abs/1809.11096, 2018. 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 330, + 296, + 395 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 330, + 296, + 395 + ], + "spans": [ + { + "bbox": [ + 62, + 330, + 296, + 395 + ], + "type": "text", + "content": "[5] George Cazenavette, Avneesh Sud, Thomas Leung, and Ben Usman. Fake inversion: Learning to detect images from unseen text-to-image models by inverting stable diffusion. 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10759-10769, 2024. 2, 3, 6, 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 397, + 296, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 397, + 296, + 441 + ], + "spans": [ + { + "bbox": [ + 62, + 397, + 296, + 441 + ], + "type": "text", + "content": "[6] Lucy Chai, David Bau, Ser-Nam Lim, and Phillip Isola. What makes fake images detectable? understanding properties that generalize. In European Conference on Computer Vision, 2020. 1, 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 442, + 296, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 442, + 296, + 485 + ], + "spans": [ + { + "bbox": [ + 62, + 442, + 296, + 485 + ], + "type": "text", + "content": "[7] Cheng Chen, Qifeng Chen, Jia Xu, and Vladlen Koltun. Learning to see in the dark. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3291-3300, 2018. 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 62, + 487, + 296, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 487, + 296, + 531 + ], + "spans": [ + { + "bbox": [ + 62, + 487, + 296, + 531 + ], + "type": "text", + "content": "[8] Junsong Chen, Yue Wu, Simian Luo, Enze Xie, Sayak Paul, Ping Luo, Hang Zhao, and Zhenguo Li. Pixart- " + }, + { + "bbox": [ + 62, + 487, + 296, + 531 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 62, + 487, + 296, + 531 + ], + "type": "text", + "content": ": Fast and controllable image generation with latent consistency models. ArXiv, abs/2401.05252, 2024. 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 62, + 533, + 296, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 533, + 296, + 576 + ], + "spans": [ + { + "bbox": [ + 62, + 533, + 296, + 576 + ], + "type": "text", + "content": "[9] Qifeng Chen and Vladlen Koltun. Photographic image synthesis with cascaded refinement networks. 2017 IEEE International Conference on Computer Vision (ICCV), pages 1520-1529, 2017. 
6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 57, + 579, + 295, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 579, + 295, + 633 + ], + "spans": [ + { + "bbox": [ + 57, + 579, + 295, + 633 + ], + "type": "text", + "content": "[10] Yunjey Choi, Min-Je Choi, Mun Su Kim, Jung-Woo Ha, Sunghun Kim, and Jaegul Choo. Stargan: Unified generative adversarial networks for multi-domain image-to-image translation. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8789-8797, 2017. 6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 57, + 635, + 295, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 635, + 295, + 689 + ], + "spans": [ + { + "bbox": [ + 57, + 635, + 295, + 689 + ], + "type": "text", + "content": "[11] Tao Dai, Jianrui Cai, Yongbing Zhang, Shutao Xia, and Lei Zhang. Second-order attention network for single image super-resolution. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11057-11066, 2019. 6" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 57, + 691, + 295, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 691, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 57, + 691, + 295, + 713 + ], + "type": "text", + "content": "[12] Prafulla Dhariwal and Alex Nichol. Diffusion models beat gans on image synthesis. ArXiv, abs/2105.05233, 2021. 6" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 316, + 73, + 553, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 73, + 553, + 116 + ], + "spans": [ + { + "bbox": [ + 316, + 73, + 553, + 116 + ], + "type": "text", + "content": "[13] Joel Cameron Frank, Thorsten Eisenhofer, Lea Schonherr, Asja Fischer, Dorothea Kolossa, and Thorsten Holz. Leveraging frequency analysis for deep fake image recognition. ArXiv, abs/2003.08685, 2020. 1, 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 317, + 118, + 553, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 118, + 553, + 161 + ], + "spans": [ + { + "bbox": [ + 317, + 118, + 553, + 161 + ], + "type": "text", + "content": "[14] Ian J. Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron C. Courville, and Yoshua Bengio. Generative adversarial networks. Communications of the ACM, 63:139 - 144, 2014. 1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 162, + 553, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 162, + 553, + 205 + ], + "spans": [ + { + "bbox": [ + 317, + 162, + 553, + 205 + ], + "type": "text", + "content": "[15] Yatharth Gupta, Vishnu V. Jaddipal, Harish Prabhala, Sayak Paul, and Patrick von Platen. Progressive knowledge distillation of stable diffusion xl using layer level loss. ArXiv, abs/2401.02677, 2024. 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 206, + 553, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 206, + 553, + 271 + ], + "spans": [ + { + "bbox": [ + 317, + 206, + 553, + 271 + ], + "type": "text", + "content": "[16] Zhizhong Han, Xiyang Wang, Yu-Shen Liu, and Matthias Zwicker. Multi-angle point cloud-vae: Unsupervised feature learning for 3d point clouds from multiple angles by joint self-reconstruction and half-to-half prediction. 
In 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pages 10441-10450. IEEE, 2019. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 272, + 553, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 272, + 553, + 315 + ], + "spans": [ + { + "bbox": [ + 317, + 272, + 553, + 315 + ], + "type": "text", + "content": "[17] J. Edward Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. ArXiv, abs/2106.09685, 2021. 4" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 316, + 553, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 316, + 553, + 348 + ], + "spans": [ + { + "bbox": [ + 317, + 316, + 553, + 348 + ], + "type": "text", + "content": "[18] Tero Karras, Timo Aila, Samuli Laine, and Jaakko Lehtinen. Progressive growing of gans for improved quality, stability, and variation. ArXiv, abs/1710.10196, 2017. 6" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 317, + 350, + 553, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 350, + 553, + 392 + ], + "spans": [ + { + "bbox": [ + 317, + 350, + 553, + 392 + ], + "type": "text", + "content": "[19] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4396-4405, 2018. 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 394, + 553, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 394, + 553, + 437 + ], + "spans": [ + { + "bbox": [ + 317, + 394, + 553, + 437 + ], + "type": "text", + "content": "[20] Prabhat Kumar, Mayank Vatsa, and Richa Singh. Detecting face2face facial reenactment in videos. 2020 IEEE Winter Conference on Applications of Computer Vision (WACV), pages 2578-2586, 2020. 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 438, + 553, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 438, + 553, + 480 + ], + "spans": [ + { + "bbox": [ + 316, + 438, + 553, + 480 + ], + "type": "text", + "content": "[21] Daiqing Li, Aleks Kamko, Ehsan Akhgari, Ali Sabet, Linmiao Xu, and Suhail Doshi. Playground v2.5: Three insights towards enhancing aesthetic quality in text-to-image generation. ArXiv, abs/2402.17245, 2024. 7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 482, + 553, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 482, + 553, + 525 + ], + "spans": [ + { + "bbox": [ + 317, + 482, + 553, + 525 + ], + "type": "text", + "content": "[22] Ke Li, Tianhao Zhang, and Jitendra Malik. Diverse image synthesis from semantic layouts via conditional imle. 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pages 4219-4228, 2018. 6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 526, + 553, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 526, + 553, + 581 + ], + "spans": [ + { + "bbox": [ + 317, + 526, + 553, + 581 + ], + "type": "text", + "content": "[23] Zhenghong Li, Hao Chen, Jiangjiang Wu, Jun Li, and Ning Jing. Segmind: Semisupervised remote sensing image semantic segmentation with masked image modeling and contrastive learning method. IEEE Transactions on Geoscience and Remote Sensing, 61:1-17, 2023. 
6" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 582, + 553, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 582, + 553, + 613 + ], + "spans": [ + { + "bbox": [ + 317, + 582, + 553, + 613 + ], + "type": "text", + "content": "[24] Bo Liu, Fan Yang, Xiuli Bi, Bin Xiao, Weisheng Li, and Xinbo Gao. Detecting generated images by real images. In European Conference on Computer Vision, 2022. 3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 615, + 553, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 615, + 553, + 679 + ], + "spans": [ + { + "bbox": [ + 317, + 615, + 553, + 679 + ], + "type": "text", + "content": "[25] Honggu Liu, Xiaodan Li, Wenbo Zhou, Yuefeng Chen, Yuan He, Hui Xue, Weiming Zhang, and Nenghai Yu. Spatial-phase shallow learning: Rethinking face forgery detection in frequency domain. 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 772-781, 2021. 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 317, + 681, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 681, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 317, + 681, + 553, + 713 + ], + "type": "text", + "content": "[26] Huan Liu, Zichang Tan, Chuangchuang Tan, Yunchao Wei, Yao Zhao, and Jingdong Wang. Forgery-aware adaptive transformer for generalizable synthetic image detection." + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "18396" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 296, + 715 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 76, + 72, + 296, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 72, + 296, + 106 + ], + "spans": [ + { + "bbox": [ + 76, + 72, + 296, + 106 + ], + "type": "text", + "content": "2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10770-10780, 2023. 1, 2, 3, 4, 5, 7" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 107, + 296, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 107, + 296, + 162 + ], + "spans": [ + { + "bbox": [ + 56, + 107, + 296, + 162 + ], + "type": "text", + "content": "[27] Xinhai Liu, Xinchen Liu, Zhizhong Han, and Yu-Shen Liu. Spu-net: Self-supervised point cloud upsampling by coarse-to-fine reconstruction with self-projection optimization. IEEE Transactions on Image Processing, 31:4213-4226, 2020. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 163, + 296, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 163, + 296, + 207 + ], + "spans": [ + { + "bbox": [ + 56, + 163, + 296, + 207 + ], + "type": "text", + "content": "[28] Zhengzhe Liu, Xiaojuan Qi, Jiaya Jia, and Philip H. S. Torr. Global texture enhancement for fake face detection in the wild. 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8057-8066, 2020. 
3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 57, + 209, + 280, + 219 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 209, + 280, + 219 + ], + "spans": [ + { + "bbox": [ + 57, + 209, + 280, + 219 + ], + "type": "text", + "content": "[29] Midjourney. Midjourney, n.d. Accessed: 2025-03-04. 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 220, + 294, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 220, + 294, + 275 + ], + "spans": [ + { + "bbox": [ + 56, + 220, + 294, + 275 + ], + "type": "text", + "content": "[30] Lakshmanan Nataraj, Tajuddin Manhar Mohammed, B. S. Manjunath, Shivkumar Chandrasekaran, Arjuna Flenner, Jawadul H. Bappy, and Amit K. Roy-Chowdhury. Detecting gan generated fake images using co-occurrence matrices. ArXiv, abs/1903.06836, 2019. 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 277, + 294, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 277, + 294, + 331 + ], + "spans": [ + { + "bbox": [ + 56, + 277, + 294, + 331 + ], + "type": "text", + "content": "[31] Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. In International Conference on Machine Learning, 2021. 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 333, + 294, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 333, + 294, + 387 + ], + "spans": [ + { + "bbox": [ + 56, + 333, + 294, + 387 + ], + "type": "text", + "content": "[32] Utkarsh Ojha, Yuheng Li, and Yong Jae Lee. Towards universal fake image detectors that generalize across generative models. 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 24480-24489, 2023. 1, 2, 3, 5, 6, 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 388, + 294, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 388, + 294, + 422 + ], + "spans": [ + { + "bbox": [ + 56, + 388, + 294, + 422 + ], + "type": "text", + "content": "[33] Pablo Pernias, Dominic Rampas, Mats L. Richter, and Marc Aubreville. Wuerstchen: Efficient pretraining of text-to-image models. ArXiv, abs/2306.00637, 2023. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 423, + 294, + 466 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 423, + 294, + 466 + ], + "spans": [ + { + "bbox": [ + 56, + 423, + 294, + 466 + ], + "type": "text", + "content": "[34] Dustin Podell, Zion English, Kyle Lacey, A. Blattmann, Tim Dockhorn, Jonas Muller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. ArXiv, abs/2307.01952, 2023. 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 468, + 294, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 468, + 294, + 510 + ], + "spans": [ + { + "bbox": [ + 56, + 468, + 294, + 510 + ], + "type": "text", + "content": "[35] Zequn Qin, Pengyi Zhang, Fei Wu, and Xi Li. Fcanet: Frequency channel attention networks. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 763-772, 2020. 
6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 513, + 294, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 513, + 294, + 578 + ], + "spans": [ + { + "bbox": [ + 56, + 513, + 294, + 578 + ], + "type": "text", + "content": "[36] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, 2021. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 580, + 294, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 580, + 294, + 623 + ], + "spans": [ + { + "bbox": [ + 56, + 580, + 294, + 623 + ], + "type": "text", + "content": "[37] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. ArXiv, abs/2102.12092, 2021. 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 624, + 294, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 624, + 294, + 690 + ], + "spans": [ + { + "bbox": [ + 56, + 624, + 294, + 690 + ], + "type": "text", + "content": "[38] Anton Razzhigaev, Arseniy Shakhmatov, Anastasia Maltseva, V.Ya. Arkhipkin, Igor Pavlov, Ilya Ryabov, Angelina Kuts, Alexander Panchenko, Andrey Kuznetsov, and Denis Dimitrov. Kandinsky: an improved text-to-image synthesis with image prior and latent diffusion. In Conference on Empirical Methods in Natural Language Processing, 2023. 6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 692, + 294, + 715 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 692, + 294, + 715 + ], + "spans": [ + { + "bbox": [ + 56, + 692, + 294, + 715 + ], + "type": "text", + "content": "[39] Jonas Ricker, Denis Lukovnikov, and Asja Fischer. Aeroblade: Training-free detection of latent diffusion images using" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 715 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 333, + 73, + 553, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 553, + 106 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 553, + 106 + ], + "type": "text", + "content": "autoencoder reconstruction error. 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9130-9140, 2024. 3, 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 108, + 553, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 108, + 553, + 162 + ], + "spans": [ + { + "bbox": [ + 316, + 108, + 553, + 162 + ], + "type": "text", + "content": "[40] Robin Rombach, A. Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10674-10685, 2021. 
6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 163, + 553, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 163, + 553, + 250 + ], + "spans": [ + { + "bbox": [ + 316, + 163, + 553, + 250 + ], + "type": "text", + "content": "[41] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, Patrick Schramowski, Srivatsa Kundurthy, Katherine Crowson, Ludwig Schmidt, Robert Kaczmarczyk, and Jenia Jitsev. Laion-5b: An open large-scale dataset for training next generation image-text models. ArXiv, abs/2210.08402, 2022.6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 252, + 553, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 252, + 553, + 274 + ], + "spans": [ + { + "bbox": [ + 316, + 252, + 553, + 274 + ], + "type": "text", + "content": "[42] Segmind. Announcing ssd-1b: A leap in efficient t2i generation., 2023. 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 276, + 553, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 276, + 553, + 319 + ], + "spans": [ + { + "bbox": [ + 316, + 276, + 553, + 319 + ], + "type": "text", + "content": "[43] Minghe Shen, Hongping Gan, Chao Ning, Yi Hua, and Tao Zhang. Transc: A transformer-based hybrid architecture for image compressed sensing. IEEE Transactions on Image Processing, 31:6991-7005, 2022. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 321, + 553, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 321, + 553, + 376 + ], + "spans": [ + { + "bbox": [ + 316, + 321, + 553, + 376 + ], + "type": "text", + "content": "[44] Chuangchuang Tan, Yao Zhao, Shikui Wei, Guanghua Gu, and Yunchao Wei. Learning on gradients: Generalized artifacts representation for gan-generated images detection. 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 12105-12114, 2023. 1, 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 377, + 553, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 377, + 553, + 442 + ], + "spans": [ + { + "bbox": [ + 316, + 377, + 553, + 442 + ], + "type": "text", + "content": "[45] Chuangchuang Tan, Huan Liu, Yao Zhao, Shikui Wei, Guanghua Gu, Ping Liu, and Yunchao Wei. Rethinking the up-sampling operations in cnn-based generative network for generalizable deepfake detection. 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 28130-28139, 2024. 2, 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 445, + 553, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 445, + 553, + 498 + ], + "spans": [ + { + "bbox": [ + 316, + 445, + 553, + 498 + ], + "type": "text", + "content": "[46] Devavrat Tomar, Manana Lortkipanidze, Guillaume Vray, Behzad Bozorgtabar, and Jean-Philippe Thiran. Self-attentive spatial adaptive normalization for cross-modality domain adaptation. IEEE Transactions on Medical Imaging, 40:2926-2938, 2021. 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 500, + 553, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 500, + 553, + 533 + ], + "spans": [ + { + "bbox": [ + 316, + 500, + 553, + 533 + ], + "type": "text", + "content": "[47] Laurens van der Maaten and Geoffrey E. Hinton. Visualizing data using t-sne. 
Journal of Machine Learning Research, 9: 2579-2605, 2008. 8" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 535, + 553, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 535, + 553, + 567 + ], + "spans": [ + { + "bbox": [ + 316, + 535, + 553, + 567 + ], + "type": "text", + "content": "[48] Tim van Erven and Peter Harremoës. Rényi divergence and kullback-leibler divergence. IEEE Transactions on Information Theory, 60:3797-3820, 2012. 4" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 569, + 553, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 569, + 553, + 634 + ], + "spans": [ + { + "bbox": [ + 316, + 569, + 553, + 634 + ], + "type": "text", + "content": "[49] Bram Wallace, Meihua Dang, Rafael Rafailov, Linqi Zhou, Aaron Lou, Senthil Purushwalkam, Stefano Ermon, Caiming Xiong, Shafiq R. Joty, and Nikhil Naik. Diffusion model alignment using direct preference optimization. 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8228-8238, 2023. 6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 635, + 553, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 635, + 553, + 690 + ], + "spans": [ + { + "bbox": [ + 316, + 635, + 553, + 690 + ], + "type": "text", + "content": "[50] Sheng-Yu Wang, Oliver Wang, Richard Zhang, Andrew Owens, and Alexei A. Efros. Cnn-generated images are surprisingly easy to spot... for now. 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8692-8701, 2019. 1, 7, 2, 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 692, + 553, + 715 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 692, + 553, + 715 + ], + "spans": [ + { + "bbox": [ + 316, + 692, + 553, + 715 + ], + "type": "text", + "content": "[51] Zhendong Wang, Jianmin Bao, Wen gang Zhou, Weilun Wang, Hezhen Hu, Hong Chen, and Houqiang Li. Dire for" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "18397" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 295, + 518 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 76, + 72, + 294, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 72, + 294, + 105 + ], + "spans": [ + { + "bbox": [ + 76, + 72, + 294, + 105 + ], + "type": "text", + "content": "diffusion-generated image detection. 2023 IEEE/CVF International Conference on Computer Vision (ICCV), pages 22388-22398, 2023. 3, 7" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 107, + 295, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 107, + 295, + 162 + ], + "spans": [ + { + "bbox": [ + 56, + 107, + 295, + 162 + ], + "type": "text", + "content": "[52] Zijie J. Wang, Evan Montoya, David Munechika, Haoyang Yang, Benjamin Hoover, and Duen Horng Chau. Diffusiondb: A large-scale prompt gallery dataset for text-to-image generative models. In Annual Meeting of the Association for Computational Linguistics, 2022. 
6, 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 163, + 294, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 163, + 294, + 217 + ], + "spans": [ + { + "bbox": [ + 56, + 163, + 294, + 217 + ], + "type": "text", + "content": "[53] Syed Waqas Zamir, Aditya Arora, Salman Hameed Khan, Munawar Hayat, Fahad Shahbaz Khan, Ming-Hsuan Yang, and Ling Shao. Learning enriched features for fast image restoration and enhancement. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45:1934-1948, 2022. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 219, + 294, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 219, + 294, + 262 + ], + "spans": [ + { + "bbox": [ + 56, + 219, + 294, + 262 + ], + "type": "text", + "content": "[54] Maxime Zanella and Ismail Ben Ayed. Low-rank few-shot adaptation of vision-language models. 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 1593-1603, 2024. 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 264, + 294, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 264, + 294, + 297 + ], + "spans": [ + { + "bbox": [ + 56, + 264, + 294, + 297 + ], + "type": "text", + "content": "[55] Kai Zhang, Lingbo Mo, Wenhu Chen, Huan Sun, and Yu Su. Magicbrush: A manually annotated dataset for instruction-guided image editing. ArXiv, abs/2306.10012, 2023. 4, 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 297, + 294, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 297, + 294, + 361 + ], + "spans": [ + { + "bbox": [ + 56, + 297, + 294, + 361 + ], + "type": "text", + "content": "[56] Lingzhi Zhang, Zhengjie Xu, Connelly Barnes, Yuqian Zhou, Qing Liu, He Zhang, Sohrab Amirghodsi, Zhe Lin, Eli Shechtman, and Jianbo Shi. Perceptual artifacts localization for image synthesis tasks. 2023 IEEE/CVF International Conference on Computer Vision (ICCV), pages 7545-7556, 2023. 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 364, + 294, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 364, + 294, + 407 + ], + "spans": [ + { + "bbox": [ + 56, + 364, + 294, + 407 + ], + "type": "text", + "content": "[57] Xu Zhang, Svebor Karaman, and Shih-Fu Chang. Detecting and simulating artifacts in gan fake images. 2019 IEEE International Workshop on Information Forensics and Security (WIFS), pages 1-6, 2019. 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 409, + 294, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 409, + 294, + 462 + ], + "spans": [ + { + "bbox": [ + 56, + 409, + 294, + 462 + ], + "type": "text", + "content": "[58] Bolei Zhou, Aditya Khosla, Ågata Lapedriza, Aude Oliva, and Antonio Torralba. Learning deep features for discriminative localization. 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2921-2929, 2015. 8" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 464, + 294, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 464, + 294, + 518 + ], + "spans": [ + { + "bbox": [ + 56, + 464, + 294, + 518 + ], + "type": "text", + "content": "[59] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A. Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. 2017 IEEE International Conference on Computer Vision (ICCV), pages 2242-2251, 2017. 
6" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "text", + "content": "18398" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file