diff --git "a/2023/Zero-Shot Model Diagnosis/layout.json" "b/2023/Zero-Shot Model Diagnosis/layout.json" new file mode 100644--- /dev/null +++ "b/2023/Zero-Shot Model Diagnosis/layout.json" @@ -0,0 +1,12258 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 212, + 103, + 381, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 212, + 103, + 381, + 121 + ], + "spans": [ + { + "bbox": [ + 212, + 103, + 381, + 121 + ], + "type": "text", + "content": "Zero-shot Model Diagnosis" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 59, + 143, + 112, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 143, + 112, + 158 + ], + "spans": [ + { + "bbox": [ + 59, + 143, + 112, + 158 + ], + "type": "text", + "content": "Jinqi Luo*" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 143, + 215, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 143, + 215, + 158 + ], + "spans": [ + { + "bbox": [ + 132, + 143, + 215, + 158 + ], + "type": "text", + "content": "Zhaoning Wang*" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 235, + 144, + 315, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 144, + 315, + 157 + ], + "spans": [ + { + "bbox": [ + 235, + 144, + 315, + 157 + ], + "type": "text", + "content": "Chen Henry Wu" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 340, + 144, + 404, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 340, + 144, + 404, + 157 + ], + "spans": [ + { + "bbox": [ + 340, + 144, + 404, + 157 + ], + "type": "text", + "content": "Dong Huang" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 429, + 144, + 533, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 429, + 144, + 533, + 156 + ], + "spans": [ + { + "bbox": [ + 429, + 144, + 533, + 156 + ], + "type": "text", + "content": "Fernando De la Torre" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 228, + 159, + 
364, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 159, + 364, + 171 + ], + "spans": [ + { + "bbox": [ + 228, + 159, + 364, + 171 + ], + "type": "text", + "content": "Carnegie Mellon University" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 146, + 174, + 443, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 174, + 443, + 185 + ], + "spans": [ + { + "bbox": [ + 146, + 174, + 443, + 185 + ], + "type": "text", + "content": "{jinqil, zhaoning, chenwu2, dghuang, ftorre}@cs.cmu.edu" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "spans": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 229, + 287, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 229, + 287, + 361 + ], + "spans": [ + { + "bbox": [ + 45, + 229, + 287, + 361 + ], + "type": "text", + "content": "When it comes to deploying deep vision models, the behavior of these systems must be explicable to ensure confidence in their reliability and fairness. A common approach to evaluate deep learning models is to build a labeled test set with attributes of interest and assess how well it performs. However, creating a balanced test set (i.e., one that is uniformly sampled over all the important traits) is often time-consuming, expensive, and prone to mistakes. The question we try to address is: can we evaluate the sensitivity of deep learning models to arbitrary visual attributes without an annotated test set?" 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 45, + 361, + 288, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 361, + 288, + 540 + ], + "spans": [ + { + "bbox": [ + 45, + 361, + 288, + 540 + ], + "type": "text", + "content": "This paper argues the case that Zero-shot Model Diagnosis (ZOOM) is possible without the need for a test set nor labeling. To avoid the need for test sets, our system relies on a generative model and CLIP. The key idea is enabling the user to select a set of prompts (relevant to the problem) and our system will automatically search for semantic counterfactual images (i.e., synthesized images that flip the prediction in the case of a binary classifier) using the generative model. We evaluate several visual tasks (classification, key-point detection, and segmentation) in multiple visual domains to demonstrate the viability of our methodology. Extensive experiments demonstrate that our method is capable of producing counterfactual images and offering sensitivity analysis for model diagnosis without the need for a test set." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 554, + 127, + 566 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 554, + 127, + 566 + ], + "spans": [ + { + "bbox": [ + 47, + 554, + 127, + 566 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 574, + 287, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 574, + 287, + 694 + ], + "spans": [ + { + "bbox": [ + 46, + 574, + 287, + 694 + ], + "type": "text", + "content": "Deep learning models inherit data biases, which can be accentuated or downplayed depending on the model's architecture and optimization strategy. 
Deploying a computer vision deep learning model requires extensive testing and evaluation, with a particular focus on features with potentially dire social consequences (e.g., non-uniform behavior across gender or ethnicity). Given the importance of the problem, it is common to collect and label large-scale datasets to evaluate the behavior of these models across attributes of interest. Unfortunately, collecting these test" + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 309, + 213, + 539, + 361 + ], + "blocks": [ + { + "bbox": [ + 309, + 213, + 539, + 361 + ], + "lines": [ + { + "bbox": [ + 309, + 213, + 539, + 361 + ], + "spans": [ + { + "bbox": [ + 309, + 213, + 539, + 361 + ], + "type": "image", + "image_path": "3c343307fb18c4ea29ef25ad4087d7b7663b31d9277a7b39fe95b2f82af49943.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 370, + 545, + 437 + ], + "lines": [ + { + "bbox": [ + 304, + 370, + 545, + 437 + ], + "spans": [ + { + "bbox": [ + 304, + 370, + 545, + 437 + ], + "type": "text", + "content": "Figure 1. Given a differentiable deep learning model (e.g., a cat/dog classifier) and user-defined text attributes, how can we determine the model's sensitivity to specific attributes without using labeled test data? Our system generates counterfactual images (bottom right) based on the textual directions provided by the user, while also computing the sensitivity histogram (top right)." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 449, + 545, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 449, + 545, + 567 + ], + "spans": [ + { + "bbox": [ + 304, + 449, + 545, + 567 + ], + "type": "text", + "content": "datasets is extremely time-consuming, error-prone, and expensive. 
Moreover, a balanced dataset, that is uniformly distributed across all attributes of interest, is also typically impractical to acquire due to its combinatorial nature. Even with careful metric analysis in this test set, no robustness nor fairness can be guaranteed since there can be a mismatch between the real and test distributions [25]. This research will explore model diagnosis without relying on a test set in an effort to democratize model diagnosis and lower the associated cost." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "text", + "content": "Counterfactual explainability as a means of model diagnosis is drawing the community's attention [5,20]. Counterfactual images visualize the sensitive factors of an input image that can influence the model's outputs. In other words, counterfactuals answer the question: \"How can we modify the input image " + }, + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "text", + "content": " (while fixing the ground truth) so that the model prediction would diverge from " + }, + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{y}}" + }, + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "text", + "content": "?\". The parameterization of such counterfactuals will provide insights into identifying key factors of where the model fails. 
Unlike existing image-space adversary techniques [4,18], counterfactuals provide semantic perturbations that are interpretable by humans. However, existing counterfactual studies re" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 702, + 126, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 126, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 126, + 712 + ], + "type": "text", + "content": "*Equal contribution." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11631" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 286, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 286, + 108 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 286, + 108 + ], + "type": "text", + "content": "require the user to either collect uniform test sets [10], annotate discovered bias [15], or train a model-specific explanation every time the user wants to diagnose a new model [13]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 108, + 286, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 108, + 286, + 262 + ], + "spans": [ + { + "bbox": [ + 46, + 108, + 286, + 262 + ], + "type": "text", + "content": "On the other hand, recent advances in Contrastive Language-Image Pretraining (CLIP) [24] can help to overcome the above challenges. CLIP enables text-driven applications that map user text representations to visual manifolds for downstream tasks such as avatar generation [7], motion generation [37] or neural rendering [22, 30]. In the domain of image synthesis, StyleCLIP [21] reveals that text-conditioned optimization in the StyleGAN [12] latent space can decompose latent directions for image editing, allowing for the mutation of a specific attribute without disturbing others. With such capability, users can freely edit semantic attributes conditioned on text inputs. This paper further explores its use in the scope of model diagnosis." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 263, + 286, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 263, + 286, + 466 + ], + "spans": [ + { + "bbox": [ + 46, + 263, + 286, + 466 + ], + "type": "text", + "content": "The central concept of the paper is depicted in Fig. 1. Consider a user interested in evaluating which factors contribute to the lack of robustness in a cat/dog classifier (target model). By selecting a list of keyword attributes, the user is able to (1) see counterfactual images where semantic variations flip the target model predictions (see the classifier score in the top-right corner of the counterfactual images) and (2) quantify the sensitivity of each attribute for the target model (see sensitivity histogram on the top). Instead of using a test set, we propose using a StyleGAN generator as the picture engine for sampling counterfactual images. CLIP transforms user's text input, and enables model diagnosis in an open-vocabulary setting. This is a major advantage since there is no need for collecting and annotating images and minimal user expert knowledge. In addition, we are not tied to a particular annotation from datasets (e.g., specific attributes in CelebA [16])." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 467, + 286, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 467, + 286, + 491 + ], + "spans": [ + { + "bbox": [ + 47, + 467, + 286, + 491 + ], + "type": "text", + "content": "To summarize, our proposed work offers three major improvements over earlier efforts:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 58, + 495, + 286, + 654 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 58, + 495, + 286, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 495, + 286, + 555 + ], + "spans": [ + { + "bbox": [ + 58, + 495, + 286, + 555 + ], + "type": "text", + "content": "- The user requires neither a labeled, balanced test dataset, and minimal expert knowledge in order to evaluate where a model fails (i.e., model diagnosis). In addition, the method provides a sensitivity histogram across the attributes of interest." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 58, + 563, + 286, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 563, + 286, + 599 + ], + "spans": [ + { + "bbox": [ + 58, + 563, + 286, + 599 + ], + "type": "text", + "content": "- When a different target model or a new user-defined attribute space is introduced, it is not necessary to retrain our system, allowing for practical use." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 58, + 606, + 286, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 606, + 286, + 654 + ], + "spans": [ + { + "bbox": [ + 58, + 606, + 286, + 654 + ], + "type": "text", + "content": "- The target model fine-tuned with counterfactual images not only slightly improves the classification performance, but also greatly increases the distributional robustness against counterfactual images." 
+ } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 47, + 669, + 133, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 669, + 133, + 681 + ], + "spans": [ + { + "bbox": [ + 47, + 669, + 133, + 681 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 689, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 286, + 713 + ], + "type": "text", + "content": "This section reviews prior work on attribute editing with generative models and recent efforts on model diagnosis." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 72, + 524, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 524, + 84 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 524, + 84 + ], + "type": "text", + "content": "2.1. Attribute Editing with Generative Models" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 87, + 545, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 87, + 545, + 291 + ], + "spans": [ + { + "bbox": [ + 304, + 87, + 545, + 291 + ], + "type": "text", + "content": "With recent progress in generative models, GANs supports high-quality image synthesis, as well as semantic attributes editing [35]. [1, 6] edit the images by perturbing the intermediate latent space encoded from the original images. These methods rely on images to be encoded to latent vectors to perform attribute editing. On the contrary, StyleGAN [12] can produce images by sampling the latent space. Many works have explored ways to edit attributes in the latent space of StyleGAN, either by relying on image annotations [27] or in an unsupervised manner [8, 28]. StyleSpace [34] further disentangles the latent space of StyleGAN and can perform specific attribute edits by disentangled style vectors. 
Based upon StyleSpace, StyleCLIP [21] builds the connection between the CLIP language space and StyleGAN latent space to enable arbitrary edits specified by the text. Our work adopts this concept for fine-grained attribute editing." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 297, + 406, + 309 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 297, + 406, + 309 + ], + "spans": [ + { + "bbox": [ + 306, + 297, + 406, + 309 + ], + "type": "text", + "content": "2.2. Model Diagnosis" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 316, + 545, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 316, + 545, + 578 + ], + "spans": [ + { + "bbox": [ + 304, + 316, + 545, + 578 + ], + "type": "text", + "content": "To the best of our knowledge, model diagnosis without a test set is a relatively unexplored problem. In the adversarial learning literature, it is common to find methods that show how image-space perturbations [4, 18] flip the model prediction; however, such perturbations lack visual interpretability. [36] pioneers in synthesizing adversaries by GANs. More recently, [9, 23, 26] propose generative methods to synthesize semantically perturbed images to visualize where the target model fails. However, their attribute editing is limited within the dataset's annotated labels. Instead, our framework allows users to easily customize their own attribute space, in which we visualize and quantify the biased factors that affect the model prediction. On the bias detection track, [13] co-trains a model-specific StyleGAN with each target model, and requires human annotators to name attribute coordinates in the Stylespace. [3, 14, 15] synthesize counterfactual images by either optimally traversing the latent space or learning an attribute hyperplane, after which the user will inspect the represented bias. 
Unlike previous work, we propose to diagnose a deep learning model without any model-specific re-training, new test sets, or manual annotations/inspections." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 588, + 361, + 600 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 588, + 361, + 600 + ], + "spans": [ + { + "bbox": [ + 306, + 588, + 361, + 600 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 605, + 545, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 545, + 664 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 545, + 664 + ], + "type": "text", + "content": "This section firstly describes our method to generate counterfactual images guided by CLIP in a zero-shot manner. We then introduce how we perform the sensitivity analysis across attributes of interest. Fig. 2 shows the overview of our framework." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 671, + 481, + 682 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 671, + 481, + 682 + ], + "spans": [ + { + "bbox": [ + 306, + 671, + 481, + 682 + ], + "type": "text", + "content": "3.1. Notation and Problem Definition" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": ", parameterized by " + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": ", be the target model that we want to diagnose. 
In this paper, " + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": " denotes two types of" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11632" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 69, + 68, + 525, + 205 + ], + "blocks": [ + { + "bbox": [ + 69, + 68, + 525, + 205 + ], + "lines": [ + { + "bbox": [ + 69, + 68, + 525, + 205 + ], + "spans": [ + { + "bbox": [ + 69, + 68, + 525, + 205 + ], + "type": "image", + "image_path": "247dfc97e41f80fd7d1d2a86fd3efb21ea4c42cb07451c98651a328c990284e9.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 216, + 547, + 262 + ], + "lines": [ + { + "bbox": [ + 46, + 216, + 547, + 262 + ], + "spans": [ + { + "bbox": [ + 46, + 216, + 547, + 262 + ], + "type": "text", + "content": "Figure 2. The ZOOM framework. Black solid lines stand for forward passes, red dashed lines stand for backpropagation, and purple dashed lines stand for inference after the optimization converges. The user inputs single or multiple attributes, and we map them into edit directions with the method in Sec. 3.2. Then we assign to each edit direction (attribute) a weight, which represents how much we are adding/removing this attribute. We iteratively perform adversarial learning on the attribute space to maximize the counterfactual effectiveness." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "spans": [ + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": "deep nets: binary attribute classifiers and face keypoint detectors. Note that our approach is extendable to any end-to-end differentiable target deep models. Let " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\phi}" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": ", parameterized by " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": ", be the style generator that synthesizes images by " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "\\mathbf{x} = \\mathcal{G}_{\\phi}(\\mathbf{s})" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "\\mathbf{s}" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": " is the style vector in Style Space " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": " [34]. 
We denote a counterfactual image as " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{x}}" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": ", which is a synthesized image that misleads the target model " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": ", and denote the original reference image as " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": " is defined as a single user input text-based attribute, with its domain " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "\\mathcal{A} = \\{a_i\\}_{i=1}^N" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": " input attributes. 
" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{x}}" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": " differs only along attribute directions " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": ". Given a set of " + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "inline_equation", + "content": "\\{f_{\\theta}, \\mathcal{G}_{\\phi}, \\mathcal{A}\\}" + }, + { + "bbox": [ + 46, + 276, + 289, + 515 + ], + "type": "text", + "content": ", our goal is to perform counterfactual-based diagnosis to interpret where the model fails without manually collecting nor labeling any test set. Unlike traditional approaches of image-space noises which lack explainability to users, our method adversarially searches the counterfactual in the user-designed semantic space. To this end, our diagnosis will have three outputs, namely counterfactual images (Sec. 3.3), sensitivity histograms (Sec. 3.4), and distributionally robust models (Sec. 3.5)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 521, + 194, + 533 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 521, + 194, + 533 + ], + "spans": [ + { + "bbox": [ + 47, + 521, + 194, + 533 + ], + "type": "text", + "content": "3.2. 
Extracting Edit Directions" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 536, + 287, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 536, + 287, + 608 + ], + "spans": [ + { + "bbox": [ + 46, + 536, + 287, + 608 + ], + "type": "text", + "content": "This section examines the terminologies, method, and modification we adopt in ZOOM to extract suitable global directions for attribute editing. Since CLIP has shown strong capability in disentangling visual representation [19], we incorporate style channel relevance from Style-CLIP [21] to find edit directions for each attribute." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "spans": [ + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "text", + "content": "Given the user's input strings of attributes, we want to find an image manipulation direction " + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "inline_equation", + "content": "\\Delta \\mathbf{s}" + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "text", + "content": " for any " + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "inline_equation", + "content": "\\mathbf{s} \\sim \\mathcal{S}" + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "text", + "content": ", such that the generated image " + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\phi}(\\mathbf{s} + \\Delta \\mathbf{s})" + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "text", + "content": " only varies in the input attributes. 
Recall that CLIP maps strings into a text embedding " + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "inline_equation", + "content": "\\mathbf{t} \\in \\mathcal{T}" + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "text", + "content": ", the text embedding space. For a string attribute description " + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "text", + "content": " and a neutral prefix " + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "text", + "content": ", we obtain the CLIP text embedding difference " + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "inline_equation", + "content": "\\Delta \\mathbf{t}" + }, + { + "bbox": [ + 46, + 609, + 288, + 693 + ], + "type": "text", + "content": " by:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 86, + 697, + 287, + 711 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 697, + 287, + 711 + ], + "spans": [ + { + "bbox": [ + 86, + 697, + 287, + 711 + ], + "type": "interline_equation", + "content": "\\Delta \\mathbf {t} = \\operatorname {C L I P} _ {\\text {t e x t}} (p \\oplus a) - \\operatorname {C L I P} _ {\\text {t e x t}} (p) \\tag {1}", + "image_path": "406522a59249e10ab5cd88783268d84c4058e8f220dfd4af0401d849dff5e944.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 276, + 545, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 276, + 545, + 312 + ], + "spans": [ + { + "bbox": [ + 305, + 276, + 545, + 312 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 276, + 545, + 312 + ], + "type": "inline_equation", + "content": "\\oplus" + }, + { + "bbox": [ + 305, + 276, + 545, + 312 + ], + "type": "text", + "content": " is the string concatenation 
operator. To take 'Eyeglasses' as an example, we can get " + }, + { + "bbox": [ + 305, + 276, + 545, + 312 + ], + "type": "inline_equation", + "content": "\\Delta t = \\mathrm{CLIP}_{\\mathrm{text}}" + }, + { + "bbox": [ + 305, + 276, + 545, + 312 + ], + "type": "text", + "content": " (a face with Eyeglasses) - " + }, + { + "bbox": [ + 305, + 276, + 545, + 312 + ], + "type": "inline_equation", + "content": "\\mathrm{CLIP}_{\\mathrm{text}}" + }, + { + "bbox": [ + 305, + 276, + 545, + 312 + ], + "type": "text", + "content": " (a face)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "spans": [ + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "text", + "content": "To get the edit direction, " + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "inline_equation", + "content": "\\Delta \\mathbf{s}" + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "text", + "content": ", we need to utilize a style relevance mapper " + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "inline_equation", + "content": "\\mathbf{M} \\in \\mathbb{R}^{c_S \\times c_T}" + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "text", + "content": " to map between the CLIP text embedding vectors of length " + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "inline_equation", + "content": "c_{\\mathcal{T}}" + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "text", + "content": " and the Style space vector of length " + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "inline_equation", + "content": "c_{\\mathcal{S}}" + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "text", + "content": ". 
StyleCLIP optimizes " + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "text", + "content": " by iteratively searching meaningful style channels: mutating each channel in " + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "inline_equation", + "content": "\\mathcal{S}" + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "text", + "content": " and encoding the mutated images by CLIP to assess whether there is a significant change in " + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "text", + "content": " space. To prevent undesired edits that are irrelevant to the user prompt, the edit direction " + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "inline_equation", + "content": "\\Delta \\mathbf{s}" + }, + { + "bbox": [ + 304, + 312, + 546, + 432 + ], + "type": "text", + "content": " will filter out channels that the style value change is insignificant:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 348, + 439, + 545, + 452 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 348, + 439, + 545, + 452 + ], + "spans": [ + { + "bbox": [ + 348, + 439, + 545, + 452 + ], + "type": "interline_equation", + "content": "\\Delta \\mathbf {s} = (\\mathbf {M} \\cdot \\Delta \\mathbf {t}) \\odot \\mathbb {1} ((\\mathbf {M} \\cdot \\Delta \\mathbf {t}) > \\lambda), \\tag {2}", + "image_path": "f4eabcda229d25393671dbfd5547dd2e9e1ebf9423d0a153d2128a4fc59d47f9.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 
460, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "text", + "content": " is the hyper-parameter for the threshold value. " + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\mathbb{1}(\\cdot)" + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "text", + "content": " is the indicator function, and " + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\odot" + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "text", + "content": " is the element-wise product operator. Since the success of attribute editing by the extracted edit directions will be the key to our approach, Appendix A will show the capability of CLIP by visualizing the global edit direction on multiple sampled images, conducting the user study, and analyzing the effect of " + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 304, + 460, + 545, + 544 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 550, + 473, + 563 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 550, + 473, + 563 + ], + "spans": [ + { + "bbox": [ + 306, + 550, + 473, + 563 + ], + "type": "text", + "content": "3.3. Style Counterfactual Synthesis" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "spans": [ + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", + "content": "Identifying semantic counterfactuals necessitates a manageable parametrization of the semantic space for effective exploration. 
For ease of notation, we denote " + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "inline_equation", + "content": "(\\Delta \\mathbf{s})_i" + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", + "content": " as the global edit direction for " + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "inline_equation", + "content": "i^{th}" + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", + "content": " attribute " + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "inline_equation", + "content": "a_i \\in \\mathcal{A}" + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", + "content": " from the user input. After these " + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", + "content": " attributes are provided and the edit directions are calculated, we initialize the control vectors " + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "inline_equation", + "content": "\\mathbf{w}" + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", + "content": " of length " + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", + "content": " where the " + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "inline_equation", + "content": "i^{th}" + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", + "content": " element " + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "inline_equation", + "content": "w_i" + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", + "content": " controls the strength of the " + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "inline_equation", + "content": "i^{th}" + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", 
+ "content": " edit direction. Our counterfactual edit will be a linear combination of normalized edit directions: " + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "inline_equation", + "content": "\\mathbf{s}_{edit} = \\sum_{i=1}^{N} w_i \\frac{(\\Delta \\mathbf{s})_i}{||(\\Delta \\mathbf{s})_i||}" + }, + { + "bbox": [ + 304, + 569, + 545, + 690 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 690, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 690, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 690, + 545, + 713 + ], + "type": "text", + "content": "The black arrows in Fig. 2 show the forward inference to synthesize counterfactual images. Given the parametriza" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "11633" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": "tion of attribute editing strengths and the final loss value, our framework searches for counterfactual examples in the. \noptimizable edit weight space. 
The original sampled image is " + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "\\mathbf{x} = G_{\\phi}(\\mathbf{s})" + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": " , and the counterfactual image is" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 127, + 287, + 159 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 127, + 287, + 159 + ], + "spans": [ + { + "bbox": [ + 56, + 127, + 287, + 159 + ], + "type": "interline_equation", + "content": "\\hat {\\mathbf {x}} = G _ {\\phi} (\\mathbf {s} + \\mathbf {s} _ {e d i t}) = G _ {\\phi} \\left(\\mathbf {s} + \\sum_ {i = 1} ^ {N} w _ {i} \\frac {(\\Delta \\mathbf {s}) _ {i}}{| | (\\Delta \\mathbf {s}) _ {i} | |}\\right), \\tag {3}", + "image_path": "522c65c1ac30a4ab308cd5179ecaf205912e6f30edc1fc9af47f3d6006d5dddd.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 165, + 287, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 165, + 287, + 190 + ], + "spans": [ + { + "bbox": [ + 47, + 165, + 287, + 190 + ], + "type": "text", + "content": "which is obtained by minimizing the following loss, " + }, + { + "bbox": [ + 47, + 165, + 287, + 190 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 47, + 165, + 287, + 190 + ], + "type": "text", + "content": ", that is the weighted sum of three terms:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 196, + 287, + 211 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 196, + 287, + 211 + ], + "spans": [ + { + "bbox": [ + 53, + 196, + 287, + 211 + ], + "type": "interline_equation", + "content": "\\mathcal {L} (\\mathbf {s}, \\mathbf {w}) = \\alpha \\mathcal {L} _ {\\text {t a r g e t}} (\\hat {\\mathbf {x}}) + \\beta \\mathcal {L} _ {\\text {s t r u c t}} (\\hat {\\mathbf {x}}) + \\gamma \\mathcal {L} _ {\\text {a t t r}} (\\hat 
{\\mathbf {x}}). \\tag {4}", + "image_path": "0e08d11381d1726448aeadb5be3a180bcf2b4ac7e56b7bab7222f213f756654c.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 216, + 287, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 216, + 287, + 239 + ], + "spans": [ + { + "bbox": [ + 47, + 216, + 287, + 239 + ], + "type": "text", + "content": "We back-propagate to optimize " + }, + { + "bbox": [ + 47, + 216, + 287, + 239 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 47, + 216, + 287, + 239 + ], + "type": "text", + "content": " w.r.t the weights of the edit directions " + }, + { + "bbox": [ + 47, + 216, + 287, + 239 + ], + "type": "inline_equation", + "content": "\\mathbf{w}" + }, + { + "bbox": [ + 47, + 216, + 287, + 239 + ], + "type": "text", + "content": ", shown as the red pipeline in Fig. 2." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "spans": [ + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "text", + "content": "The targeted adversarial loss " + }, + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{target}" + }, + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "text", + "content": " for binary attribute classifiers minimizes the distance between the current model prediction " + }, + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "inline_equation", + "content": "f_{\\theta}(\\hat{\\mathbf{x}})" + }, + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "text", + "content": " with the flip of original prediction " + }, + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "inline_equation", + "content": "\\hat{p}_{cls} = 1 - f_{\\theta}(\\mathbf{x})" + }, + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "text", + "content": ". 
In the case of an eyeglass classifier on a person wearing eyeglasses, " + }, + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{target}" + }, + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "text", + "content": " will guide the optimization to search w such that the model predicts no eyeglasses. For a keypoint detector, the adversarial loss will minimize the distance between the model keypoint prediction with a set of random points " + }, + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "inline_equation", + "content": "\\hat{p}_{kp} \\sim \\mathcal{N}" + }, + { + "bbox": [ + 46, + 240, + 287, + 348 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 354, + 287, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 354, + 287, + 368 + ], + "spans": [ + { + "bbox": [ + 52, + 354, + 287, + 368 + ], + "type": "text", + "content": "(binary classifier) " + }, + { + "bbox": [ + 52, + 354, + 287, + 368 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{target}(\\hat{\\mathbf{x}}) = L_{CE}(f_{\\theta}(\\hat{\\mathbf{x}}),\\hat{p}_{cls})" + }, + { + "bbox": [ + 52, + 354, + 287, + 368 + ], + "type": "text", + "content": " (5)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 370, + 287, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 370, + 287, + 384 + ], + "spans": [ + { + "bbox": [ + 52, + 370, + 287, + 384 + ], + "type": "text", + "content": "(keypoint detector) " + }, + { + "bbox": [ + 52, + 370, + 287, + 384 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{target}(\\hat{\\mathbf{x}}) = L_{MSE}(f_{\\theta}(\\hat{\\mathbf{x}}),\\hat{p}_{kp})" + }, + { + "bbox": [ + 52, + 370, + 287, + 384 + ], + "type": "text", + "content": " (6)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 46, + 389, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "text", + "content": "If we only optimize " + }, + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\text {target }}" + }, + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "text", + "content": " w.r.t the global edit directions, it is possible that the method will not preserve image statistics of the original image and can include the particular attribute that we are diagnosing. To constrain the optimization, we added a structural loss " + }, + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\text {struct }}" + }, + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "text", + "content": " and an attribute consistency loss " + }, + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\text {attr }}" + }, + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "text", + "content": " to avoid generation collapse. " + }, + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\text {struct }}" + }, + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "text", + "content": " [32] aims to preserve global image statistics of the original image x including image contrasts, background, or shape identity during the adversarial editing. While " + }, + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\text {attr }}" + }, + { + "bbox": [ + 46, + 389, + 287, + 555 + ], + "type": "text", + "content": " enforces that the target attribute (perceived ground truth) be consistent on the style edits. For example, when diagnosing the eyeglasses classifier, ZOOM preserves the original status of eyeglasses and precludes direct eyeglasses addition/removal." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 83, + 563, + 287, + 576 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 563, + 287, + 576 + ], + "spans": [ + { + "bbox": [ + 83, + 563, + 287, + 576 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {s t r u c t}} (\\hat {\\mathbf {x}}) = L _ {\\text {S S I M}} (\\hat {\\mathbf {x}}, \\mathbf {x}) \\tag {7}", + "image_path": "e9c01bbec353f59f0d838e57d9d7712ef0a9488489304a1687502582e5818dec.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 91, + 578, + 287, + 591 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 578, + 287, + 591 + ], + "spans": [ + { + "bbox": [ + 91, + 578, + 287, + 591 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {a t t r}} (\\hat {\\mathbf {x}}) = L _ {C E} \\left(\\operatorname {C L I P} (\\hat {\\mathbf {x}}), \\operatorname {C L I P} (\\mathbf {x})\\right) \\tag {8}", + "image_path": "e866dfe2b33896db5340deca40595d244352f71f167079ca62e58d7367ae29e3.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "spans": [ + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "text", + "content": "Given a pretrained target model " + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "text", + "content": ", a domain-specific style generator " + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "inline_equation", + "content": "G_{\\phi}" + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "text", + "content": ", and a text-driven attribute space " + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 47, + 598, + 287, + 647 
+ ], + "type": "text", + "content": ", our goal is to sample an original style vector " + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "inline_equation", + "content": "\\mathbf{s}" + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "text", + "content": " for each image and search its counterfactual edit strength " + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{w}}" + }, + { + "bbox": [ + 47, + 598, + 287, + 647 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 121, + 653, + 287, + 672 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 653, + 287, + 672 + ], + "spans": [ + { + "bbox": [ + 121, + 653, + 287, + 672 + ], + "type": "interline_equation", + "content": "\\hat {\\mathbf {w}} = \\underset {\\mathbf {w}} {\\operatorname {a r g m i n}} \\mathcal {L} (\\mathbf {s}, \\mathbf {w}). \\tag {9}", + "image_path": "99d40a5c06afea29793207b057f81887dd56d07deff5621674d95fb698b54539.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 677, + 257, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 257, + 689 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 257, + 689 + ], + "type": "text", + "content": "Unless otherwise stated, we iteratively update " + }, + { + "bbox": [ + 47, + 677, + 257, + 689 + ], + "type": "inline_equation", + "content": "\\mathbf{w}" + }, + { + "bbox": [ + 47, + 677, + 257, + 689 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 102, + 696, + 287, + 711 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 696, + 287, + 711 + ], + "spans": [ + { + "bbox": [ + 102, + 696, + 287, + 711 + ], + "type": "interline_equation", + "content": "\\mathbf {w} = \\operatorname {c l a m p} _ {[ - \\epsilon , \\epsilon ]} (\\mathbf {w} - \\eta \\nabla_ {\\mathbf {w}} 
\\mathcal {L}), \\tag {10}", + "image_path": "7fe0063f5c0604d3a3512f4ac37243bacd08cbab0e142ef20282d6717d310e28.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "content": " is the step size and " + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "content": " is the clamp bound to avoid synthesis collapse caused by exaggerated edit. Note that the maximum counterfactual effectiveness does not indicate the maximum edit strength (i.e., " + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "inline_equation", + "content": "w_{i} = \\epsilon" + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "content": "), since the attribute edit direction does not necessarily overlap with the target classifier direction. The attribute change is bi-directional, as the " + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "inline_equation", + "content": "w_{i}" + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "content": " can be negative in Eq. 3. Details of using other optimization approaches (e.g., linear approximation [18]) will be discussed in Appendix C." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 189, + 466, + 202 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 189, + 466, + 202 + ], + "spans": [ + { + "bbox": [ + 306, + 189, + 466, + 202 + ], + "type": "text", + "content": "3.4. 
Attribute Sensitivity Analysis" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 208, + 545, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 208, + 545, + 279 + ], + "spans": [ + { + "bbox": [ + 304, + 208, + 545, + 279 + ], + "type": "text", + "content": "Single-attribute counterfactual reflects the sensitivity of target model on the individual attribute. By optimizing independently along the edit direction for a single attribute and averaging the model probability changes over images, our model generates independent sensitivity score " + }, + { + "bbox": [ + 304, + 208, + 545, + 279 + ], + "type": "inline_equation", + "content": "h_i" + }, + { + "bbox": [ + 304, + 208, + 545, + 279 + ], + "type": "text", + "content": " for each attribute " + }, + { + "bbox": [ + 304, + 208, + 545, + 279 + ], + "type": "inline_equation", + "content": "a_i" + }, + { + "bbox": [ + 304, + 208, + 545, + 279 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 325, + 291, + 545, + 305 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 291, + 545, + 305 + ], + "spans": [ + { + "bbox": [ + 325, + 291, + 545, + 305 + ], + "type": "interline_equation", + "content": "h _ {i} = \\mathbb {E} _ {\\mathbf {x} \\sim \\mathcal {P} (\\mathbf {x}), \\hat {\\mathbf {x}} = \\mathrm {Z O O M} (\\mathbf {x}, a _ {i})} | f _ {\\theta} (\\mathbf {x}) - f _ {\\theta} (\\hat {\\mathbf {x}}) |. 
\\tag {11}", + "image_path": "448e395994d09b00225f77ca21f4b85795d2b755bc56b9177583da538e271b57.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "spans": [ + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "text", + "content": "The sensitivity score " + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "inline_equation", + "content": "h_i" + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "text", + "content": " is the probability difference between the original image " + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "text", + "content": " and generated image " + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{x}}" + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "text", + "content": ", at the most counterfactual point when changing attribute " + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "inline_equation", + "content": "a_i" + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "text", + "content": ". We synthesize a number of images from " + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\phi}" + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "text", + "content": ", then iteratively compute the sensitivity for each given attribute, and finally normalize all sensitivities to draw the histogram as shown in Fig. 4. 
The histogram indicates the sensitivity of the evaluated model " + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 304, + 314, + 545, + 434 + ], + "type": "text", + "content": " on each of the user-defined attributes. Higher sensitivity of one attribute means that the model is more easily affected by that attribute." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 443, + 443, + 456 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 443, + 443, + 456 + ], + "spans": [ + { + "bbox": [ + 306, + 443, + 443, + 456 + ], + "type": "text", + "content": "3.5. Counterfactual Training" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 462, + 545, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 462, + 545, + 582 + ], + "spans": [ + { + "bbox": [ + 304, + 462, + 545, + 582 + ], + "type": "text", + "content": "The multi-attribute counterfactual approach visualizes semantic combinations that cause the model to falter, providing valuable insights for enhancing the model's robustness. We naturally adopt the concept of iterative adversarial training [18] to robustify the target model. For each iteration, ZOOM receives the target model parameter and returns a batch of mutated counterfactual images with the model's original predictions as labels. 
Then the target model will be trained on the counterfactually-augmented images to achieve the robust goal:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 312, + 591, + 545, + 608 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 591, + 545, + 608 + ], + "spans": [ + { + "bbox": [ + 312, + 591, + 545, + 608 + ], + "type": "interline_equation", + "content": "\\theta^ {*} = \\underset {\\theta} {\\operatorname {a r g m i n}} \\mathbb {E} _ {\\mathbf {x} \\sim \\mathcal {P} (\\mathbf {x}), \\hat {\\mathbf {x}} = \\operatorname {Z O O M} (\\mathbf {x}, A)} L _ {C E} \\left(f _ {\\theta} (\\hat {\\mathbf {x}}), f _ {\\theta} (\\mathbf {x})\\right) \\tag {12}", + "image_path": "e30d546fe324acaf7d0ba3ea872d43bd5d6dffdc4d64f4d05f44f26e4e074ea7.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": "where batches of " + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": " are randomly sampled from the StyleGAN generator " + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\phi}" + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": ". In the following, we abbreviate the process as Counterfactual Training (CT). Note that, although not explicitly expressed in Eq. 12, the CT process is a min-max game. ZOOM synthesizes counterfactuals to maximize the variation of model prediction (while persevering the perceived ground truth), and the target model is learned with the counterfactual images to minimize the variation." 
+ } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11634" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 70, + 100, + 119 + ], + "blocks": [ + { + "bbox": [ + 51, + 70, + 100, + 119 + ], + "lines": [ + { + "bbox": [ + 51, + 70, + 100, + 119 + ], + "spans": [ + { + "bbox": [ + 51, + 70, + 100, + 119 + ], + "type": "image", + "image_path": "382f5252f5eef69ed8ac56fa86f515853e05d632939c48c6660966221ace8272.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 57, + 120, + 93, + 126 + ], + "lines": [ + { + "bbox": [ + 57, + 120, + 93, + 126 + ], + "spans": [ + { + "bbox": [ + 57, + 120, + 93, + 126 + ], + "type": "text", + "content": "Open Mouth" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 100, + 70, + 149, + 119 + ], + "blocks": [ + { + "bbox": [ + 100, + 70, + 149, + 119 + ], + "lines": [ + { + "bbox": [ + 100, + 70, + 149, + 119 + ], + "spans": [ + { + "bbox": [ + 100, + 70, + 149, + 119 + ], + "type": "image", + "image_path": "6e6f7765c0c6d9f1d2dfdc190d779bfe664e680f1fa60e96badfb38c36bde1f4.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 100, + 120, + 149, + 125 + ], + "lines": [ + { + "bbox": [ + 100, + 120, + 149, + 125 + ], + "spans": [ + { + "bbox": [ + 100, + 120, + 149, + 125 + ], + "type": "inline_equation", + "content": "\\frac{1}{2}x - 1 > 0" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 149, + 70, + 198, + 119 + ], + "blocks": [ 
+ { + "bbox": [ + 149, + 70, + 198, + 119 + ], + "lines": [ + { + "bbox": [ + 149, + 70, + 198, + 119 + ], + "spans": [ + { + "bbox": [ + 149, + 70, + 198, + 119 + ], + "type": "image", + "image_path": "d58c5d6850d439ce2439ccb870594aa331b04646e7cdef3bd0558f01e41331d5.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 149, + 120, + 198, + 125 + ], + "lines": [ + { + "bbox": [ + 149, + 120, + 198, + 125 + ], + "spans": [ + { + "bbox": [ + 149, + 120, + 198, + 125 + ], + "type": "inline_equation", + "content": "\\frac{1}{2}x - 1 > 0" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 198, + 70, + 246, + 119 + ], + "blocks": [ + { + "bbox": [ + 198, + 70, + 246, + 119 + ], + "lines": [ + { + "bbox": [ + 198, + 70, + 246, + 119 + ], + "spans": [ + { + "bbox": [ + 198, + 70, + 246, + 119 + ], + "type": "image", + "image_path": "71d46012b365d52f94b9082a48b36a98cd58e025a583732997d1de6c2f945647.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 246, + 70, + 294, + 119 + ], + "blocks": [ + { + "bbox": [ + 246, + 70, + 294, + 119 + ], + "lines": [ + { + "bbox": [ + 246, + 70, + 294, + 119 + ], + "spans": [ + { + "bbox": [ + 246, + 70, + 294, + 119 + ], + "type": "image", + "image_path": "e52e69fb3e1a7bb5d2200ba25acd415cca0e9d8625a7a2ccf1e06fd40de18944.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 250, + 120, + 290, + 125 + ], + "lines": [ + { + "bbox": [ + 250, + 120, + 290, + 125 + ], + "spans": [ + { + "bbox": [ + 250, + 120, + 290, + 125 + ], + "type": "text", + "content": "Closed Mouth" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 295, + 70, + 343, + 119 + ], + "blocks": [ + { + "bbox": [ + 295, + 70, + 343, + 119 + ], + 
"lines": [ + { + "bbox": [ + 295, + 70, + 343, + 119 + ], + "spans": [ + { + "bbox": [ + 295, + 70, + 343, + 119 + ], + "type": "image", + "image_path": "5998e6f16297004dcded0e568b7cc7fa1aeb1d4478fa0d868cedde8c94e8abb9.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 343, + 70, + 392, + 119 + ], + "blocks": [ + { + "bbox": [ + 343, + 70, + 392, + 119 + ], + "lines": [ + { + "bbox": [ + 343, + 70, + 392, + 119 + ], + "spans": [ + { + "bbox": [ + 343, + 70, + 392, + 119 + ], + "type": "image", + "image_path": "f8433beb32c5896e06a7aa642d6d2e15c4fb1518e7fd0631894bb952d4187d32.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 392, + 70, + 441, + 119 + ], + "blocks": [ + { + "bbox": [ + 392, + 70, + 441, + 119 + ], + "lines": [ + { + "bbox": [ + 392, + 70, + 441, + 119 + ], + "spans": [ + { + "bbox": [ + 392, + 70, + 441, + 119 + ], + "type": "image", + "image_path": "b6dd9a6086f785bfc6b1ac4c27d8833b5f9c5b1ab95041790dcf663b8f50f846.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 441, + 70, + 490, + 119 + ], + "blocks": [ + { + "bbox": [ + 441, + 70, + 490, + 119 + ], + "lines": [ + { + "bbox": [ + 441, + 70, + 490, + 119 + ], + "spans": [ + { + "bbox": [ + 441, + 70, + 490, + 119 + ], + "type": "image", + "image_path": "28703eb187d242dc891d55db71d5efddeceab3e8980a5c1277a0ce6afedee26e.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 491, + 70, + 540, + 119 + ], + "blocks": [ + { + "bbox": [ + 491, + 70, + 540, + 119 + ], + "lines": [ + { + "bbox": [ + 491, + 70, + 540, + 119 + ], + "spans": [ + { + "bbox": [ + 491, + 70, + 540, + 119 + ], + "type": "image", + "image_path": 
"7614e0fc2876e9a275a20e70f7ed9a9f2b21e3d09bda32999fac3b9d7e2e8d29.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 51, + 129, + 100, + 178 + ], + "blocks": [ + { + "bbox": [ + 51, + 129, + 100, + 178 + ], + "lines": [ + { + "bbox": [ + 51, + 129, + 100, + 178 + ], + "spans": [ + { + "bbox": [ + 51, + 129, + 100, + 178 + ], + "type": "image", + "image_path": "6a3a3474601bb128b509ebe8645c0463326834db7d238ec593ff709a98712b77.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 57, + 178, + 93, + 186 + ], + "lines": [ + { + "bbox": [ + 57, + 178, + 93, + 186 + ], + "spans": [ + { + "bbox": [ + 57, + 178, + 93, + 186 + ], + "type": "text", + "content": "Felidae Pupil" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 100, + 178, + 149, + 186 + ], + "lines": [ + { + "bbox": [ + 100, + 178, + 149, + 186 + ], + "spans": [ + { + "bbox": [ + 100, + 178, + 149, + 186 + ], + "type": "inline_equation", + "content": "\\frac{3}{1} + u + {4q} = 1 + u + {uq}" + }, + { + "bbox": [ + 100, + 178, + 149, + 186 + ], + "type": "text", + "content": " dH" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 100, + 129, + 149, + 178 + ], + "blocks": [ + { + "bbox": [ + 100, + 129, + 149, + 178 + ], + "lines": [ + { + "bbox": [ + 100, + 129, + 149, + 178 + ], + "spans": [ + { + "bbox": [ + 100, + 129, + 149, + 178 + ], + "type": "image", + "image_path": "4d6861e565df844de868c6952bcbc37a40413ae51cdc59f8416074b34ef31c4e.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 149, + 178, + 198, + 186 + ], + "lines": [ + { + "bbox": [ + 149, + 178, + 198, + 186 + ], + "spans": [ + { + "bbox": [ + 149, + 178, + 198, + 186 + ], + "type": "inline_equation", + "content": "\\frac{3}{1} + u + {4q} = 1 + 
u + {uq}" + }, + { + "bbox": [ + 149, + 178, + 198, + 186 + ], + "type": "text", + "content": " dH" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 149, + 129, + 198, + 178 + ], + "blocks": [ + { + "bbox": [ + 149, + 129, + 198, + 178 + ], + "lines": [ + { + "bbox": [ + 149, + 129, + 198, + 178 + ], + "spans": [ + { + "bbox": [ + 149, + 129, + 198, + 178 + ], + "type": "image", + "image_path": "4d8a7695d44eb9f470f709190f28a206de0c1aab536a4b0437ec3bc92cf5bd11.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 198, + 178, + 246, + 186 + ], + "lines": [ + { + "bbox": [ + 198, + 178, + 246, + 186 + ], + "spans": [ + { + "bbox": [ + 198, + 178, + 246, + 186 + ], + "type": "inline_equation", + "content": "\\frac{3}{1} + u + {4q} = 1 + u + {uq}" + }, + { + "bbox": [ + 198, + 178, + 246, + 186 + ], + "type": "text", + "content": " dH" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 198, + 129, + 246, + 178 + ], + "blocks": [ + { + "bbox": [ + 198, + 129, + 246, + 178 + ], + "lines": [ + { + "bbox": [ + 198, + 129, + 246, + 178 + ], + "spans": [ + { + "bbox": [ + 198, + 129, + 246, + 178 + ], + "type": "image", + "image_path": "d057aae1bdbe74f1ef3ade2252fe5600868dacc63d10ffa80489d5108f2465cd.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 246, + 129, + 294, + 178 + ], + "blocks": [ + { + "bbox": [ + 246, + 129, + 294, + 178 + ], + "lines": [ + { + "bbox": [ + 246, + 129, + 294, + 178 + ], + "spans": [ + { + "bbox": [ + 246, + 129, + 294, + 178 + ], + "type": "image", + "image_path": "7f0ee7ecc4f01729ebc6c197b05497ab9e0d2c9ad1e1974529aaba7ff496842a.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 250, + 178, + 290, + 
186 + ], + "lines": [ + { + "bbox": [ + 250, + 178, + 290, + 186 + ], + "spans": [ + { + "bbox": [ + 250, + 178, + 290, + 186 + ], + "type": "text", + "content": "Canidae Pupil" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 295, + 129, + 343, + 178 + ], + "blocks": [ + { + "bbox": [ + 295, + 129, + 343, + 178 + ], + "lines": [ + { + "bbox": [ + 295, + 129, + 343, + 178 + ], + "spans": [ + { + "bbox": [ + 295, + 129, + 343, + 178 + ], + "type": "image", + "image_path": "950ca79c0a21d73328cb6c2141ef3a183badae2ab7c780a9fdaf31d8eff33e9a.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 343, + 129, + 392, + 178 + ], + "blocks": [ + { + "bbox": [ + 343, + 129, + 392, + 178 + ], + "lines": [ + { + "bbox": [ + 343, + 129, + 392, + 178 + ], + "spans": [ + { + "bbox": [ + 343, + 129, + 392, + 178 + ], + "type": "image", + "image_path": "4a3892a846507c7e850b5c42d892d2c92afc49f1ac4bfb0c9247b91e1e0e3782.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 392, + 129, + 441, + 178 + ], + "blocks": [ + { + "bbox": [ + 392, + 129, + 441, + 178 + ], + "lines": [ + { + "bbox": [ + 392, + 129, + 441, + 178 + ], + "spans": [ + { + "bbox": [ + 392, + 129, + 441, + 178 + ], + "type": "image", + "image_path": "fff7032c14f3dd2afb30d5725689aeb7224f00b1835d24d2086789fd848604f4.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 441, + 129, + 490, + 178 + ], + "blocks": [ + { + "bbox": [ + 441, + 129, + 490, + 178 + ], + "lines": [ + { + "bbox": [ + 441, + 129, + 490, + 178 + ], + "spans": [ + { + "bbox": [ + 441, + 129, + 490, + 178 + ], + "type": "image", + "image_path": "f4ee52e5caa6487a1c008a41983ab01976ff119b38dd1cfdc50448e4f2284da8.jpg" + 
} + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 491, + 129, + 540, + 178 + ], + "blocks": [ + { + "bbox": [ + 491, + 129, + 540, + 178 + ], + "lines": [ + { + "bbox": [ + 491, + 129, + 540, + 178 + ], + "spans": [ + { + "bbox": [ + 491, + 129, + 540, + 178 + ], + "type": "image", + "image_path": "f01f0ae813036598b13cd750bae2ac75ae2ade78cfe29545fa92258a290b064b.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 47, + 220, + 170, + 304 + ], + "blocks": [ + { + "bbox": [ + 46, + 194, + 545, + 217 + ], + "lines": [ + { + "bbox": [ + 46, + 194, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 46, + 194, + 545, + 217 + ], + "type": "text", + "content": "Figure 3. Effect of progressively generating counterfactual images on (left) cat/dog classifier (0-Cat / 1-Dog), and (right) perceived age classifier (0-Senior / 1-Young). Model probability prediction during the process is attached at the top right corner." 
+ } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 47, + 220, + 170, + 304 + ], + "lines": [ + { + "bbox": [ + 47, + 220, + 170, + 304 + ], + "spans": [ + { + "bbox": [ + 47, + 220, + 170, + 304 + ], + "type": "image", + "image_path": "fa8994a05b33e168543b91bca0c47422be0966914496d537e87c679df5e3861f.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 171, + 220, + 293, + 304 + ], + "blocks": [ + { + "bbox": [ + 171, + 220, + 293, + 304 + ], + "lines": [ + { + "bbox": [ + 171, + 220, + 293, + 304 + ], + "spans": [ + { + "bbox": [ + 171, + 220, + 293, + 304 + ], + "type": "image", + "image_path": "727877f509e7e5d67c5b05d7d903a72ee905c64a7b8dbdfad753de413e2325fe.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 299, + 220, + 417, + 304 + ], + "blocks": [ + { + "bbox": [ + 299, + 220, + 417, + 304 + ], + "lines": [ + { + "bbox": [ + 299, + 220, + 417, + 304 + ], + "spans": [ + { + "bbox": [ + 299, + 220, + 417, + 304 + ], + "type": "image", + "image_path": "b77fcf9e992f6c5a9b18b69d5691eb5072855ddebb3e5b44b5ca39ecb67ab12b.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 419, + 220, + 541, + 304 + ], + "blocks": [ + { + "bbox": [ + 419, + 220, + 541, + 304 + ], + "lines": [ + { + "bbox": [ + 419, + 220, + 541, + 304 + ], + "spans": [ + { + "bbox": [ + 419, + 220, + 541, + 304 + ], + "type": "image", + "image_path": "5600c547b9caf898459903860dd888f4bcb0a9050f29754b6b7151f43b1597df.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 47, + 316, + 170, + 396 + ], + "blocks": [ + { + "bbox": [ + 157, + 306, + 436, + 316 + ], + "lines": [ + { + "bbox": [ + 157, + 306, + 436, + 316 + ], + 
"spans": [ + { + "bbox": [ + 157, + 306, + 436, + 316 + ], + "type": "text", + "content": "(a) Model diagnosis histograms generated by ZOOM on four facial attribute classifiers." + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 47, + 316, + 170, + 396 + ], + "lines": [ + { + "bbox": [ + 47, + 316, + 170, + 396 + ], + "spans": [ + { + "bbox": [ + 47, + 316, + 170, + 396 + ], + "type": "image", + "image_path": "1847122c4f8c41b0eb8a40cf92eecf152e7305a956b9ffb3b71751c5c69b6fd7.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 110, + 399, + 483, + 410 + ], + "lines": [ + { + "bbox": [ + 110, + 399, + 483, + 410 + ], + "spans": [ + { + "bbox": [ + 110, + 399, + 483, + 410 + ], + "type": "text", + "content": "(b) Model diagnosis histograms generated by ZOOM on four classifiers trained on manually-crafted imbalance data." + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 45, + 417, + 545, + 440 + ], + "lines": [ + { + "bbox": [ + 45, + 417, + 545, + 440 + ], + "spans": [ + { + "bbox": [ + 45, + 417, + 545, + 440 + ], + "type": "text", + "content": "Figure 4. Model diagnosis histograms generated by ZOOM. The vertical axis values reflect the attribute sensitivities calculated by averaging the model probability change over all sampled images. The horizontal axis is the attribute space input by user." 
+ } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_caption" + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 171, + 316, + 293, + 396 + ], + "blocks": [ + { + "bbox": [ + 171, + 316, + 293, + 396 + ], + "lines": [ + { + "bbox": [ + 171, + 316, + 293, + 396 + ], + "spans": [ + { + "bbox": [ + 171, + 316, + 293, + 396 + ], + "type": "image", + "image_path": "7aefa2cf30afb6773a8d83624c5cc365680c7a839dfb50d5f30e1c4037506cf3.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + } + ], + "index": 36 + }, + { + "type": "image", + "bbox": [ + 295, + 316, + 417, + 396 + ], + "blocks": [ + { + "bbox": [ + 295, + 316, + 417, + 396 + ], + "lines": [ + { + "bbox": [ + 295, + 316, + 417, + 396 + ], + "spans": [ + { + "bbox": [ + 295, + 316, + 417, + 396 + ], + "type": "image", + "image_path": "3b29ecd3f3d5ddb487e1fec77c9ab7e51b89f73310a02eefdbed083bdd755f27.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 419, + 316, + 541, + 396 + ], + "blocks": [ + { + "bbox": [ + 419, + 316, + 541, + 396 + ], + "lines": [ + { + "bbox": [ + 419, + 316, + 541, + 396 + ], + "spans": [ + { + "bbox": [ + 419, + 316, + 541, + 396 + ], + "type": "image", + "image_path": "e39ef17850764b0a9307a0e37db6c248af616552d7b6550dc1ceacd5997a6f2f.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_body" + } + ], + "index": 38 + }, + { + "bbox": [ + 46, + 451, + 172, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 451, + 172, + 464 + ], + "spans": [ + { + "bbox": [ + 46, + 451, + 172, + 464 + ], + "type": "text", + "content": "4. 
Experimental Results" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 46, + 471, + 287, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 471, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 46, + 471, + 287, + 555 + ], + "type": "text", + "content": "This section describes the experimental validations on the effectiveness and reliability of ZOOM. First, we describe the model setup in Sec. 4.1. Sec. 4.2 and Sec. 4.3 visualize and validate the model diagnosis results for the single-attribute setting. In Sec. 4.4, we show results on synthesized multiple-attribute counterfactual images and apply them to counterfactual training." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 46, + 561, + 129, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 561, + 129, + 574 + ], + "spans": [ + { + "bbox": [ + 46, + 561, + 129, + 574 + ], + "type": "text", + "content": "4.1. Model Setup" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 46, + 579, + 287, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 579, + 287, + 639 + ], + "spans": [ + { + "bbox": [ + 46, + 579, + 287, + 639 + ], + "type": "text", + "content": "Pre-trained models: We used StyleGAN2-ADA [11] pretrained on FFHQ [12] and AFHQ [1] as our base generative networks, and the pre-trained CLIP model [24] which is parameterized by ViT-B/32. We followed StyleCLIP [21] setups to compute the channel relevance matrices " + }, + { + "bbox": [ + 46, + 579, + 287, + 639 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 46, + 579, + 287, + 639 + ], + "type": "text", + "content": "."
+ } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 46, + 639, + 287, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 639, + 287, + 687 + ], + "spans": [ + { + "bbox": [ + 46, + 639, + 287, + 687 + ], + "type": "text", + "content": "Target models: Our classifier models are ResNet50 with single fully-connected head initialized by TorchVision1. In training the binary classifiers, we use the Adam optimizer with learning rate 0.001 and batch size 128. We train binary" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 305, + 453, + 545, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 453, + 545, + 500 + ], + "spans": [ + { + "bbox": [ + 305, + 453, + 545, + 500 + ], + "type": "text", + "content": "classifiers for Eyeglasses, Perceived Gender, Mustache, Perceived Age attributes on CelebA and for cat/dog classification on AFHQ. For the 98-keypoint detectors, we used the HR-Net architecture [31] on WFLW [33]." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 305, + 512, + 522, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 512, + 522, + 525 + ], + "spans": [ + { + "bbox": [ + 305, + 512, + 522, + 525 + ], + "type": "text", + "content": "4.2. Visual Model Diagnosis: Single-Attribute" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 304, + 533, + 545, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 533, + 545, + 604 + ], + "spans": [ + { + "bbox": [ + 304, + 533, + 545, + 604 + ], + "type": "text", + "content": "Understanding where deep learning model fails is an essential step towards building trustworthy models. Our proposed work allows us to generate counterfactual images (Sec. 3.3) and provide insights on sensitivities of the target model (Sec. 3.4). This section visualizes the counterfactual images in which only one attribute is modified at a time." 
+ } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 304, + 605, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 545, + 712 + ], + "type": "text", + "content": "Fig. 3 shows the single-attribute counterfactual images. Interestingly (but not unexpectedly), we can see that reducing the hair length or joyfulness causes the age classifier more likely to label the face to an older person. Note that our approach is extendable to multiple domains, as we change the cat-like pupil to dog-like, the dog-cat classification tends towards the dog. Using the counterfactual images, we can conduct model diagnosis with the method mentioned in Sec. 3.4, on which attributes the model is sen" + } + ] + } + ], + "index": 50 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 46, + 693, + 280, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 693, + 280, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 693, + 280, + 712 + ], + "type": "text", + "content": "1https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11635" + } + ] + } + ], + "index": 51 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 108 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 108 + ], + "type": "text", + "content": "sitive to. In the histogram generated in model diagnosis, a higher bar means the model is more sensitive toward the corresponding attribute." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 109, + 288, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 109, + 288, + 276 + ], + "spans": [ + { + "bbox": [ + 46, + 109, + 288, + 276 + ], + "type": "text", + "content": "Fig. 4a shows the model diagnosis histograms on regularly-trained classifiers. For instance, the cat/dog classifier histogram shows outstanding sensitivity to green eyes and vertical pupil. The analysis is intuitive since these are cat-biased traits rarely observed in dog photos. Moreover, the histogram of eyeglasses classifier shows that the mutation on bushy eyebrows is more influential for flipping the model prediction. It potentially reveals the positional correlation between eyeglasses and bushy eyebrows. The advantage of single-attribute model diagnosis is that the score of each attribute in the histogram are independent from other attributes, enabling unambiguous understanding of exact semantics that make the model fail. Diagnosis results for additional target models can be found in Appendix B." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 284, + 243, + 297 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 284, + 243, + 297 + ], + "spans": [ + { + "bbox": [ + 47, + 284, + 243, + 297 + ], + "type": "text", + "content": "4.3. Validation of Visual Model Diagnosis" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 303, + 287, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 303, + 287, + 399 + ], + "spans": [ + { + "bbox": [ + 46, + 303, + 287, + 399 + ], + "type": "text", + "content": "Evaluating whether our zero-shot sensitivity histograms (Fig. 4) explain the true vulnerability is a difficult task, since we do not have access to a sufficiently large and balanced test set fully annotated in an open-vocabulary setting. To verify the performance, we create synthetically imbalanced cases where the model bias is known. 
We then compare our results with a supervised diagnosis setting [17]. In addition, we will validate the decoupling of the attributes by CLIP." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 409, + 213, + 421 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 409, + 213, + 421 + ], + "spans": [ + { + "bbox": [ + 47, + 409, + 213, + 421 + ], + "type": "text", + "content": "4.3.1 Creating imbalanced classifiers" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 426, + 287, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 426, + 287, + 664 + ], + "spans": [ + { + "bbox": [ + 46, + 426, + 287, + 664 + ], + "type": "text", + "content": "In order to evaluate whether our sensitivity histogram is correct, we train classifiers that are highly imbalanced towards a known attribute and see whether ZOOM can detect the sensitivity w.r.t the attribute. For instance, when training the perceived-age classifier (binarized as Young in CelebA), we created a dataset on which the trained classifier is strongly sensitive to Bangs (hair over forehead). The custom dataset is a CelebA training subset that consists of 20,200 images. More specifically, there are 10,000 images that have both young people that have bangs, represented as (1, 1), and 10,000 images of people that are not young and have no bangs, represented as (0, 0). The remaining combinations of (1, 0) and (0, 1) have only 100 images. With this imbalanced dataset, bangs is the attribute that dominantly correlates with whether the person is young, and hence the perceived-age classifier would be highly sensitive towards bangs. See Fig. 5 (the right histograms) for an illustration of the sensitivity histogram computed by our method for the case of an age classifier with bangs (top) and lipstick (bottom) being imbalanced."
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "text", + "content": "We trained multiple imbalanced classifiers with this methodology, and visualize the model diagnosis histograms of these imbalanced classifiers in Fig. 4b. We can observe that the ZOOM histograms successfully detect the" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 306, + 72, + 544, + 267 + ], + "blocks": [ + { + "bbox": [ + 306, + 72, + 544, + 267 + ], + "lines": [ + { + "bbox": [ + 306, + 72, + 544, + 267 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 544, + 267 + ], + "type": "image", + "image_path": "03c482988bb95c3fb2b913e7d04e7753f129604886046e43297446927fd39540.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 273, + 545, + 296 + ], + "lines": [ + { + "bbox": [ + 305, + 273, + 545, + 296 + ], + "spans": [ + { + "bbox": [ + 305, + 273, + 545, + 296 + ], + "type": "text", + "content": "Figure 5. The sensitivity of the age classifier is evaluated with ZOOM (right) and AttGAN (left), achieving comparable results." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 310, + 304, + 423, + 411 + ], + "blocks": [ + { + "bbox": [ + 310, + 304, + 423, + 411 + ], + "lines": [ + { + "bbox": [ + 310, + 304, + 423, + 411 + ], + "spans": [ + { + "bbox": [ + 310, + 304, + 423, + 411 + ], + "type": "image", + "image_path": "868104e74e862e5874b0fc8bdea2db78e9c3a4eddc9dca8eb7eac1b667dd688e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 329, + 414, + 403, + 423 + ], + "lines": [ + { + "bbox": [ + 329, + 414, + 403, + 423 + ], + "spans": [ + { + "bbox": [ + 329, + 414, + 403, + 423 + ], + "type": "text", + "content": "(a) Mustache classifier" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 305, + 427, + 545, + 460 + ], + "lines": [ + { + "bbox": [ + 305, + 427, + 545, + 460 + ], + "spans": [ + { + "bbox": [ + 305, + 427, + 545, + 460 + ], + "type": "text", + "content": "Figure 6. Confusion matrix of CLIP score variation (vertical axis) when perturbing attributes (horizontal axis). This shows that attributes in ZOOM are highly decoupled." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 427, + 304, + 542, + 411 + ], + "blocks": [ + { + "bbox": [ + 427, + 304, + 542, + 411 + ], + "lines": [ + { + "bbox": [ + 427, + 304, + 542, + 411 + ], + "spans": [ + { + "bbox": [ + 427, + 304, + 542, + 411 + ], + "type": "image", + "image_path": "dcbaf559d890ab0d776db1dfd7c533b9d30df76634298628b22e1c7d2c277833.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 440, + 414, + 527, + 423 + ], + "lines": [ + { + "bbox": [ + 440, + 414, + 527, + 423 + ], + "spans": [ + { + "bbox": [ + 440, + 414, + 527, + 423 + ], + "type": "text", + "content": "(b) Perceived age classifier" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 472, + 545, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 472, + 545, + 497 + ], + "spans": [ + { + "bbox": [ + 305, + 472, + 545, + 497 + ], + "type": "text", + "content": "synthetically-made bias, which are shown as the highest bars in histograms. See the caption for more information." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 506, + 503, + 517 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 506, + 503, + 517 + ], + "spans": [ + { + "bbox": [ + 306, + 506, + 503, + 517 + ], + "type": "text", + "content": "4.3.2 Comparison with supervised diagnosis" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 521, + 545, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 545, + 677 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 545, + 677 + ], + "type": "text", + "content": "We also validated our histogram by comparing it with the case in which we have access to a generative model that has been explicitly trained to disentangle attributes. 
We follow the work on [17] and used AttGAN [6] trained on the CelebA training set over 15 attributes2. After the training converged, we performed adversarial learning in the attribute space of AttGAN and create a sensitivity histogram using the same approach as Sec. 3.4. Fig. 5 shows the result of this method on the perceived-age classifier which is made biased towards bangs. As anticipated, the AttGAN histogram (left) corroborates the histogram derived from our method (right). Interestingly, unlike ZOOM, AttGAN show less sensitivity to remaining attributes. This is likely" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 306, + 683, + 545, + 713 + ], + "type": "text", + "content": "Bald, Bangs, Black_Hair, Blond_Hair, Brown_Hair, Bushy_Eyebrows, Eyeglasses, Male, Mouth_Slightly_Open, Mustache, No_Beard, Pale_Skin, Young, Smiling, Wearing_Lipstick."
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11636" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 60, + 81, + 108, + 130 + ], + "blocks": [ + { + "bbox": [ + 50, + 94, + 59, + 177 + ], + "lines": [ + { + "bbox": [ + 50, + 94, + 59, + 177 + ], + "spans": [ + { + "bbox": [ + 50, + 94, + 59, + 177 + ], + "type": "text", + "content": "Counterfactual Original" + } + ] + } + ], + "index": 3, + "angle": 270, + "type": "image_caption" + }, + { + "bbox": [ + 60, + 81, + 108, + 130 + ], + "lines": [ + { + "bbox": [ + 60, + 81, + 108, + 130 + ], + "spans": [ + { + "bbox": [ + 60, + 81, + 108, + 130 + ], + "type": "image", + "image_path": "971f72c88bb26074e4f23622c9671dc69ad2c5c8ef52b1de0e3abff7f432ed84.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 60, + 131, + 108, + 177 + ], + "blocks": [ + { + "bbox": [ + 60, + 131, + 108, + 177 + ], + "lines": [ + { + "bbox": [ + 60, + 131, + 108, + 177 + ], + "spans": [ + { + "bbox": [ + 60, + 131, + 108, + 177 + ], + "type": "image", + "image_path": "59a77b820be44c6ef95e06c79ed435d4aea09f30e59a51a44334ce5918612f50.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 187, + 525, + 198 + ], + "lines": [ + { + "bbox": [ + 67, + 187, + 525, + 198 + ], + "spans": [ + { + "bbox": [ + 67, + 187, + 525, + 198 + ], + "type": "text", + "content": "Figure 7. Multi-attribute counterfactual in faces. The model probability is documented in the upper right corner of each image." 
+ } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 108, + 81, + 156, + 130 + ], + "blocks": [ + { + "bbox": [ + 108, + 81, + 156, + 130 + ], + "lines": [ + { + "bbox": [ + 108, + 81, + 156, + 130 + ], + "spans": [ + { + "bbox": [ + 108, + 81, + 156, + 130 + ], + "type": "image", + "image_path": "f9405d19790d274a6d1931829f8e4e575bd3f7cc28b5941c5c136c6e6fe50c9f.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 108, + 131, + 156, + 177 + ], + "blocks": [ + { + "bbox": [ + 108, + 131, + 156, + 177 + ], + "lines": [ + { + "bbox": [ + 108, + 131, + 156, + 177 + ], + "spans": [ + { + "bbox": [ + 108, + 131, + 156, + 177 + ], + "type": "image", + "image_path": "0b74918e51861880fb6780c140d8792c3dd7328306aa4b189e381e22a0959b59.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 157, + 81, + 204, + 130 + ], + "blocks": [ + { + "bbox": [ + 157, + 81, + 204, + 130 + ], + "lines": [ + { + "bbox": [ + 157, + 81, + 204, + 130 + ], + "spans": [ + { + "bbox": [ + 157, + 81, + 204, + 130 + ], + "type": "image", + "image_path": "62d9231115f02f5bfbba46a4a4e0d5568e7376856e2196457f2c2cadb092c30d.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 157, + 131, + 204, + 177 + ], + "blocks": [ + { + "bbox": [ + 157, + 131, + 204, + 177 + ], + "lines": [ + { + "bbox": [ + 157, + 131, + 204, + 177 + ], + "spans": [ + { + "bbox": [ + 157, + 131, + 204, + 177 + ], + "type": "image", + "image_path": "95c9e54324f64ecb0c2b49a24c3925de7336d3b45bf0e60163eb774073f3797e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 205, + 81, + 252, + 130 + ], + "blocks": [ + { + "bbox": [ + 
205, + 81, + 252, + 130 + ], + "lines": [ + { + "bbox": [ + 205, + 81, + 252, + 130 + ], + "spans": [ + { + "bbox": [ + 205, + 81, + 252, + 130 + ], + "type": "image", + "image_path": "35b5f6e20e6167a44791b6cb91234ca74c030f5e4c36171689c395c8065cd4e6.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 205, + 131, + 252, + 177 + ], + "blocks": [ + { + "bbox": [ + 205, + 131, + 252, + 177 + ], + "lines": [ + { + "bbox": [ + 205, + 131, + 252, + 177 + ], + "spans": [ + { + "bbox": [ + 205, + 131, + 252, + 177 + ], + "type": "image", + "image_path": "97e45ff9d147e87912d810e4d276a4c527a7d1ad64ea900907f4c91ed774a842.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 253, + 81, + 300, + 130 + ], + "blocks": [ + { + "bbox": [ + 253, + 81, + 300, + 130 + ], + "lines": [ + { + "bbox": [ + 253, + 81, + 300, + 130 + ], + "spans": [ + { + "bbox": [ + 253, + 81, + 300, + 130 + ], + "type": "image", + "image_path": "da9051d7fff8dc80e0c62ad7ee8bb1f22aee7c66ceee600246b30ecd46b68ec9.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 253, + 131, + 300, + 177 + ], + "blocks": [ + { + "bbox": [ + 253, + 131, + 300, + 177 + ], + "lines": [ + { + "bbox": [ + 253, + 131, + 300, + 177 + ], + "spans": [ + { + "bbox": [ + 253, + 131, + 300, + 177 + ], + "type": "image", + "image_path": "fb66c5f6fd296b219f4e39383cf0d5e8611263f518e77b88dcc07bac8524b5fb.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 301, + 81, + 349, + 130 + ], + "blocks": [ + { + "bbox": [ + 301, + 81, + 349, + 130 + ], + "lines": [ + { + "bbox": [ + 301, + 81, + 349, + 130 + ], + "spans": [ + { + "bbox": [ + 301, + 81, + 349, + 130 + ], + "type": "image", + "image_path": 
"64ca890b2eb603a0c45fbe34384d65cb3f74b4fb1c434434a634b531abed726f.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 301, + 131, + 349, + 177 + ], + "blocks": [ + { + "bbox": [ + 301, + 131, + 349, + 177 + ], + "lines": [ + { + "bbox": [ + 301, + 131, + 349, + 177 + ], + "spans": [ + { + "bbox": [ + 301, + 131, + 349, + 177 + ], + "type": "image", + "image_path": "75a69bd63b6bb345d672a5f742d694288aff8578f206e84032f24c69b9a9b894.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 350, + 81, + 398, + 130 + ], + "blocks": [ + { + "bbox": [ + 350, + 81, + 398, + 130 + ], + "lines": [ + { + "bbox": [ + 350, + 81, + 398, + 130 + ], + "spans": [ + { + "bbox": [ + 350, + 81, + 398, + 130 + ], + "type": "image", + "image_path": "ca61c210042de2fc0e449802d99fb5d4d50854952d4b0d1c3b744eeb1bc247eb.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 350, + 131, + 398, + 177 + ], + "blocks": [ + { + "bbox": [ + 350, + 131, + 398, + 177 + ], + "lines": [ + { + "bbox": [ + 350, + 131, + 398, + 177 + ], + "spans": [ + { + "bbox": [ + 350, + 131, + 398, + 177 + ], + "type": "image", + "image_path": "946eed3c0bbd26dbe64a2cfba69b326b2847fd1f8cba97cbe6ac25e8917ff046.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 399, + 81, + 447, + 130 + ], + "blocks": [ + { + "bbox": [ + 399, + 81, + 447, + 130 + ], + "lines": [ + { + "bbox": [ + 399, + 81, + 447, + 130 + ], + "spans": [ + { + "bbox": [ + 399, + 81, + 447, + 130 + ], + "type": "image", + "image_path": "d123ff6aac734384ccd47ae0c44a41de25a370497a5d42d8c5f027dda49c9db5.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", 
+ "bbox": [ + 399, + 131, + 447, + 177 + ], + "blocks": [ + { + "bbox": [ + 399, + 131, + 447, + 177 + ], + "lines": [ + { + "bbox": [ + 399, + 131, + 447, + 177 + ], + "spans": [ + { + "bbox": [ + 399, + 131, + 447, + 177 + ], + "type": "image", + "image_path": "0ef9843e612c9e58dda93b42683eadb3af0fd3f8500c8a006c5c82c974bc2501.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 448, + 81, + 494, + 130 + ], + "blocks": [ + { + "bbox": [ + 448, + 81, + 494, + 130 + ], + "lines": [ + { + "bbox": [ + 448, + 81, + 494, + 130 + ], + "spans": [ + { + "bbox": [ + 448, + 81, + 494, + 130 + ], + "type": "image", + "image_path": "117f9d63d2885bbdb0be3707734cbab387bc97bca6cd5b6a8ae619f0b9996fa3.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 448, + 131, + 494, + 177 + ], + "blocks": [ + { + "bbox": [ + 448, + 131, + 494, + 177 + ], + "lines": [ + { + "bbox": [ + 448, + 131, + 494, + 177 + ], + "spans": [ + { + "bbox": [ + 448, + 131, + 494, + 177 + ], + "type": "image", + "image_path": "f48d716b2acde35b9f94c46749523a8fb01e8260331bb184c25132e442f4c22e.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 496, + 81, + 543, + 130 + ], + "blocks": [ + { + "bbox": [ + 496, + 81, + 543, + 130 + ], + "lines": [ + { + "bbox": [ + 496, + 81, + 543, + 130 + ], + "spans": [ + { + "bbox": [ + 496, + 81, + 543, + 130 + ], + "type": "image", + "image_path": "c909bca5f6f918435a78f23e7f3cbf31b341cf4d888a1eecd8568c29216efa66.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 496, + 131, + 543, + 177 + ], + "blocks": [ + { + "bbox": [ + 496, + 131, + 543, + 177 + ], + "lines": [ + { + "bbox": [ + 496, + 131, + 543, + 177 + ], + "spans": [ + { + 
"bbox": [ + 496, + 131, + 543, + 177 + ], + "type": "image", + "image_path": "2e4a4d43d388288a2a74b5386a43481c22c06d07edd49bac51178d01e8a5a1bf.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "bbox": [ + 46, + 209, + 286, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 209, + 286, + 292 + ], + "spans": [ + { + "bbox": [ + 46, + 209, + 286, + 292 + ], + "type": "text", + "content": "because AttGAN has a latent space learned in a supervised manner and hence attributes are better disentangled than with StyleGAN. Note that AttGAN is trained with a fixed set of attributes; if a new attribute of interest is introduced, the dataset needs to be re-labeled and AttGAN retrained. ZOOM, however, merely calls for the addition of a new text prompt. More results in Appendix B." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 47, + 301, + 253, + 312 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 301, + 253, + 312 + ], + "spans": [ + { + "bbox": [ + 47, + 301, + 253, + 312 + ], + "type": "text", + "content": "4.3.3 Measuring disentanglement of attributes" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 46, + 317, + 286, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 317, + 286, + 376 + ], + "spans": [ + { + "bbox": [ + 46, + 317, + 286, + 376 + ], + "type": "text", + "content": "Previous works demonstrated that the StyleGAN's latent space can be entangled [2, 27], adding undesired dependencies when searching single-attribute counterfactuals. This section verifies that our framework can disentangle the attributes and mostly edit the desirable attributes." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 46, + 377, + 286, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 377, + 286, + 567 + ], + "spans": [ + { + "bbox": [ + 46, + 377, + 286, + 567 + ], + "type": "text", + "content": "We use CLIP as a super annotator to measure attribute changes during single-attribute modifications. For 1,000 images, we record the attribute change after performing adversarial learning in each attribute, and average the attribute score change. Fig. 6 shows the confusion matrix during single-attribute counterfactual synthesis. The horizontal axis is the attribute being edited during the optimization, and the vertical axis represents the CLIP prediction changed by the process. For instance, the first column of Fig. 6a is generated when we optimize over bangs for the mustache classifier. We record the CLIP prediction variation. It clearly shows that bangs is the dominant attribute changing during the optimization. From the main diagonal of matrices, it is evident that the ZOOM mostly perturbs the attribute of interest. The results indicate reasonable disentanglement among attributes." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 47, + 575, + 264, + 587 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 575, + 264, + 587 + ], + "spans": [ + { + "bbox": [ + 47, + 575, + 264, + 587 + ], + "type": "text", + "content": "4.4. Visual Model Diagnosis: Multi-Attributes" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 46, + 594, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 286, + 713 + ], + "type": "text", + "content": "In the previous sections, we have visualized and validated single-attribute model diagnosis histograms and counterfactual images. 
In this section, we will assess ZOOM's ability to produce counterfactual images by concurrently exploring multiple attributes within " + }, + { + "bbox": [ + 46, + 594, + 286, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 46, + 594, + 286, + 713 + ], + "type": "text", + "content": ", the domain of user-defined attributes. The approach conducts multi-attribute counterfactual searches across various edit directions, identifying distinct semantic combinations that result in the target model's failure. By doing so, we can effectively create more powerful counterfactuals images (see Fig. 9)." + } + ] + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 308, + 215, + 353, + 251 + ], + "blocks": [ + { + "bbox": [ + 308, + 215, + 353, + 251 + ], + "lines": [ + { + "bbox": [ + 308, + 215, + 353, + 251 + ], + "spans": [ + { + "bbox": [ + 308, + 215, + 353, + 251 + ], + "type": "image", + "image_path": "d63f194e849545ee4d327760565ffb442da574671e4f852701c57f64f75ccd06.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 308, + 251, + 353, + 289 + ], + "blocks": [ + { + "bbox": [ + 308, + 251, + 353, + 289 + ], + "lines": [ + { + "bbox": [ + 308, + 251, + 353, + 289 + ], + "spans": [ + { + "bbox": [ + 308, + 251, + 353, + 289 + ], + "type": "image", + "image_path": "2a8b19bda7e3c098f3b7515ecb4c437e82aff219568f13b46ca980369ed954b0.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 298, + 545, + 320 + ], + "lines": [ + { + "bbox": [ + 306, + 298, + 545, + 320 + ], + "spans": [ + { + "bbox": [ + 306, + 298, + 545, + 320 + ], + "type": "text", + "content": "Figure 8. Multi-attribute counterfactual on Cat/Dog classifier. The number in each image is the predicted probability of being a dog." 
+ } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_caption" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 354, + 215, + 392, + 251 + ], + "blocks": [ + { + "bbox": [ + 386, + 206, + 474, + 214 + ], + "lines": [ + { + "bbox": [ + 386, + 206, + 474, + 214 + ], + "spans": [ + { + "bbox": [ + 386, + 206, + 474, + 214 + ], + "type": "text", + "content": "Cat / Dog Classifier (0-Cat / 1-Dog)" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 354, + 215, + 392, + 251 + ], + "lines": [ + { + "bbox": [ + 354, + 215, + 392, + 251 + ], + "spans": [ + { + "bbox": [ + 354, + 215, + 392, + 251 + ], + "type": "image", + "image_path": "97575becca7c2b0a58f05e7bd3efea1ce8f805407f9a9a8bc5b0b8fa133de821.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_body" + } + ], + "index": 34 + }, + { + "type": "image", + "bbox": [ + 354, + 252, + 392, + 289 + ], + "blocks": [ + { + "bbox": [ + 354, + 252, + 392, + 289 + ], + "lines": [ + { + "bbox": [ + 354, + 252, + 392, + 289 + ], + "spans": [ + { + "bbox": [ + 354, + 252, + 392, + 289 + ], + "type": "image", + "image_path": "9f015960384356c923f938a96ad155d7920e2f40848154f6927b5ca61d80c9da.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 392, + 215, + 430, + 251 + ], + "blocks": [ + { + "bbox": [ + 392, + 215, + 430, + 251 + ], + "lines": [ + { + "bbox": [ + 392, + 215, + 430, + 251 + ], + "spans": [ + { + "bbox": [ + 392, + 215, + 430, + 251 + ], + "type": "image", + "image_path": "9dc79a7da6f29d71c72a5adf20673275f04537bf9eea485fde0a25d54eae61fd.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + } + ], + "index": 36 + }, + { + "type": "image", + "bbox": [ + 392, + 252, + 430, + 289 + ], + "blocks": [ + { + "bbox": [ + 392, + 252, + 430, + 289 + ], + "lines": [ + { + "bbox": [ + 392, + 252, + 430, + 289 + ], + "spans": [ + { + "bbox": [ + 
392, + 252, + 430, + 289 + ], + "type": "image", + "image_path": "b0ef6e22515f9fbe3fe524912b72ea323ef911ac09d0c03ab0e227513c25dffc.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 430, + 215, + 468, + 251 + ], + "blocks": [ + { + "bbox": [ + 430, + 215, + 468, + 251 + ], + "lines": [ + { + "bbox": [ + 430, + 215, + 468, + 251 + ], + "spans": [ + { + "bbox": [ + 430, + 215, + 468, + 251 + ], + "type": "image", + "image_path": "eacf43272b02d9d8200813f9d64795ddfee3402d5fde9196f88ee01631e050d5.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_body" + } + ], + "index": 38 + }, + { + "type": "image", + "bbox": [ + 430, + 252, + 468, + 289 + ], + "blocks": [ + { + "bbox": [ + 430, + 252, + 468, + 289 + ], + "lines": [ + { + "bbox": [ + 430, + 252, + 468, + 289 + ], + "spans": [ + { + "bbox": [ + 430, + 252, + 468, + 289 + ], + "type": "image", + "image_path": "00c3b77821922449ec954cf2a408c8a712da33871a82ab4f911ece894aeb4d58.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + } + ], + "index": 39 + }, + { + "type": "image", + "bbox": [ + 468, + 215, + 506, + 251 + ], + "blocks": [ + { + "bbox": [ + 468, + 215, + 506, + 251 + ], + "lines": [ + { + "bbox": [ + 468, + 215, + 506, + 251 + ], + "spans": [ + { + "bbox": [ + 468, + 215, + 506, + 251 + ], + "type": "image", + "image_path": "73ef0f2cf59bc1d4b27d02b70df889a8d0b602bd88ce7e79c4b3b4e6e43cb0e9.jpg" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_body" + } + ], + "index": 40 + }, + { + "type": "image", + "bbox": [ + 468, + 252, + 506, + 289 + ], + "blocks": [ + { + "bbox": [ + 468, + 252, + 506, + 289 + ], + "lines": [ + { + "bbox": [ + 468, + 252, + 506, + 289 + ], + "spans": [ + { + "bbox": [ + 468, + 252, + 506, + 289 + ], + "type": "image", + "image_path": "44bba462e0e42241c95319ec2b01d11c8a422c41b0ce523d8f3136a9bc721633.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + 
"type": "image_body" + } + ], + "index": 41 + }, + { + "type": "image", + "bbox": [ + 507, + 215, + 543, + 251 + ], + "blocks": [ + { + "bbox": [ + 507, + 215, + 543, + 251 + ], + "lines": [ + { + "bbox": [ + 507, + 215, + 543, + 251 + ], + "spans": [ + { + "bbox": [ + 507, + 215, + 543, + 251 + ], + "type": "image", + "image_path": "93e5220bf68cbf225c272fed33def41a45c1fad35b0f715590fc34da455c23ed.jpg" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_body" + } + ], + "index": 42 + }, + { + "type": "image", + "bbox": [ + 507, + 252, + 543, + 289 + ], + "blocks": [ + { + "bbox": [ + 507, + 252, + 543, + 289 + ], + "lines": [ + { + "bbox": [ + 507, + 252, + 543, + 289 + ], + "spans": [ + { + "bbox": [ + 507, + 252, + 543, + 289 + ], + "type": "image", + "image_path": "b8335552498ed460f4d22b2fd05b6d4609a5b26d9c180d8d4bc002a607f780d4.jpg" + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_body" + } + ], + "index": 43 + }, + { + "type": "image", + "bbox": [ + 308, + 332, + 355, + 378 + ], + "blocks": [ + { + "bbox": [ + 308, + 332, + 355, + 378 + ], + "lines": [ + { + "bbox": [ + 308, + 332, + 355, + 378 + ], + "spans": [ + { + "bbox": [ + 308, + 332, + 355, + 378 + ], + "type": "image", + "image_path": "5d137f7d3aa9d683b7868c27d6b50d55214821f413f2a649b34ec317e8ccc450.jpg" + } + ] + } + ], + "index": 45, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 310, + 380, + 353, + 386 + ], + "lines": [ + { + "bbox": [ + 310, + 380, + 353, + 386 + ], + "spans": [ + { + "bbox": [ + 310, + 380, + 353, + 386 + ], + "type": "text", + "content": "Original Reference" + } + ] + } + ], + "index": 46, + "angle": 0, + "type": "image_caption" + } + ], + "index": 45 + }, + { + "type": "image", + "bbox": [ + 356, + 332, + 402, + 378 + ], + "blocks": [ + { + "bbox": [ + 356, + 332, + 402, + 378 + ], + "lines": [ + { + "bbox": [ + 356, + 332, + 402, + 378 + ], + "spans": [ + { + "bbox": [ + 356, + 332, + 402, + 378 + ], + "type": "image", + "image_path": 
"cd94d5ea6748597fb9c03d4f85d7fe91a324a817224d3587d29b461780e9954f.jpg" + } + ] + } + ], + "index": 47, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 362, + 380, + 395, + 386 + ], + "lines": [ + { + "bbox": [ + 362, + 380, + 395, + 386 + ], + "spans": [ + { + "bbox": [ + 362, + 380, + 395, + 386 + ], + "type": "text", + "content": "SAC by Beard" + } + ] + } + ], + "index": 48, + "angle": 0, + "type": "image_caption" + } + ], + "index": 47 + }, + { + "type": "image", + "bbox": [ + 403, + 332, + 449, + 378 + ], + "blocks": [ + { + "bbox": [ + 403, + 332, + 449, + 378 + ], + "lines": [ + { + "bbox": [ + 403, + 332, + 449, + 378 + ], + "spans": [ + { + "bbox": [ + 403, + 332, + 449, + 378 + ], + "type": "image", + "image_path": "ef8f24a423832c377b502751ea1a6ec5c02870f6c9bf6f57ca957e36e35ccf1e.jpg" + } + ] + } + ], + "index": 49, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 406, + 380, + 447, + 386 + ], + "lines": [ + { + "bbox": [ + 406, + 380, + 447, + 386 + ], + "spans": [ + { + "bbox": [ + 406, + 380, + 447, + 386 + ], + "type": "text", + "content": "SAC by Pale Skin" + } + ] + } + ], + "index": 50, + "angle": 0, + "type": "image_caption" + } + ], + "index": 49 + }, + { + "type": "image", + "bbox": [ + 450, + 332, + 497, + 378 + ], + "blocks": [ + { + "bbox": [ + 450, + 332, + 497, + 378 + ], + "lines": [ + { + "bbox": [ + 450, + 332, + 497, + 378 + ], + "spans": [ + { + "bbox": [ + 450, + 332, + 497, + 378 + ], + "type": "image", + "image_path": "14f558865c5e0601cdaa01a946868a1455678b57180ec80975d4ed1ececa0a91.jpg" + } + ] + } + ], + "index": 51, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 452, + 380, + 495, + 386 + ], + "lines": [ + { + "bbox": [ + 452, + 380, + 495, + 386 + ], + "spans": [ + { + "bbox": [ + 452, + 380, + 495, + 386 + ], + "type": "text", + "content": "SAC by Black Hair" + } + ] + } + ], + "index": 52, + "angle": 0, + "type": "image_caption" + } + ], + "index": 51 + }, + { + "type": "image", + "bbox": [ 
+ 498, + 332, + 545, + 378 + ], + "blocks": [ + { + "bbox": [ + 498, + 332, + 545, + 378 + ], + "lines": [ + { + "bbox": [ + 498, + 332, + 545, + 378 + ], + "spans": [ + { + "bbox": [ + 498, + 332, + 545, + 378 + ], + "type": "image", + "image_path": "d503aca254f8fbd0ba1ddfc316957c35f64f0e256656c8f21634b487f29c57bd.jpg" + } + ] + } + ], + "index": 53, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 501, + 380, + 542, + 386 + ], + "lines": [ + { + "bbox": [ + 501, + 380, + 542, + 386 + ], + "spans": [ + { + "bbox": [ + 501, + 380, + 542, + 386 + ], + "type": "text", + "content": "Multiple-Attribute" + } + ] + } + ], + "index": 54, + "angle": 0, + "type": "image_caption" + } + ], + "index": 53 + }, + { + "type": "image", + "bbox": [ + 308, + 392, + 355, + 438 + ], + "blocks": [ + { + "bbox": [ + 308, + 392, + 355, + 438 + ], + "lines": [ + { + "bbox": [ + 308, + 392, + 355, + 438 + ], + "spans": [ + { + "bbox": [ + 308, + 392, + 355, + 438 + ], + "type": "image", + "image_path": "4175e75073b501fe5f39001245d583575431f4cbce9915be62cc70647315c29c.jpg" + } + ] + } + ], + "index": 55, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 310, + 440, + 353, + 447 + ], + "lines": [ + { + "bbox": [ + 310, + 440, + 353, + 447 + ], + "spans": [ + { + "bbox": [ + 310, + 440, + 353, + 447 + ], + "type": "text", + "content": "Original Reference" + } + ] + } + ], + "index": 56, + "angle": 0, + "type": "image_caption" + } + ], + "index": 55 + }, + { + "type": "image", + "bbox": [ + 356, + 392, + 402, + 438 + ], + "blocks": [ + { + "bbox": [ + 356, + 392, + 402, + 438 + ], + "lines": [ + { + "bbox": [ + 356, + 392, + 402, + 438 + ], + "spans": [ + { + "bbox": [ + 356, + 392, + 402, + 438 + ], + "type": "image", + "image_path": "5d65bb23276b3a3c12c60e02c786251d3ba8fc8c89d311fea013e13ecf6352fa.jpg" + } + ] + } + ], + "index": 57, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 358, + 440, + 400, + 447 + ], + "lines": [ + { + "bbox": [ + 358, + 440, + 400, 
+ 447 + ], + "spans": [ + { + "bbox": [ + 358, + 440, + 400, + 447 + ], + "type": "text", + "content": "SAC by Lips Color" + } + ] + } + ], + "index": 58, + "angle": 0, + "type": "image_caption" + } + ], + "index": 57 + }, + { + "type": "image", + "bbox": [ + 403, + 392, + 450, + 438 + ], + "blocks": [ + { + "bbox": [ + 403, + 392, + 450, + 438 + ], + "lines": [ + { + "bbox": [ + 403, + 392, + 450, + 438 + ], + "spans": [ + { + "bbox": [ + 403, + 392, + 450, + 438 + ], + "type": "image", + "image_path": "3fa0538d80ef39f3390920970221e72096153f9028e5e434697ae973a6488b8f.jpg" + } + ] + } + ], + "index": 59, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 408, + 440, + 444, + 447 + ], + "lines": [ + { + "bbox": [ + 408, + 440, + 444, + 447 + ], + "spans": [ + { + "bbox": [ + 408, + 440, + 444, + 447 + ], + "type": "text", + "content": "SAC by Smiling" + } + ] + } + ], + "index": 60, + "angle": 0, + "type": "image_caption" + } + ], + "index": 59 + }, + { + "type": "image", + "bbox": [ + 450, + 392, + 497, + 438 + ], + "blocks": [ + { + "bbox": [ + 450, + 392, + 497, + 438 + ], + "lines": [ + { + "bbox": [ + 450, + 392, + 497, + 438 + ], + "spans": [ + { + "bbox": [ + 450, + 392, + 497, + 438 + ], + "type": "image", + "image_path": "1fa5c86fcdf03b6475a88a8351d7ebf59208fccfc5f637b753b80fc3793cd0d7.jpg" + } + ] + } + ], + "index": 61, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 457, + 440, + 490, + 447 + ], + "lines": [ + { + "bbox": [ + 457, + 440, + 490, + 447 + ], + "spans": [ + { + "bbox": [ + 457, + 440, + 490, + 447 + ], + "type": "text", + "content": "SAC by Bangs" + } + ] + } + ], + "index": 62, + "angle": 0, + "type": "image_caption" + } + ], + "index": 61 + }, + { + "type": "image", + "bbox": [ + 498, + 392, + 545, + 438 + ], + "blocks": [ + { + "bbox": [ + 498, + 392, + 545, + 438 + ], + "lines": [ + { + "bbox": [ + 498, + 392, + 545, + 438 + ], + "spans": [ + { + "bbox": [ + 498, + 392, + 545, + 438 + ], + "type": "image", + 
"image_path": "06ab8a56e1dd662eb7212a310fc18e69a77dde02e44ad7df8ead56fe5f2145de.jpg" + } + ] + } + ], + "index": 63, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 501, + 440, + 539, + 447 + ], + "lines": [ + { + "bbox": [ + 501, + 440, + 539, + 447 + ], + "spans": [ + { + "bbox": [ + 501, + 440, + 539, + 447 + ], + "type": "text", + "content": "Multiple-Attribute" + } + ] + } + ], + "index": 64, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 306, + 453, + 545, + 497 + ], + "lines": [ + { + "bbox": [ + 306, + 453, + 545, + 497 + ], + "spans": [ + { + "bbox": [ + 306, + 453, + 545, + 497 + ], + "type": "text", + "content": "Figure 9. Multiple-Attribute Counterfactual (MAC, Sec. 4.4) compared with Single-Attribute Counterfactual (SAC, Sec. 4.2). We can see that optimization along multiple directions enable the generation of more powerful counterfactuals." + } + ] + } + ], + "index": 65, + "angle": 0, + "type": "image_caption" + } + ], + "index": 63 + }, + { + "bbox": [ + 304, + 514, + 545, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 514, + 545, + 657 + ], + "spans": [ + { + "bbox": [ + 304, + 514, + 545, + 657 + ], + "type": "text", + "content": "Fig. 7 and Fig. 8 show examples of multi-attribute counterfactual images generated by ZOOM, against human and animal face classifiers. It can be observed that multiple face attributes such as lipsticks or hair color are edited in Fig. 7, and various cat/dog attributes like nose pinkness, eye shape, and fur patterns are edited in Fig. 8. These attribute edits are blended to affect the target model prediction. Appendix B further illustrates ZOOM counterfactual images for semantic segmentation, multi-class classification, and a church classifier. By mutating semantic representations, ZOOM reveals semantic combinations as outliers where the target model underfits." 
+ } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 304, + 660, + 545, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 660, + 545, + 719 + ], + "spans": [ + { + "bbox": [ + 304, + 660, + 545, + 719 + ], + "type": "text", + "content": "In the following sections, we will use the Flip Rate (the percentage of counterfactuals that flipped the model prediction) and Flip Resistance (the percentage of counterfactuals for which the model successfully withheld its prediction) to evaluate the multi-attribute setting." + } + ] + } + ], + "index": 67 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 73, + 71, + 242, + 79 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 71, + 242, + 79 + ], + "spans": [ + { + "bbox": [ + 73, + 71, + 242, + 79 + ], + "type": "text", + "content": "Eyeglasses Classifier (0-No Eyeglasses / 1-Eyeglasses)" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 254, + 71, + 397, + 79 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 254, + 71, + 397, + 79 + ], + "spans": [ + { + "bbox": [ + 254, + 71, + 397, + 79 + ], + "type": "text", + "content": "Perceived Age Classifier (0-Senior / 1-Young)" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 415, + 71, + 526, + 79 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 415, + 71, + 526, + 79 + ], + "spans": [ + { + "bbox": [ + 415, + 71, + 526, + 79 + ], + "type": "text", + "content": "Facial Keypoint Detector (WFLW)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11637" + } + ] + } + ], + "index": 68 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 69, + 166, + 149 + ], + "blocks": [ + { + "bbox": 
[ + 47, + 69, + 166, + 149 + ], + "lines": [ + { + "bbox": [ + 47, + 69, + 166, + 149 + ], + "spans": [ + { + "bbox": [ + 47, + 69, + 166, + 149 + ], + "type": "image", + "image_path": "3a31fe81e15c74b575c6093e46f0834299f52fbd05a5297a4af77ec9e568073e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 48, + 151, + 286, + 161 + ], + "lines": [ + { + "bbox": [ + 48, + 151, + 286, + 161 + ], + "spans": [ + { + "bbox": [ + 48, + 151, + 286, + 161 + ], + "type": "text", + "content": "(a) Sensitivity histograms generated by ZOOM on attribute combinations." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 167, + 69, + 286, + 149 + ], + "blocks": [ + { + "bbox": [ + 167, + 69, + 286, + 149 + ], + "lines": [ + { + "bbox": [ + 167, + 69, + 286, + 149 + ], + "spans": [ + { + "bbox": [ + 167, + 69, + 286, + 149 + ], + "type": "image", + "image_path": "b0c505f7748d04f811c5d12aed9bbf43c879e1b7ff0a3273c81dba77b6e38c5b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 47, + 162, + 284, + 228 + ], + "blocks": [ + { + "bbox": [ + 47, + 162, + 284, + 228 + ], + "lines": [ + { + "bbox": [ + 47, + 162, + 284, + 228 + ], + "spans": [ + { + "bbox": [ + 47, + 162, + 284, + 228 + ], + "type": "image", + "image_path": "0801f88cca854414287012d32b85e15604c661f7e8ca63dfc868dc3e24766f24.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 229, + 286, + 249 + ], + "lines": [ + { + "bbox": [ + 47, + 229, + 286, + 249 + ], + "spans": [ + { + "bbox": [ + 47, + 229, + 286, + 249 + ], + "type": "text", + "content": "(b) Model diagnosis by ZOOM over 19 attributes. Our framework is generalizable to analyze facial attributes of various domains." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 73, + 254, + 260, + 265 + ], + "lines": [ + { + "bbox": [ + 73, + 254, + 260, + 265 + ], + "spans": [ + { + "bbox": [ + 73, + 254, + 260, + 265 + ], + "type": "text", + "content": "Figure 10. Customizing attribute space for ZOOM." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 275, + 200, + 288 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 275, + 200, + 288 + ], + "spans": [ + { + "bbox": [ + 47, + 275, + 200, + 288 + ], + "type": "text", + "content": "4.4.1 Customizing attribute space" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 289, + 287, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 289, + 287, + 492 + ], + "spans": [ + { + "bbox": [ + 46, + 289, + 287, + 492 + ], + "type": "text", + "content": "In some circumstances, users may finish one round of model diagnosis and proceed to another round by adding new attributes, or trying a new attribute space. The linear nature of attribute editing (Eq. 3) in ZOOM makes it possible to easily add or remove attributes. Table 1 shows the flip rates results when adding new attributes into " + }, + { + "bbox": [ + 46, + 289, + 287, + 492 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 46, + 289, + 287, + 492 + ], + "type": "text", + "content": " for perceived age classifier and big lips classifier. We can observe that a different attribute space will result in different effectiveness of counterfactual images. Also, increasing the search iteration will make counterfactual more effective (see last row). Note that neither re-training the StyleGAN nor user-collection/labeling of data is required at any point in this procedure. Moreover, Fig. 10a shows the model diagnosis histograms generated with combinations of two attributes. Fig. 
10b demonstrates the capability of ZOOM in a rich vocabulary setting where we can analyze attributes that are not labeled in existing datasets [16, 29]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 497, + 212, + 509 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 497, + 212, + 509 + ], + "spans": [ + { + "bbox": [ + 47, + 497, + 212, + 509 + ], + "type": "text", + "content": "4.4.2 Counterfactual training results" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 514, + 287, + 716 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 514, + 287, + 716 + ], + "spans": [ + { + "bbox": [ + 46, + 514, + 287, + 716 + ], + "type": "text", + "content": "This section evaluates regular classifiers trained on CelebA [16] and counterfactually-trained (CT) classifiers on a mix of CelebA data and counterfactual images as described in Sec. 3.5. Table 2 presents accuracy and flip resistance (FR) results. CT outperforms the regular classifier. FR is assessed over 10,000 counterfactual images, with FR-25 and FR-100 denoting Flip Resistance after 25 and 100 optimization iterations, respectively. Both use " + }, + { + "bbox": [ + 46, + 514, + 287, + 716 + ], + "type": "inline_equation", + "content": "\\eta = 0.2" + }, + { + "bbox": [ + 46, + 514, + 287, + 716 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 514, + 287, + 716 + ], + "type": "inline_equation", + "content": "\\epsilon = 30" + }, + { + "bbox": [ + 46, + 514, + 287, + 716 + ], + "type": "text", + "content": ". We can observe that the classifiers after CT are way less likely to be flipped by counterfactual images while maintaining a decent accuracy on the CalebA testset. Our approach robustifies the model by increasing the tolerance toward counterfactuals. 
Note that CT slightly improves the CelebA classifier when trained on a mixture of CelebA images (original images) and the counterfactual images generated with a generative model trained in the FFHQ [12] images (different domain)." + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 307, + 70, + 544, + 140 + ], + "blocks": [ + { + "bbox": [ + 307, + 70, + 544, + 140 + ], + "lines": [ + { + "bbox": [ + 307, + 70, + 544, + 140 + ], + "spans": [ + { + "bbox": [ + 307, + 70, + 544, + 140 + ], + "type": "table", + "html": "
MethodAC Flip Rate (%)BC Flip Rate (%)
Initialize ZOOM by A61.9583.47
+ Attribute: Beard72.0890.07
+ Attribute: Smiling87.4796.27
+ Attribute: Lipstick90.9694.07
+ Iterations increased to 20092.9194.87
", + "image_path": "4427e736fac19a282ea9a3abd30cb3b7bf9f5849ac3eecf1d3b8a74b1709f21c.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 307, + 186, + 547, + 270 + ], + "blocks": [ + { + "bbox": [ + 306, + 148, + 545, + 182 + ], + "lines": [ + { + "bbox": [ + 306, + 148, + 545, + 182 + ], + "spans": [ + { + "bbox": [ + 306, + 148, + 545, + 182 + ], + "type": "text", + "content": "Table 1. Model flip rate study. The initial attribute space " + }, + { + "bbox": [ + 306, + 148, + 545, + 182 + ], + "type": "inline_equation", + "content": "\\mathcal{A} =" + }, + { + "bbox": [ + 306, + 148, + 545, + 182 + ], + "type": "text", + "content": " {Bangs, Blond Hair, Bushy Eyebrows, Pale Skin, Pointy Nose}. AC is the perceived age classifier and BC is the big lips classifier." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 186, + 547, + 270 + ], + "lines": [ + { + "bbox": [ + 307, + 186, + 547, + 270 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 547, + 270 + ], + "type": "table", + "html": "
AttributeMetricRegular (%)CT (Ours) (%)
Perceived AgeCelebA Accuracy86.1086.29
ZOOM FR-2519.5497.36
ZOOM FR-1009.0495.65
Big LipsCelebA Accuracy74.3675.39
ZOOM FR-2514.1299.19
ZOOM FR-1005.9388.91
", + "image_path": "019909ecbdeabb30964a6186068d59ea0fd09e455a4976227c0994ceed8f2d1e.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 278, + 545, + 323 + ], + "lines": [ + { + "bbox": [ + 305, + 278, + 545, + 323 + ], + "spans": [ + { + "bbox": [ + 305, + 278, + 545, + 323 + ], + "type": "text", + "content": "Table 2. Results of network inference on CelebA original images and ZOOM-generated counterfactual. The CT classifier is significantly less prone to be flipped by counterfactual images, while test accuracy on CelebA remains performant." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 306, + 327, + 457, + 339 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 327, + 457, + 339 + ], + "spans": [ + { + "bbox": [ + 306, + 327, + 457, + 339 + ], + "type": "text", + "content": "5. Conclusion and Discussion" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 342, + 545, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 342, + 545, + 460 + ], + "spans": [ + { + "bbox": [ + 304, + 342, + 545, + 460 + ], + "type": "text", + "content": "In this paper, we present ZOOM, a zero-shot model diagnosis framework that generates sensitivity histograms based on user's input of natural language attributes. ZOOM assesses failures and generates corresponding sensitivity histograms for each attribute. A significant advantage of our technique is its ability to analyze the failures of a target deep model without the need for laborious collection and annotation of test sets. ZOOM effectively visualizes the correlation between attributes and model outputs, elucidating model behaviors and intrinsic biases." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 461, + 546, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 461, + 546, + 652 + ], + "spans": [ + { + "bbox": [ + 304, + 461, + 546, + 652 + ], + "type": "text", + "content": "Our work has three primary limitations. First, users should possess domain knowledge as their input (text of attributes of interest) should be relevant to the target domain. Recall that it is a small price to pay for model evaluation without an annotated test set. Second, StyleGAN2-ADA struggles with generating out-of-domain samples. Nevertheless, our adversarial learning framework can be adapted to other generative models (e.g., stable diffusion), and the generator can be improved by training on more images. We have rigorously tested our generator with various user inputs, confirming its effectiveness for regular diagnosis requests. Currently, we are exploring stable diffusion models to generate a broader range of classes while maintaining the core concept. Finally, we rely on a pre-trained model like CLIP which we presume to be free of bias and capable of encompassing all relevant attributes." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 653, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 653, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 653, + 545, + 713 + ], + "type": "text", + "content": "Acknowledgements: We would like to thank George Cazenavette, Tianyuan Zhang, Yinong Wang, Hanzhe Hu, Bharath Raj for suggestions in the presentation and experiments. We sincerely thank Ken Ziyu Liu, Jiashun Wang, Bowen Li, and Ce Zheng for revisions to improve this work." 
+ } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11638" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 286, + 712 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 53, + 91, + 286, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 286, + 123 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 286, + 123 + ], + "type": "text", + "content": "[1] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. StarGAN v2: Diverse Image Synthesis for Multiple Domains. In CVPR, 2020." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 124, + 286, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 124, + 286, + 156 + ], + "spans": [ + { + "bbox": [ + 53, + 124, + 286, + 156 + ], + "type": "text", + "content": "[2] Edo Collins, Raja Bala, Bob Price, and Sabine Susstrunk. Editing in Style: Uncovering the Local Semantics of GANs. In CVPR, 2020." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 158, + 286, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 158, + 286, + 201 + ], + "spans": [ + { + "bbox": [ + 54, + 158, + 286, + 201 + ], + "type": "text", + "content": "[3] Emily Denton and Ben Hutchinson and Margaret Mitchell and Timnit Gebru and Andrew Zaldivar. 
Image counterfactual sensitivity analysis for detecting unintended bias. arXiv preprint arXiv:1906.06439, 2019." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 54, + 202, + 286, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 202, + 286, + 224 + ], + "spans": [ + { + "bbox": [ + 54, + 202, + 286, + 224 + ], + "type": "text", + "content": "[4] Ian J. Goodfellow, Jonathon Shlens, and Christian Szegedy. Explaining and Harnessing Adversarial Examples. 2014." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 225, + 286, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 225, + 286, + 256 + ], + "spans": [ + { + "bbox": [ + 54, + 225, + 286, + 256 + ], + "type": "text", + "content": "[5] Yash Goyal, Ziyan Wu, Jan Ernst, Dhruv Batra, Devi Parikh, and Stefan Lee. Counterfactual Visual Explanations. In ICML, 2019." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 258, + 286, + 290 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 258, + 286, + 290 + ], + "spans": [ + { + "bbox": [ + 53, + 258, + 286, + 290 + ], + "type": "text", + "content": "[6] Z. He, W. Zuo, M. Kan, S. Shan, and X. Chen. AttGAN: Facial Attribute Editing by Only Changing What You Want. In IEEE TIP, 2019." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 291, + 286, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 291, + 286, + 334 + ], + "spans": [ + { + "bbox": [ + 53, + 291, + 286, + 334 + ], + "type": "text", + "content": "[7] Fangzhou Hong, Mingyuan Zhang, Liang Pan, Zhongang Cai, Lei Yang, and Ziwei Liu. AvatarCLIP: Zero-Shot Text-Driven Generation and Animation of 3D Avatars. In ACM TOG, 2022." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 335, + 286, + 368 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 335, + 286, + 368 + ], + "spans": [ + { + "bbox": [ + 53, + 335, + 286, + 368 + ], + "type": "text", + "content": "[8] Erik Härkönen, Aaron Hertzmann, Jaakko Lehtinen, and Sylvain Paris. GANSpace: Discovering Interpretable GAN Controls. In NeurIPS, 2020." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 369, + 286, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 369, + 286, + 402 + ], + "spans": [ + { + "bbox": [ + 53, + 369, + 286, + 402 + ], + "type": "text", + "content": "[9] Ameya Joshi, Amitangshu Mukherjee, Soumik Sarkar, and Chinmay Hegde. Semantic Adversarial Attacks: Parametric Transformations That Fool Deep Classifiers. In ICCV, 2019." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 403, + 286, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 403, + 286, + 435 + ], + "spans": [ + { + "bbox": [ + 48, + 403, + 286, + 435 + ], + "type": "text", + "content": "[10] Kimmo Karkkainen and Jungseock Joo. FairFace: Face Attribute Dataset for Balanced Race, Gender, and Age for Bias Measurement and Mitigation. In WACV, 2021." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 436, + 286, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 436, + 286, + 468 + ], + "spans": [ + { + "bbox": [ + 48, + 436, + 286, + 468 + ], + "type": "text", + "content": "[11] Tero Karras, Miika Aittala, Janne Hellsten, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Training Generative Adversarial Networks with Limited Data. In NeurIPS, 2020." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 469, + 286, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 469, + 286, + 501 + ], + "spans": [ + { + "bbox": [ + 48, + 469, + 286, + 501 + ], + "type": "text", + "content": "[12] Tero Karras, Samuli Laine, and Timo Aila. A Style-Based Generator Architecture for Generative Adversarial Networks. In CVPR, 2019." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 502, + 286, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 502, + 286, + 556 + ], + "spans": [ + { + "bbox": [ + 48, + 502, + 286, + 556 + ], + "type": "text", + "content": "[13] Oran Lang, Yossi Gandelsman, Michal Yarom, Yoav Wald, Gal Elidan, Avinatan Hassidim, William T. Freeman, Phillip Isola, Amir Globerson, Michal Irani, and Inbar Mosseri. Explaining in Style: Training a GAN To Explain a Classifier in StyleSpace. In ICCV, 2021." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 558, + 286, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 558, + 286, + 601 + ], + "spans": [ + { + "bbox": [ + 48, + 558, + 286, + 601 + ], + "type": "text", + "content": "[14] Bo Li, Qiulin Wang, Jiquan Pei, Yu Yang, and Xiangyang Ji. Which Style Makes Me Attractive? Interpretable Control Discovery and Counterfactual Explanation on StyleGAN. arXiv preprint arXiv:2201.09689, 2022." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 602, + 286, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 602, + 286, + 624 + ], + "spans": [ + { + "bbox": [ + 48, + 602, + 286, + 624 + ], + "type": "text", + "content": "[15] Zhiheng Li and Chenliang Xu. Discover the Unknown Biased Attribute of an Image Classifier. In ICCV, 2021." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 624, + 286, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 286, + 646 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 286, + 646 + ], + "type": "text", + "content": "[16] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaoou Tang. Deep Learning Face Attributes in the Wild. In ICCV, 2015." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 48, + 647, + 286, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 286, + 679 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 286, + 679 + ], + "type": "text", + "content": "[17] Jinqi Luo, Zhaoning Wang, Chen Henry Wu, Dong Huang, and Fernando De la Torre. Semantic image attack for visual model diagnosis. arXiv preprint arXiv:2303.13010, 2023." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 48, + 681, + 286, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 681, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 681, + 286, + 712 + ], + "type": "text", + "content": "[18] Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. Towards Deep Learning Models Resistant to Adversarial Attacks. In ICLR, 2018." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 37, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "type": "text", + "content": "[19] Joanna Materzynska, Antonio Torralba, and David Bau. Disentangling Visual and Written Concepts in CLIP. In CVPR, 2022." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 107, + 545, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 140 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 140 + ], + "type": "text", + "content": "[20] Ramaravind K. Mothilal, Amit Sharma, and Chenhao Tan. Explaining Machine Learning Classifiers through Diverse Counterfactual Explanations. In ACM FAccT, 2020." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 141, + 545, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 141, + 545, + 175 + ], + "spans": [ + { + "bbox": [ + 307, + 141, + 545, + 175 + ], + "type": "text", + "content": "[21] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. StyleCLIP: Text-Driven Manipulation of StyleGAN Imagery. In ICCV, 2021." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 177, + 545, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 177, + 545, + 209 + ], + "spans": [ + { + "bbox": [ + 307, + 177, + 545, + 209 + ], + "type": "text", + "content": "[22] Ben Poole, Ajay Jain, Jonathan T. Barron, and Ben Mildenhall. DreamFusion: Text-to-3D using 2D Diffusion. arXiv preprint arXiv:2209.14988, 2022." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 211, + 545, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 211, + 545, + 255 + ], + "spans": [ + { + "bbox": [ + 307, + 211, + 545, + 255 + ], + "type": "text", + "content": "[23] Haonan Qiu, Chaowei Xiao, Lei Yang, Xinchen Yan, Honglak Lee, and Bo Li. SemanticAdv: Generating Adversarial Examples via Attribute-conditioned Image Editing. In ECCV, 2020." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 256, + 545, + 322 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 256, + 545, + 322 + ], + "spans": [ + { + "bbox": [ + 307, + 256, + 545, + 322 + ], + "type": "text", + "content": "[24] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning Transferable Visual Models From Natural Language Supervision. In ICML, 2021." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 323, + 545, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 323, + 545, + 357 + ], + "spans": [ + { + "bbox": [ + 307, + 323, + 545, + 357 + ], + "type": "text", + "content": "[25] Vikram V. Ramaswamy, Sunnie S. Y. Kim, and Olga Russakovsky. Fair Attribute Classification Through Latent Space De-Biasing. In CVPR, 2021." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 358, + 545, + 379 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 358, + 545, + 379 + ], + "spans": [ + { + "bbox": [ + 307, + 358, + 545, + 379 + ], + "type": "text", + "content": "[26] Axel Sauer and Andreas Geiger. Counterfactual Generative Networks. In ICLR, 2021." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 381, + 545, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 381, + 545, + 415 + ], + "spans": [ + { + "bbox": [ + 307, + 381, + 545, + 415 + ], + "type": "text", + "content": "[27] Yujun Shen, Ceyuan Yang, Xiaoou Tang, and Bolei Zhou. InterFaceGAN: Interpreting the Disentangled Face Representation Learned by GANs. In IEEE TPAMI, 2020." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 416, + 545, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 416, + 545, + 437 + ], + "spans": [ + { + "bbox": [ + 307, + 416, + 545, + 437 + ], + "type": "text", + "content": "[28] Yujun Shen and Bolei Zhou. Closed-Form Factorization of Latent Semantics in GANs. In CVPR, 2021." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 440, + 545, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 440, + 545, + 483 + ], + "spans": [ + { + "bbox": [ + 307, + 440, + 545, + 483 + ], + "type": "text", + "content": "[29] Philipp Terhörst, Daniel Fährmann, Jan Niklas Kolf, Naser Damer, Florian Kirchbuchner, and Arjan Kuijper. MAAD-Face: A Massively Annotated Attribute Dataset for Face Images. In IEEE TIFS, 2021." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 485, + 545, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 485, + 545, + 518 + ], + "spans": [ + { + "bbox": [ + 307, + 485, + 545, + 518 + ], + "type": "text", + "content": "[30] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. CLIP-NeRF: Text-and-Image Driven Manipulation of Neural Radiance Fields. In CVPR, 2022." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 520, + 545, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 520, + 545, + 574 + ], + "spans": [ + { + "bbox": [ + 307, + 520, + 545, + 574 + ], + "type": "text", + "content": "[31] Jingdong Wang, Ke Sun, Tianheng Cheng, Borui Jiang, Chaorui Deng, Yang Zhao, Dong Liu, Yadong Mu, Mingkui Tan, Xinggang Wang, Wenyu Liu, and Bin Xiao. Deep High-Resolution Representation Learning for Visual Recognition. In IEEE TPAMI, 2019." 
+ } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 307, + 576, + 545, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 576, + 545, + 609 + ], + "spans": [ + { + "bbox": [ + 307, + 576, + 545, + 609 + ], + "type": "text", + "content": "[32] Zhou Wang, A.C. Bovik, H.R. Sheikh, and E.P. Simoncelli. Image Quality Assessment: from Error Visibility to Structural Similarity. In IEEE TIP, 2004." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 307, + 611, + 545, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 611, + 545, + 643 + ], + "spans": [ + { + "bbox": [ + 307, + 611, + 545, + 643 + ], + "type": "text", + "content": "[33] Wayne Wu, Chen Qian, Shuo Yang, Quan Wang, Yici Cai, and Qiang Zhou. Look at Boundary: A Boundary-Aware Face Alignment Algorithm. In CVPR, 2018." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 307, + 646, + 545, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 646, + 545, + 678 + ], + "spans": [ + { + "bbox": [ + 307, + 646, + 545, + 678 + ], + "type": "text", + "content": "[34] Zongze Wu, Dani Lischinski, and Eli Shechtman. StyleSpace Analysis: Disentangled Controls for StyleGAN Image Generation. In CVPR, 2021." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 307, + 680, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 680, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 680, + 545, + 712 + ], + "type": "text", + "content": "[35] Weihao Xia, Yulun Zhang, Yujiu Yang, Jing-Hao Xue, Bolei Zhou, and Ming-Hsuan Yang. GAN Inversion: A Survey. In IEEE TPAMI, 2022." 
+ } + ] + } + ], + "index": 36 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "text", + "content": "11639" + } + ] + } + ], + "index": 38 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 150 + ], + "type": "list", + "angle": 0, + "index": 2, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 105 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 105 + ], + "type": "text", + "content": "[36] Chaowei Xiao, Bo Li, Jun-yan Zhu, Warren He, Mingyan Liu, and Dawn Song. Generating Adversarial Examples with Adversarial Networks. In *IJCAI*, 2018." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 107, + 287, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 107, + 287, + 150 + ], + "spans": [ + { + "bbox": [ + 48, + 107, + 287, + 150 + ], + "type": "text", + "content": "[37] Mingyuan Zhang, Zhongang Cai, Liang Pan, Fangzhou Hong, Xinying Guo, Lei Yang, and Ziwei Liu. MotionDiffuse: Text-Driven Human Motion Generation with Diffusion Model. arXiv preprint arXiv:2208.15001, 2022." + } + ] + } + ], + "index": 1 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "text", + "content": "11640" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file